diff --git a/ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..3bf5b16c122eb79444a075ccd4b74fe8cfa95f05 --- /dev/null +++ b/ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb13ebab68cba0a7509fa628d7990d44d14ef15c0b32b25714a9350d1ecd507a +size 9372 diff --git a/ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..820f23032ed77b9dd4a58b67c0fc3769e6545640 --- /dev/null +++ b/ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2859762b5b059963806e36692d5907386cfd22ed71d4f8e4ccaf8e5e0e8f7ca8 +size 9387 diff --git a/ckpts/universal/global_step40/zero/7.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/7.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..58c6c3af2d19e7a718eaf61c193f3eaf027737c6 --- /dev/null +++ b/ckpts/universal/global_step40/zero/7.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad175c6c25f46ed73ac689c66406d96a30a211304a349044cc6a1fa571deadde +size 16778411 diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5f2b90a026aaecbdc090b3d3234954ab29fce8ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2018, Quansight-Labs 
+All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..91afdcedb180599a41758cdd8c03416cf6c20d76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py @@ -0,0 +1,116 @@ +""" +.. note: + If you are looking for overrides for NumPy-specific methods, see the + documentation for :obj:`unumpy`. 
This page explains how to write + back-ends and multimethods. + +``uarray`` is built around a back-end protocol, and overridable multimethods. +It is necessary to define multimethods for back-ends to be able to override them. +See the documentation of :obj:`generate_multimethod` on how to write multimethods. + + + +Let's start with the simplest: + +``__ua_domain__`` defines the back-end *domain*. The domain consists of period- +separated string consisting of the modules you extend plus the submodule. For +example, if a submodule ``module2.submodule`` extends ``module1`` +(i.e., it exposes dispatchables marked as types available in ``module1``), +then the domain string should be ``"module1.module2.submodule"``. + + +For the purpose of this demonstration, we'll be creating an object and setting +its attributes directly. However, note that you can use a module or your own type +as a backend as well. + +>>> class Backend: pass +>>> be = Backend() +>>> be.__ua_domain__ = "ua_examples" + +It might be useful at this point to sidetrack to the documentation of +:obj:`generate_multimethod` to find out how to generate a multimethod +overridable by :obj:`uarray`. Needless to say, writing a backend and +creating multimethods are mostly orthogonal activities, and knowing +one doesn't necessarily require knowledge of the other, although it +is certainly helpful. We expect core API designers/specifiers to write the +multimethods, and implementors to override them. But, as is often the case, +similar people write both. + +Without further ado, here's an example multimethod: + +>>> import uarray as ua +>>> from uarray import Dispatchable +>>> def override_me(a, b): +... return Dispatchable(a, int), +>>> def override_replacer(args, kwargs, dispatchables): +... return (dispatchables[0], args[1]), {} +>>> overridden_me = ua.generate_multimethod( +... override_me, override_replacer, "ua_examples" +... ) + +Next comes the part about overriding the multimethod. 
This requires +the ``__ua_function__`` protocol, and the ``__ua_convert__`` +protocol. The ``__ua_function__`` protocol has the signature +``(method, args, kwargs)`` where ``method`` is the passed +multimethod, ``args``/``kwargs`` specify the arguments and ``dispatchables`` +is the list of converted dispatchables passed in. + +>>> def __ua_function__(method, args, kwargs): +... return method.__name__, args, kwargs +>>> be.__ua_function__ = __ua_function__ + +The other protocol of interest is the ``__ua_convert__`` protocol. It has the +signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion +between the formats should ideally be an ``O(1)`` operation, but it means that +no memory copying should be involved, only views of the existing data. + +>>> def __ua_convert__(dispatchables, coerce): +... for d in dispatchables: +... if d.type is int: +... if coerce and d.coercible: +... yield str(d.value) +... else: +... yield d.value +>>> be.__ua_convert__ = __ua_convert__ + +Now that we have defined the backend, the next thing to do is to call the multimethod. + +>>> with ua.set_backend(be): +... overridden_me(1, "2") +('override_me', (1, '2'), {}) + +Note that the marked type has no effect on the actual type of the passed object. +We can also coerce the type of the input. + +>>> with ua.set_backend(be, coerce=True): +... overridden_me(1, "2") +... overridden_me(1.0, "2") +('override_me', ('1', '2'), {}) +('override_me', ('1.0', '2'), {}) + +Another feature is that if you remove ``__ua_convert__``, the arguments are not +converted at all and it's up to the backend to handle that. + +>>> del be.__ua_convert__ +>>> with ua.set_backend(be): +... overridden_me(1, "2") +('override_me', (1, '2'), {}) + +You also have the option to return ``NotImplemented``, in which case processing moves on +to the next back-end, which in this case, doesn't exist. The same applies to +``__ua_convert__``. 
+ +>>> be.__ua_function__ = lambda *a, **kw: NotImplemented +>>> with ua.set_backend(be): +... overridden_me(1, "2") +Traceback (most recent call last): + ... +uarray.BackendNotImplementedError: ... + +The last possibility is if we don't have ``__ua_convert__``, in which case the job is +left up to ``__ua_function__``, but putting things back into arrays after conversion +will not be possible. +""" + +from ._backend import * +__version__ = '0.8.8.dev0+aa94c5a4.scipy' diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d76ee8f1d0638c1b362cc4726656d93a8dbc0c61 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed553bb3818117929dd607a519e82184341d7ec6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..67da7d35ccea8ad26bd471b16e9400071a821cc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py @@ -0,0 +1,704 @@ +import typing +import types +import inspect +import functools +from . 
import _uarray +import copyreg +import pickle +import contextlib + +from ._uarray import ( # type: ignore + BackendNotImplementedError, + _Function, + _SkipBackendContext, + _SetBackendContext, + _BackendState, +) + +__all__ = [ + "set_backend", + "set_global_backend", + "skip_backend", + "register_backend", + "determine_backend", + "determine_backend_multi", + "clear_backends", + "create_multimethod", + "generate_multimethod", + "_Function", + "BackendNotImplementedError", + "Dispatchable", + "wrap_single_convertor", + "wrap_single_convertor_instance", + "all_of_type", + "mark_as", + "set_state", + "get_state", + "reset_state", + "_BackendState", + "_SkipBackendContext", + "_SetBackendContext", +] + +ArgumentExtractorType = typing.Callable[..., tuple["Dispatchable", ...]] +ArgumentReplacerType = typing.Callable[ + [tuple, dict, tuple], tuple[tuple, dict] +] + +def unpickle_function(mod_name, qname, self_): + import importlib + + try: + module = importlib.import_module(mod_name) + qname = qname.split(".") + func = module + for q in qname: + func = getattr(func, q) + + if self_ is not None: + func = types.MethodType(func, self_) + + return func + except (ImportError, AttributeError) as e: + from pickle import UnpicklingError + + raise UnpicklingError from e + + +def pickle_function(func): + mod_name = getattr(func, "__module__", None) + qname = getattr(func, "__qualname__", None) + self_ = getattr(func, "__self__", None) + + try: + test = unpickle_function(mod_name, qname, self_) + except pickle.UnpicklingError: + test = None + + if test is not func: + raise pickle.PicklingError( + f"Can't pickle {func}: it's not the same object as {test}" + ) + + return unpickle_function, (mod_name, qname, self_) + + +def pickle_state(state): + return _uarray._BackendState._unpickle, state._pickle() + + +def pickle_set_backend_context(ctx): + return _SetBackendContext, ctx._pickle() + + +def pickle_skip_backend_context(ctx): + return _SkipBackendContext, ctx._pickle() + + 
+copyreg.pickle(_Function, pickle_function) +copyreg.pickle(_uarray._BackendState, pickle_state) +copyreg.pickle(_SetBackendContext, pickle_set_backend_context) +copyreg.pickle(_SkipBackendContext, pickle_skip_backend_context) + + +def get_state(): + """ + Returns an opaque object containing the current state of all the backends. + + Can be used for synchronization between threads/processes. + + See Also + -------- + set_state + Sets the state returned by this function. + """ + return _uarray.get_state() + + +@contextlib.contextmanager +def reset_state(): + """ + Returns a context manager that resets all state once exited. + + See Also + -------- + set_state + Context manager that sets the backend state. + get_state + Gets a state to be set by this context manager. + """ + with set_state(get_state()): + yield + + +@contextlib.contextmanager +def set_state(state): + """ + A context manager that sets the state of the backends to one returned by :obj:`get_state`. + + See Also + -------- + get_state + Gets a state to be set by this context manager. + """ # noqa: E501 + old_state = get_state() + _uarray.set_state(state) + try: + yield + finally: + _uarray.set_state(old_state, True) + + +def create_multimethod(*args, **kwargs): + """ + Creates a decorator for generating multimethods. + + This function creates a decorator that can be used with an argument + extractor in order to generate a multimethod. Other than for the + argument extractor, all arguments are passed on to + :obj:`generate_multimethod`. + + See Also + -------- + generate_multimethod + Generates a multimethod. + """ + + def wrapper(a): + return generate_multimethod(a, *args, **kwargs) + + return wrapper + + +def generate_multimethod( + argument_extractor: ArgumentExtractorType, + argument_replacer: ArgumentReplacerType, + domain: str, + default: typing.Optional[typing.Callable] = None, +): + """ + Generates a multimethod. 
+ + Parameters + ---------- + argument_extractor : ArgumentExtractorType + A callable which extracts the dispatchable arguments. Extracted arguments + should be marked by the :obj:`Dispatchable` class. It has the same signature + as the desired multimethod. + argument_replacer : ArgumentReplacerType + A callable with the signature (args, kwargs, dispatchables), which should also + return an (args, kwargs) pair with the dispatchables replaced inside the + args/kwargs. + domain : str + A string value indicating the domain of this multimethod. + default: Optional[Callable], optional + The default implementation of this multimethod, where ``None`` (the default) + specifies there is no default implementation. + + Examples + -------- + In this example, ``a`` is to be dispatched over, so we return it, while marking it + as an ``int``. + The trailing comma is needed because the args have to be returned as an iterable. + + >>> def override_me(a, b): + ... return Dispatchable(a, int), + + Next, we define the argument replacer that replaces the dispatchables inside + args/kwargs with the supplied ones. + + >>> def override_replacer(args, kwargs, dispatchables): + ... return (dispatchables[0], args[1]), {} + + Next, we define the multimethod. + + >>> overridden_me = generate_multimethod( + ... override_me, override_replacer, "ua_examples" + ... ) + + Notice that there's no default implementation, unless you supply one. + + >>> overridden_me(1, "a") + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + >>> overridden_me2 = generate_multimethod( + ... override_me, override_replacer, "ua_examples", default=lambda x, y: (x, y) + ... ) + >>> overridden_me2(1, "a") + (1, 'a') + + See Also + -------- + uarray + See the module documentation for how to override the method by creating + backends. 
+ """ + kw_defaults, arg_defaults, opts = get_defaults(argument_extractor) + ua_func = _Function( + argument_extractor, + argument_replacer, + domain, + arg_defaults, + kw_defaults, + default, + ) + + return functools.update_wrapper(ua_func, argument_extractor) + + +def set_backend(backend, coerce=False, only=False): + """ + A context manager that sets the preferred backend. + + Parameters + ---------- + backend + The backend to set. + coerce + Whether or not to coerce to a specific backend's types. Implies ``only``. + only + Whether or not this should be the last backend to try. + + See Also + -------- + skip_backend: A context manager that allows skipping of backends. + set_global_backend: Set a single, global backend for a domain. + """ + try: + return backend.__ua_cache__["set", coerce, only] + except AttributeError: + backend.__ua_cache__ = {} + except KeyError: + pass + + ctx = _SetBackendContext(backend, coerce, only) + backend.__ua_cache__["set", coerce, only] = ctx + return ctx + + +def skip_backend(backend): + """ + A context manager that allows one to skip a given backend from processing + entirely. This allows one to use another backend's code in a library that + is also a consumer of the same backend. + + Parameters + ---------- + backend + The backend to skip. + + See Also + -------- + set_backend: A context manager that allows setting of backends. + set_global_backend: Set a single, global backend for a domain. 
+ """ + try: + return backend.__ua_cache__["skip"] + except AttributeError: + backend.__ua_cache__ = {} + except KeyError: + pass + + ctx = _SkipBackendContext(backend) + backend.__ua_cache__["skip"] = ctx + return ctx + + +def get_defaults(f): + sig = inspect.signature(f) + kw_defaults = {} + arg_defaults = [] + opts = set() + for k, v in sig.parameters.items(): + if v.default is not inspect.Parameter.empty: + kw_defaults[k] = v.default + if v.kind in ( + inspect.Parameter.POSITIONAL_ONLY, + inspect.Parameter.POSITIONAL_OR_KEYWORD, + ): + arg_defaults.append(v.default) + opts.add(k) + + return kw_defaults, tuple(arg_defaults), opts + + +def set_global_backend(backend, coerce=False, only=False, *, try_last=False): + """ + This utility method replaces the default backend for permanent use. It + will be tried in the list of backends automatically, unless the + ``only`` flag is set on a backend. This will be the first tried + backend outside the :obj:`set_backend` context manager. + + Note that this method is not thread-safe. + + .. warning:: + We caution library authors against using this function in + their code. We do *not* support this use-case. This function + is meant to be used only by users themselves, or by a reference + implementation, if one exists. + + Parameters + ---------- + backend + The backend to register. + coerce : bool + Whether to coerce input types when trying this backend. + only : bool + If ``True``, no more backends will be tried if this fails. + Implied by ``coerce=True``. + try_last : bool + If ``True``, the global backend is tried after registered backends. + + See Also + -------- + set_backend: A context manager that allows setting of backends. + skip_backend: A context manager that allows skipping of backends. + """ + _uarray.set_global_backend(backend, coerce, only, try_last) + + +def register_backend(backend): + """ + This utility method sets registers backend for permanent use. 
It + will be tried in the list of backends automatically, unless the + ``only`` flag is set on a backend. + + Note that this method is not thread-safe. + + Parameters + ---------- + backend + The backend to register. + """ + _uarray.register_backend(backend) + + +def clear_backends(domain, registered=True, globals=False): + """ + This utility method clears registered backends. + + .. warning:: + We caution library authors against using this function in + their code. We do *not* support this use-case. This function + is meant to be used only by users themselves. + + .. warning:: + Do NOT use this method inside a multimethod call, or the + program is likely to crash. + + Parameters + ---------- + domain : Optional[str] + The domain for which to de-register backends. ``None`` means + de-register for all domains. + registered : bool + Whether or not to clear registered backends. See :obj:`register_backend`. + globals : bool + Whether or not to clear global backends. See :obj:`set_global_backend`. + + See Also + -------- + register_backend : Register a backend globally. + set_global_backend : Set a global backend. + """ + _uarray.clear_backends(domain, registered, globals) + + +class Dispatchable: + """ + A utility class which marks an argument with a specific dispatch type. + + + Attributes + ---------- + value + The value of the Dispatchable. + + type + The type of the Dispatchable. + + Examples + -------- + >>> x = Dispatchable(1, str) + >>> x + , value=1> + + See Also + -------- + all_of_type + Marks all unmarked parameters of a function. + + mark_as + Allows one to create a utility function to mark as a given type. 
+ """ + + def __init__(self, value, dispatch_type, coercible=True): + self.value = value + self.type = dispatch_type + self.coercible = coercible + + def __getitem__(self, index): + return (self.type, self.value)[index] + + def __str__(self): + return f"<{type(self).__name__}: type={self.type!r}, value={self.value!r}>" + + __repr__ = __str__ + + +def mark_as(dispatch_type): + """ + Creates a utility function to mark something as a specific type. + + Examples + -------- + >>> mark_int = mark_as(int) + >>> mark_int(1) + , value=1> + """ + return functools.partial(Dispatchable, dispatch_type=dispatch_type) + + +def all_of_type(arg_type): + """ + Marks all unmarked arguments as a given type. + + Examples + -------- + >>> @all_of_type(str) + ... def f(a, b): + ... return a, Dispatchable(b, int) + >>> f('a', 1) + (, value='a'>, + , value=1>) + """ + + def outer(func): + @functools.wraps(func) + def inner(*args, **kwargs): + extracted_args = func(*args, **kwargs) + return tuple( + Dispatchable(arg, arg_type) + if not isinstance(arg, Dispatchable) + else arg + for arg in extracted_args + ) + + return inner + + return outer + + +def wrap_single_convertor(convert_single): + """ + Wraps a ``__ua_convert__`` defined for a single element to all elements. + If any of them return ``NotImplemented``, the operation is assumed to be + undefined. + + Accepts a signature of (value, type, coerce). + """ + + @functools.wraps(convert_single) + def __ua_convert__(dispatchables, coerce): + converted = [] + for d in dispatchables: + c = convert_single(d.value, d.type, coerce and d.coercible) + + if c is NotImplemented: + return NotImplemented + + converted.append(c) + + return converted + + return __ua_convert__ + + +def wrap_single_convertor_instance(convert_single): + """ + Wraps a ``__ua_convert__`` defined for a single element to all elements. + If any of them return ``NotImplemented``, the operation is assumed to be + undefined. + + Accepts a signature of (value, type, coerce). 
+ """ + + @functools.wraps(convert_single) + def __ua_convert__(self, dispatchables, coerce): + converted = [] + for d in dispatchables: + c = convert_single(self, d.value, d.type, coerce and d.coercible) + + if c is NotImplemented: + return NotImplemented + + converted.append(c) + + return converted + + return __ua_convert__ + + +def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False): + """Set the backend to the first active backend that supports ``value`` + + This is useful for functions that call multimethods without any dispatchable + arguments. You can use :func:`determine_backend` to ensure the same backend + is used everywhere in a block of multimethod calls. + + Parameters + ---------- + value + The value being tested + dispatch_type + The dispatch type associated with ``value``, aka + ":ref:`marking `". + domain: string + The domain to query for backends and set. + coerce: bool + Whether or not to allow coercion to the backend's types. Implies ``only``. + only: bool + Whether or not this should be the last backend to try. + + See Also + -------- + set_backend: For when you know which backend to set + + Notes + ----- + + Support is determined by the ``__ua_convert__`` protocol. Backends not + supporting the type must return ``NotImplemented`` from their + ``__ua_convert__`` if they don't support input of that type. + + Examples + -------- + + Suppose we have two backends ``BackendA`` and ``BackendB`` each supporting + different types, ``TypeA`` and ``TypeB``. Neither supporting the other type: + + >>> with ua.set_backend(ex.BackendA): + ... ex.call_multimethod(ex.TypeB(), ex.TypeB()) + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + Now consider a multimethod that creates a new object of ``TypeA``, or + ``TypeB`` depending on the active backend. + + >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB): + ... res = ex.creation_multimethod() + ... 
ex.call_multimethod(res, ex.TypeA()) + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + ``res`` is an object of ``TypeB`` because ``BackendB`` is set in the + innermost with statement. So, ``call_multimethod`` fails since the types + don't match. + + Instead, we need to first find a backend suitable for all of our objects. + + >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB): + ... x = ex.TypeA() + ... with ua.determine_backend(x, "mark", domain="ua_examples"): + ... res = ex.creation_multimethod() + ... ex.call_multimethod(res, x) + TypeA + + """ + dispatchables = (Dispatchable(value, dispatch_type, coerce),) + backend = _uarray.determine_backend(domain, dispatchables, coerce) + + return set_backend(backend, coerce=coerce, only=only) + + +def determine_backend_multi( + dispatchables, *, domain, only=True, coerce=False, **kwargs +): + """Set a backend supporting all ``dispatchables`` + + This is useful for functions that call multimethods without any dispatchable + arguments. You can use :func:`determine_backend_multi` to ensure the same + backend is used everywhere in a block of multimethod calls involving + multiple arrays. + + Parameters + ---------- + dispatchables: Sequence[Union[uarray.Dispatchable, Any]] + The dispatchables that must be supported + domain: string + The domain to query for backends and set. + coerce: bool + Whether or not to allow coercion to the backend's types. Implies ``only``. + only: bool + Whether or not this should be the last backend to try. + dispatch_type: Optional[Any] + The default dispatch type associated with ``dispatchables``, aka + ":ref:`marking `". + + See Also + -------- + determine_backend: For a single dispatch value + set_backend: For when you know which backend to set + + Notes + ----- + + Support is determined by the ``__ua_convert__`` protocol. 
Backends not + supporting the type must return ``NotImplemented`` from their + ``__ua_convert__`` if they don't support input of that type. + + Examples + -------- + + :func:`determine_backend` allows the backend to be set from a single + object. :func:`determine_backend_multi` allows multiple objects to be + checked simultaneously for support in the backend. Suppose we have a + ``BackendAB`` which supports ``TypeA`` and ``TypeB`` in the same call, + and a ``BackendBC`` that doesn't support ``TypeA``. + + >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC): + ... a, b = ex.TypeA(), ex.TypeB() + ... with ua.determine_backend_multi( + ... [ua.Dispatchable(a, "mark"), ua.Dispatchable(b, "mark")], + ... domain="ua_examples" + ... ): + ... res = ex.creation_multimethod() + ... ex.call_multimethod(res, a, b) + TypeA + + This won't call ``BackendBC`` because it doesn't support ``TypeA``. + + We can also use leave out the ``ua.Dispatchable`` if we specify the + default ``dispatch_type`` for the ``dispatchables`` argument. + + >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC): + ... a, b = ex.TypeA(), ex.TypeB() + ... with ua.determine_backend_multi( + ... [a, b], dispatch_type="mark", domain="ua_examples" + ... ): + ... res = ex.creation_multimethod() + ... 
ex.call_multimethod(res, a, b) + TypeA + + """ + if "dispatch_type" in kwargs: + disp_type = kwargs.pop("dispatch_type") + dispatchables = tuple( + d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type) + for d in dispatchables + ) + else: + dispatchables = tuple(dispatchables) + if not all(isinstance(d, Dispatchable) for d in dispatchables): + raise TypeError("dispatchables must be instances of uarray.Dispatchable") + + if len(kwargs) != 0: + raise TypeError(f"Received unexpected keyword arguments: {kwargs}") + + backend = _uarray.determine_backend(domain, dispatchables, coerce) + + return set_backend(backend, coerce=coerce, only=only) diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d442a8273d1a633e01a54badb1668b46f0fc19d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..28ffc7e79d2cc197d2b8bb95743caabb9691d0aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py @@ -0,0 +1,22 @@ +""" +NumPy Array API compatibility library + +This is a small wrapper around NumPy and CuPy that is compatible with the +Array API standard https://data-apis.org/array-api/latest/. See also NEP 47 +https://numpy.org/neps/nep-0047-array-api-standard.html. + +Unlike numpy.array_api, this is not a strict minimal implementation of the +Array API, but rather just an extension of the main NumPy namespace with +changes needed to be compliant with the Array API. 
See +https://numpy.org/doc/stable/reference/array_api.html for a full list of +changes. In particular, unlike numpy.array_api, this package does not use a +separate Array object, but rather just uses numpy.ndarray directly. + +Library authors using the Array API may wish to test against numpy.array_api +to ensure they are not using functionality outside of the standard, but prefer +this implementation for the default when working with NumPy arrays. + +""" +__version__ = '1.4.1' + +from .common import * diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a43be366d6014bd47500ae0ccc3f1d551069d366 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b774344bc560675b23669928573703529f26d709 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py new file mode 100644 index 0000000000000000000000000000000000000000..553c03561b45e7791548b78b17cec6f86b86f9c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py @@ -0,0 +1,43 @@ +""" +Internal helpers +""" + +from functools import wraps +from inspect import signature + +def get_xp(xp): + """ + Decorator to automatically replace xp with the corresponding array 
module. + + Use like + + import numpy as np + + @get_xp(np) + def func(x, /, xp, kwarg=None): + return xp.func(x, kwarg=kwarg) + + Note that xp must be a keyword argument and come after all non-keyword + arguments. + + """ + def inner(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + return f(*args, xp=xp, **kwargs) + + sig = signature(f) + new_sig = sig.replace(parameters=[sig.parameters[i] for i in sig.parameters if i != 'xp']) + + if wrapped_f.__doc__ is None: + wrapped_f.__doc__ = f"""\ +Array API compatibility wrapper for {f.__name__}. + +See the corresponding documentation in NumPy/CuPy and/or the array API +specification for more details. + +""" + wrapped_f.__signature__ = new_sig + return wrapped_f + + return inner diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce3f44dd486cb373ba9cca450dc486d6ecb66352 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py @@ -0,0 +1 @@ +from ._helpers import * diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07f0b024cf5b834ce3b336c30b9d85f326add8af Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01be3cfb89d17f7824e5f253b062eb3e7e03a520 Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d95a646eccdcd7b27592b6f05a8be16c1d501f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..076075b4452ec2e53a787be0d864a923b5594fd3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..841bfb2f3ed2602d8379f90d3aed347e49ce76c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..c057e71d3c85df1fd2abbe0251083cf7bee45213 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py @@ -0,0 +1,536 @@ +""" +These are functions that 
are just aliases of existing functions in NumPy. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Optional, Sequence, Tuple, Union, List + from ._typing import ndarray, Device, Dtype, NestedSequence, SupportsBufferProtocol + +from typing import NamedTuple +from types import ModuleType +import inspect + +from ._helpers import _check_device, _is_numpy_array, array_namespace + +# These functions are modified from the NumPy versions. + +def arange( + start: Union[int, float], + /, + stop: Optional[Union[int, float]] = None, + step: Union[int, float] = 1, + *, + xp, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs +) -> ndarray: + _check_device(xp, device) + return xp.arange(start, stop=stop, step=step, dtype=dtype, **kwargs) + +def empty( + shape: Union[int, Tuple[int, ...]], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs +) -> ndarray: + _check_device(xp, device) + return xp.empty(shape, dtype=dtype, **kwargs) + +def empty_like( + x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, + **kwargs +) -> ndarray: + _check_device(xp, device) + return xp.empty_like(x, dtype=dtype, **kwargs) + +def eye( + n_rows: int, + n_cols: Optional[int] = None, + /, + *, + xp, + k: int = 0, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.eye(n_rows, M=n_cols, k=k, dtype=dtype, **kwargs) + +def full( + shape: Union[int, Tuple[int, ...]], + fill_value: Union[int, float], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.full(shape, fill_value, dtype=dtype, **kwargs) + +def full_like( + x: ndarray, + /, + fill_value: Union[int, float], + *, + xp, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> 
ndarray: + _check_device(xp, device) + return xp.full_like(x, fill_value, dtype=dtype, **kwargs) + +def linspace( + start: Union[int, float], + stop: Union[int, float], + /, + num: int, + *, + xp, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + endpoint: bool = True, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.linspace(start, stop, num, dtype=dtype, endpoint=endpoint, **kwargs) + +def ones( + shape: Union[int, Tuple[int, ...]], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.ones(shape, dtype=dtype, **kwargs) + +def ones_like( + x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.ones_like(x, dtype=dtype, **kwargs) + +def zeros( + shape: Union[int, Tuple[int, ...]], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.zeros(shape, dtype=dtype, **kwargs) + +def zeros_like( + x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.zeros_like(x, dtype=dtype, **kwargs) + +# np.unique() is split into four functions in the array API: +# unique_all, unique_counts, unique_inverse, and unique_values (this is done +# to remove polymorphic return types). + +# The functions here return namedtuples (np.unique() returns a normal +# tuple). +class UniqueAllResult(NamedTuple): + values: ndarray + indices: ndarray + inverse_indices: ndarray + counts: ndarray + + +class UniqueCountsResult(NamedTuple): + values: ndarray + counts: ndarray + + +class UniqueInverseResult(NamedTuple): + values: ndarray + inverse_indices: ndarray + + +def _unique_kwargs(xp): + # Older versions of NumPy and CuPy do not have equal_nan. 
Rather than + # trying to parse version numbers, just check if equal_nan is in the + # signature. + s = inspect.signature(xp.unique) + if 'equal_nan' in s.parameters: + return {'equal_nan': False} + return {} + +def unique_all(x: ndarray, /, xp) -> UniqueAllResult: + kwargs = _unique_kwargs(xp) + values, indices, inverse_indices, counts = xp.unique( + x, + return_counts=True, + return_index=True, + return_inverse=True, + **kwargs, + ) + # np.unique() flattens inverse indices, but they need to share x's shape + # See https://github.com/numpy/numpy/issues/20638 + inverse_indices = inverse_indices.reshape(x.shape) + return UniqueAllResult( + values, + indices, + inverse_indices, + counts, + ) + + +def unique_counts(x: ndarray, /, xp) -> UniqueCountsResult: + kwargs = _unique_kwargs(xp) + res = xp.unique( + x, + return_counts=True, + return_index=False, + return_inverse=False, + **kwargs + ) + + return UniqueCountsResult(*res) + + +def unique_inverse(x: ndarray, /, xp) -> UniqueInverseResult: + kwargs = _unique_kwargs(xp) + values, inverse_indices = xp.unique( + x, + return_counts=False, + return_index=False, + return_inverse=True, + **kwargs, + ) + # xp.unique() flattens inverse indices, but they need to share x's shape + # See https://github.com/numpy/numpy/issues/20638 + inverse_indices = inverse_indices.reshape(x.shape) + return UniqueInverseResult(values, inverse_indices) + + +def unique_values(x: ndarray, /, xp) -> ndarray: + kwargs = _unique_kwargs(xp) + return xp.unique( + x, + return_counts=False, + return_index=False, + return_inverse=False, + **kwargs, + ) + +def astype(x: ndarray, dtype: Dtype, /, *, copy: bool = True) -> ndarray: + if not copy and dtype == x.dtype: + return x + return x.astype(dtype=dtype, copy=copy) + +# These functions have different keyword argument names + +def std( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, # correction instead of ddof + keepdims: bool = 
False, + **kwargs, +) -> ndarray: + return xp.std(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs) + +def var( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, # correction instead of ddof + keepdims: bool = False, + **kwargs, +) -> ndarray: + return xp.var(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs) + +# Unlike transpose(), the axes argument to permute_dims() is required. +def permute_dims(x: ndarray, /, axes: Tuple[int, ...], xp) -> ndarray: + return xp.transpose(x, axes) + +# Creation functions add the device keyword (which does nothing for NumPy) + +# asarray also adds the copy keyword +def _asarray( + obj: Union[ + ndarray, + bool, + int, + float, + NestedSequence[bool | int | float], + SupportsBufferProtocol, + ], + /, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + copy: "Optional[Union[bool, np._CopyMode]]" = None, + namespace = None, + **kwargs, +) -> ndarray: + """ + Array API compatibility wrapper for asarray(). + + See the corresponding documentation in NumPy/CuPy and/or the array API + specification for more details. + + """ + if namespace is None: + try: + xp = array_namespace(obj, _use_compat=False) + except ValueError: + # TODO: What about lists of arrays? 
+ raise ValueError("A namespace must be specified for asarray() with non-array input") + elif isinstance(namespace, ModuleType): + xp = namespace + elif namespace == 'numpy': + import numpy as xp + elif namespace == 'cupy': + import cupy as xp + else: + raise ValueError("Unrecognized namespace argument to asarray()") + + _check_device(xp, device) + if _is_numpy_array(obj): + import numpy as np + if hasattr(np, '_CopyMode'): + # Not present in older NumPys + COPY_FALSE = (False, np._CopyMode.IF_NEEDED) + COPY_TRUE = (True, np._CopyMode.ALWAYS) + else: + COPY_FALSE = (False,) + COPY_TRUE = (True,) + else: + COPY_FALSE = (False,) + COPY_TRUE = (True,) + if copy in COPY_FALSE: + # copy=False is not yet implemented in xp.asarray + raise NotImplementedError("copy=False is not yet implemented") + if isinstance(obj, xp.ndarray): + if dtype is not None and obj.dtype != dtype: + copy = True + if copy in COPY_TRUE: + return xp.array(obj, copy=True, dtype=dtype) + return obj + + return xp.asarray(obj, dtype=dtype, **kwargs) + +# np.reshape calls the keyword argument 'newshape' instead of 'shape' +def reshape(x: ndarray, + /, + shape: Tuple[int, ...], + xp, copy: Optional[bool] = None, + **kwargs) -> ndarray: + if copy is True: + x = x.copy() + elif copy is False: + y = x.view() + y.shape = shape + return y + return xp.reshape(x, shape, **kwargs) + +# The descending keyword is new in sort and argsort, and 'kind' replaced with +# 'stable' +def argsort( + x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True, + **kwargs, +) -> ndarray: + # Note: this keyword argument is different, and the default is different. + # We set it in kwargs like this because numpy.sort uses kind='quicksort' + # as the default whereas cupy.sort uses kind=None. + if stable: + kwargs['kind'] = "stable" + if not descending: + res = xp.argsort(x, axis=axis, **kwargs) + else: + # As NumPy has no native descending sort, we imitate it here. 
Note that + # simply flipping the results of xp.argsort(x, ...) would not + # respect the relative order like it would in native descending sorts. + res = xp.flip( + xp.argsort(xp.flip(x, axis=axis), axis=axis, **kwargs), + axis=axis, + ) + # Rely on flip()/argsort() to validate axis + normalised_axis = axis if axis >= 0 else x.ndim + axis + max_i = x.shape[normalised_axis] - 1 + res = max_i - res + return res + +def sort( + x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True, + **kwargs, +) -> ndarray: + # Note: this keyword argument is different, and the default is different. + # We set it in kwargs like this because numpy.sort uses kind='quicksort' + # as the default whereas cupy.sort uses kind=None. + if stable: + kwargs['kind'] = "stable" + res = xp.sort(x, axis=axis, **kwargs) + if descending: + res = xp.flip(res, axis=axis) + return res + +# nonzero should error for zero-dimensional arrays +def nonzero(x: ndarray, /, xp, **kwargs) -> Tuple[ndarray, ...]: + if x.ndim == 0: + raise ValueError("nonzero() does not support zero-dimensional arrays") + return xp.nonzero(x, **kwargs) + +# sum() and prod() should always upcast when dtype=None +def sum( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs, +) -> ndarray: + # `xp.sum` already upcasts integers, but not floats or complexes + if dtype is None: + if x.dtype == xp.float32: + dtype = xp.float64 + elif x.dtype == xp.complex64: + dtype = xp.complex128 + return xp.sum(x, axis=axis, dtype=dtype, keepdims=keepdims, **kwargs) + +def prod( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs, +) -> ndarray: + if dtype is None: + if x.dtype == xp.float32: + dtype = xp.float64 + elif x.dtype == xp.complex64: + dtype = xp.complex128 + return xp.prod(x, dtype=dtype, axis=axis, 
keepdims=keepdims, **kwargs) + +# ceil, floor, and trunc return integers for integer inputs + +def ceil(x: ndarray, /, xp, **kwargs) -> ndarray: + if xp.issubdtype(x.dtype, xp.integer): + return x + return xp.ceil(x, **kwargs) + +def floor(x: ndarray, /, xp, **kwargs) -> ndarray: + if xp.issubdtype(x.dtype, xp.integer): + return x + return xp.floor(x, **kwargs) + +def trunc(x: ndarray, /, xp, **kwargs) -> ndarray: + if xp.issubdtype(x.dtype, xp.integer): + return x + return xp.trunc(x, **kwargs) + +# linear algebra functions + +def matmul(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray: + return xp.matmul(x1, x2, **kwargs) + +# Unlike transpose, matrix_transpose only transposes the last two axes. +def matrix_transpose(x: ndarray, /, xp) -> ndarray: + if x.ndim < 2: + raise ValueError("x must be at least 2-dimensional for matrix_transpose") + return xp.swapaxes(x, -1, -2) + +def tensordot(x1: ndarray, + x2: ndarray, + /, + xp, + *, + axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2, + **kwargs, +) -> ndarray: + return xp.tensordot(x1, x2, axes=axes, **kwargs) + +def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray: + ndim = max(x1.ndim, x2.ndim) + x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape) + x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape) + if x1_shape[axis] != x2_shape[axis]: + raise ValueError("x1 and x2 must have the same size along the given axis") + + if hasattr(xp, 'broadcast_tensors'): + _broadcast = xp.broadcast_tensors + else: + _broadcast = xp.broadcast_arrays + + x1_, x2_ = _broadcast(x1, x2) + x1_ = xp.moveaxis(x1_, axis, -1) + x2_ = xp.moveaxis(x2_, axis, -1) + + res = x1_[..., None, :] @ x2_[..., None] + return res[..., 0, 0] + +# isdtype is a new function in the 2022.12 array API specification. 
+ +def isdtype( + dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], xp, + *, _tuple=True, # Disallow nested tuples +) -> bool: + """ + Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``. + + Note that outside of this function, this compat library does not yet fully + support complex numbers. + + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html + for more details + """ + if isinstance(kind, tuple) and _tuple: + return any(isdtype(dtype, k, xp, _tuple=False) for k in kind) + elif isinstance(kind, str): + if kind == 'bool': + return dtype == xp.bool_ + elif kind == 'signed integer': + return xp.issubdtype(dtype, xp.signedinteger) + elif kind == 'unsigned integer': + return xp.issubdtype(dtype, xp.unsignedinteger) + elif kind == 'integral': + return xp.issubdtype(dtype, xp.integer) + elif kind == 'real floating': + return xp.issubdtype(dtype, xp.floating) + elif kind == 'complex floating': + return xp.issubdtype(dtype, xp.complexfloating) + elif kind == 'numeric': + return xp.issubdtype(dtype, xp.number) + else: + raise ValueError(f"Unrecognized data type kind: {kind!r}") + else: + # This will allow things that aren't required by the spec, like + # isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be + # more strict here to match the type annotation? Note that the + # numpy.array_api implementation will be very strict. 
+ return dtype == kind + +__all__ = ['arange', 'empty', 'empty_like', 'eye', 'full', 'full_like', + 'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like', + 'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult', + 'unique_all', 'unique_counts', 'unique_inverse', 'unique_values', + 'astype', 'std', 'var', 'permute_dims', 'reshape', 'argsort', + 'sort', 'nonzero', 'sum', 'prod', 'ceil', 'floor', 'trunc', + 'matmul', 'matrix_transpose', 'tensordot', 'vecdot', 'isdtype'] diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..c1b0aef3f7328eac55b059fb2729b163c327e669 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py @@ -0,0 +1,232 @@ +""" +Various helper functions which are not part of the spec. + +Functions which start with an underscore are for internal use only but helpers +that are in __all__ are intended as additional helper functions for use by end +users of the compat library. +""" +from __future__ import annotations + +import sys +import math + +def _is_numpy_array(x): + # Avoid importing NumPy if it isn't already + if 'numpy' not in sys.modules: + return False + + import numpy as np + + # TODO: Should we reject ndarray subclasses? + return isinstance(x, (np.ndarray, np.generic)) + +def _is_cupy_array(x): + # Avoid importing NumPy if it isn't already + if 'cupy' not in sys.modules: + return False + + import cupy as cp + + # TODO: Should we reject ndarray subclasses? + return isinstance(x, (cp.ndarray, cp.generic)) + +def _is_torch_array(x): + # Avoid importing torch if it isn't already + if 'torch' not in sys.modules: + return False + + import torch + + # TODO: Should we reject ndarray subclasses? 
+ return isinstance(x, torch.Tensor) + +def is_array_api_obj(x): + """ + Check if x is an array API compatible array object. + """ + return _is_numpy_array(x) \ + or _is_cupy_array(x) \ + or _is_torch_array(x) \ + or hasattr(x, '__array_namespace__') + +def _check_api_version(api_version): + if api_version is not None and api_version != '2021.12': + raise ValueError("Only the 2021.12 version of the array API specification is currently supported") + +def array_namespace(*xs, api_version=None, _use_compat=True): + """ + Get the array API compatible namespace for the arrays `xs`. + + `xs` should contain one or more arrays. + + Typical usage is + + def your_function(x, y): + xp = array_api_compat.array_namespace(x, y) + # Now use xp as the array library namespace + return xp.mean(x, axis=0) + 2*xp.std(y, axis=0) + + api_version should be the newest version of the spec that you need support + for (currently the compat library wrapped APIs only support v2021.12). + """ + namespaces = set() + for x in xs: + if _is_numpy_array(x): + _check_api_version(api_version) + if _use_compat: + from .. import numpy as numpy_namespace + namespaces.add(numpy_namespace) + else: + import numpy as np + namespaces.add(np) + elif _is_cupy_array(x): + _check_api_version(api_version) + if _use_compat: + from .. import cupy as cupy_namespace + namespaces.add(cupy_namespace) + else: + import cupy as cp + namespaces.add(cp) + elif _is_torch_array(x): + _check_api_version(api_version) + if _use_compat: + from .. import torch as torch_namespace + namespaces.add(torch_namespace) + else: + import torch + namespaces.add(torch) + elif hasattr(x, '__array_namespace__'): + namespaces.add(x.__array_namespace__(api_version=api_version)) + else: + # TODO: Support Python scalars? 
+ raise TypeError(f"{type(x).__name__} is not a supported array type") + + if not namespaces: + raise TypeError("Unrecognized array input") + + if len(namespaces) != 1: + raise TypeError(f"Multiple namespaces for array inputs: {namespaces}") + + xp, = namespaces + + return xp + +# backwards compatibility alias +get_namespace = array_namespace + +def _check_device(xp, device): + if xp == sys.modules.get('numpy'): + if device not in ["cpu", None]: + raise ValueError(f"Unsupported device for NumPy: {device!r}") + +# device() is not on numpy.ndarray and and to_device() is not on numpy.ndarray +# or cupy.ndarray. They are not included in array objects of this library +# because this library just reuses the respective ndarray classes without +# wrapping or subclassing them. These helper functions can be used instead of +# the wrapper functions for libraries that need to support both NumPy/CuPy and +# other libraries that use devices. +def device(x: "Array", /) -> "Device": + """ + Hardware device the array data resides on. + + Parameters + ---------- + x: array + array instance from NumPy or an array API compatible library. + + Returns + ------- + out: device + a ``device`` object (see the "Device Support" section of the array API specification). 
+ """ + if _is_numpy_array(x): + return "cpu" + return x.device + +# Based on cupy.array_api.Array.to_device +def _cupy_to_device(x, device, /, stream=None): + import cupy as cp + from cupy.cuda import Device as _Device + from cupy.cuda import stream as stream_module + from cupy_backends.cuda.api import runtime + + if device == x.device: + return x + elif device == "cpu": + # allowing us to use `to_device(x, "cpu")` + # is useful for portable test swapping between + # host and device backends + return x.get() + elif not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + else: + # see cupy/cupy#5985 for the reason how we handle device/stream here + prev_device = runtime.getDevice() + prev_stream: stream_module.Stream = None + if stream is not None: + prev_stream = stream_module.get_current_stream() + # stream can be an int as specified in __dlpack__, or a CuPy stream + if isinstance(stream, int): + stream = cp.cuda.ExternalStream(stream) + elif isinstance(stream, cp.cuda.Stream): + pass + else: + raise ValueError('the input stream is not recognized') + stream.use() + try: + runtime.setDevice(device.id) + arr = x.copy() + finally: + runtime.setDevice(prev_device) + if stream is not None: + prev_stream.use() + return arr + +def _torch_to_device(x, device, /, stream=None): + if stream is not None: + raise NotImplementedError + return x.to(device) + +def to_device(x: "Array", device: "Device", /, *, stream: "Optional[Union[int, Any]]" = None) -> "Array": + """ + Copy the array from the device on which it currently resides to the specified ``device``. + + Parameters + ---------- + x: array + array instance from NumPy or an array API compatible library. + device: device + a ``device`` object (see the "Device Support" section of the array API specification). + stream: Optional[Union[int, Any]] + stream object to use during copy. 
In addition to the types supported in ``array.__dlpack__``, implementations may choose to support any library-specific stream object with the caveat that any code using such an object would not be portable. + + Returns + ------- + out: array + an array with the same data and data type as ``x`` and located on the specified ``device``. + + .. note:: + If ``stream`` is given, the copy operation should be enqueued on the provided ``stream``; otherwise, the copy operation should be enqueued on the default stream/queue. Whether the copy is performed synchronously or asynchronously is implementation-dependent. Accordingly, if synchronization is required to guarantee data safety, this must be clearly explained in a conforming library's documentation. + """ + if _is_numpy_array(x): + if stream is not None: + raise ValueError("The stream argument to to_device() is not supported") + if device == 'cpu': + return x + raise ValueError(f"Unsupported device {device!r}") + elif _is_cupy_array(x): + # cupy does not yet have to_device + return _cupy_to_device(x, device, stream=stream) + elif _is_torch_array(x): + return _torch_to_device(x, device, stream=stream) + return x.to_device(device, stream=stream) + +def size(x): + """ + Return the total number of elements of x + """ + if None in x.shape: + return None + return math.prod(x.shape) + +__all__ = ['is_array_api_obj', 'array_namespace', 'get_namespace', 'device', 'to_device', 'size'] diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..ce5b55d1a7202870a3de71d797f0e62a86af3dc3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, NamedTuple +if TYPE_CHECKING: + from typing import Literal, Optional, Sequence, 
Tuple, Union + from ._typing import ndarray + +import numpy as np +if np.__version__[0] == "2": + from numpy.lib.array_utils import normalize_axis_tuple +else: + from numpy.core.numeric import normalize_axis_tuple + +from ._aliases import matmul, matrix_transpose, tensordot, vecdot, isdtype +from .._internal import get_xp + +# These are in the main NumPy namespace but not in numpy.linalg +def cross(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1, **kwargs) -> ndarray: + return xp.cross(x1, x2, axis=axis, **kwargs) + +def outer(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray: + return xp.outer(x1, x2, **kwargs) + +class EighResult(NamedTuple): + eigenvalues: ndarray + eigenvectors: ndarray + +class QRResult(NamedTuple): + Q: ndarray + R: ndarray + +class SlogdetResult(NamedTuple): + sign: ndarray + logabsdet: ndarray + +class SVDResult(NamedTuple): + U: ndarray + S: ndarray + Vh: ndarray + +# These functions are the same as their NumPy counterparts except they return +# a namedtuple. +def eigh(x: ndarray, /, xp, **kwargs) -> EighResult: + return EighResult(*xp.linalg.eigh(x, **kwargs)) + +def qr(x: ndarray, /, xp, *, mode: Literal['reduced', 'complete'] = 'reduced', + **kwargs) -> QRResult: + return QRResult(*xp.linalg.qr(x, mode=mode, **kwargs)) + +def slogdet(x: ndarray, /, xp, **kwargs) -> SlogdetResult: + return SlogdetResult(*xp.linalg.slogdet(x, **kwargs)) + +def svd(x: ndarray, /, xp, *, full_matrices: bool = True, **kwargs) -> SVDResult: + return SVDResult(*xp.linalg.svd(x, full_matrices=full_matrices, **kwargs)) + +# These functions have additional keyword arguments + +# The upper keyword argument is new from NumPy +def cholesky(x: ndarray, /, xp, *, upper: bool = False, **kwargs) -> ndarray: + L = xp.linalg.cholesky(x, **kwargs) + if upper: + U = get_xp(xp)(matrix_transpose)(L) + if get_xp(xp)(isdtype)(U.dtype, 'complex floating'): + U = xp.conj(U) + return U + return L + +# The rtol keyword argument of matrix_rank() and pinv() is new from NumPy. 
+# Note that it has a different semantic meaning from tol and rcond. +def matrix_rank(x: ndarray, + /, + xp, + *, + rtol: Optional[Union[float, ndarray]] = None, + **kwargs) -> ndarray: + # this is different from xp.linalg.matrix_rank, which supports 1 + # dimensional arrays. + if x.ndim < 2: + raise xp.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional") + S = xp.linalg.svd(x, compute_uv=False, **kwargs) + if rtol is None: + tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * xp.finfo(S.dtype).eps + else: + # this is different from xp.linalg.matrix_rank, which does not + # multiply the tolerance by the largest singular value. + tol = S.max(axis=-1, keepdims=True)*xp.asarray(rtol)[..., xp.newaxis] + return xp.count_nonzero(S > tol, axis=-1) + +def pinv(x: ndarray, /, xp, *, rtol: Optional[Union[float, ndarray]] = None, **kwargs) -> ndarray: + # this is different from xp.linalg.pinv, which does not multiply the + # default tolerance by max(M, N). + if rtol is None: + rtol = max(x.shape[-2:]) * xp.finfo(x.dtype).eps + return xp.linalg.pinv(x, rcond=rtol, **kwargs) + +# These functions are new in the array API spec + +def matrix_norm(x: ndarray, /, xp, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> ndarray: + return xp.linalg.norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) + +# svdvals is not in NumPy (but it is in SciPy). It is equivalent to +# xp.linalg.svd(compute_uv=False). 
+def svdvals(x: ndarray, /, xp) -> Union[ndarray, Tuple[ndarray, ...]]: + return xp.linalg.svd(x, compute_uv=False) + +def vector_norm(x: ndarray, /, xp, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> ndarray: + # xp.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or + # when axis=None and the input is 2-D, so to force a vector norm, we make + # it so the input is 1-D (for axis=None), or reshape so that norm is done + # on a single dimension. + if axis is None: + # Note: xp.linalg.norm() doesn't handle 0-D arrays + x = x.ravel() + _axis = 0 + elif isinstance(axis, tuple): + # Note: The axis argument supports any number of axes, whereas + # xp.linalg.norm() only supports a single axis for vector norm. + normalized_axis = normalize_axis_tuple(axis, x.ndim) + rest = tuple(i for i in range(x.ndim) if i not in normalized_axis) + newshape = axis + rest + x = xp.transpose(x, newshape).reshape( + (xp.prod([x.shape[i] for i in axis], dtype=int), *[x.shape[i] for i in rest])) + _axis = 0 + else: + _axis = axis + + res = xp.linalg.norm(x, axis=_axis, ord=ord) + + if keepdims: + # We can't reuse xp.linalg.norm(keepdims) because of the reshape hacks + # above to avoid matrix norm logic. 
+ shape = list(x.shape) + _axis = normalize_axis_tuple(range(x.ndim) if axis is None else axis, x.ndim) + for i in _axis: + shape[i] = 1 + res = xp.reshape(res, tuple(shape)) + + return res + +# xp.diagonal and xp.trace operate on the first two axes whereas these +# operates on the last two + +def diagonal(x: ndarray, /, xp, *, offset: int = 0, **kwargs) -> ndarray: + return xp.diagonal(x, offset=offset, axis1=-2, axis2=-1, **kwargs) + +def trace(x: ndarray, /, xp, *, offset: int = 0, dtype=None, **kwargs) -> ndarray: + if dtype is None: + if x.dtype == xp.float32: + dtype = xp.float64 + elif x.dtype == xp.complex64: + dtype = xp.complex128 + return xp.asarray(xp.trace(x, offset=offset, dtype=dtype, axis1=-2, axis2=-1, **kwargs)) + +__all__ = ['cross', 'matmul', 'outer', 'tensordot', 'EighResult', + 'QRResult', 'SlogdetResult', 'SVDResult', 'eigh', 'qr', 'slogdet', + 'svd', 'cholesky', 'matrix_rank', 'pinv', 'matrix_norm', + 'matrix_transpose', 'svdvals', 'vecdot', 'vector_norm', 'diagonal', + 'trace'] diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..3f17806094baa04355abe360bb0fc7792ea6e1bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +__all__ = [ + "NestedSequence", + "SupportsBufferProtocol", +] + +from typing import ( + Any, + TypeVar, + Protocol, +) + +_T_co = TypeVar("_T_co", covariant=True) + +class NestedSequence(Protocol[_T_co]): + def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ... + def __len__(self, /) -> int: ... 
+ +SupportsBufferProtocol = Any diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ec113f9d6f55d19abb31507cc32cd7545ce0da55 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py @@ -0,0 +1,16 @@ +from cupy import * + +# from cupy import * doesn't overwrite these builtin names +from cupy import abs, max, min, round + +# These imports may overwrite names from the import * above. +from ._aliases import * + +# See the comment in the numpy __init__.py +__import__(__package__ + '.linalg') + +from .linalg import matrix_transpose, vecdot + +from ..common._helpers import * + +__array_api_version__ = '2022.12' diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4a495ba8d8879fb5d17e96ccc9c37745a3a604c Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d076dd0f7bec0e59b795851bea71d9fba48f606 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..716a61661ba62d18ab90897870f8007a0928c409 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4f5fe7905178605abfc2a26c7126de88122cadc Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..d1d3cda968db731c849776bee8df74e28d572ffa --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +from functools import partial + +from ..common import _aliases + +from .._internal import get_xp + +asarray = asarray_cupy = partial(_aliases._asarray, namespace='cupy') +asarray.__doc__ = _aliases._asarray.__doc__ +del partial + +import cupy as cp +bool = cp.bool_ + +# Basic renames +acos = cp.arccos +acosh = cp.arccosh +asin = cp.arcsin +asinh = cp.arcsinh +atan = cp.arctan +atan2 = cp.arctan2 +atanh = cp.arctanh +bitwise_left_shift = cp.left_shift +bitwise_invert = cp.invert +bitwise_right_shift = cp.right_shift +concat = cp.concatenate +pow = cp.power + +arange = get_xp(cp)(_aliases.arange) +empty = get_xp(cp)(_aliases.empty) +empty_like = get_xp(cp)(_aliases.empty_like) +eye = get_xp(cp)(_aliases.eye) +full = 
get_xp(cp)(_aliases.full) +full_like = get_xp(cp)(_aliases.full_like) +linspace = get_xp(cp)(_aliases.linspace) +ones = get_xp(cp)(_aliases.ones) +ones_like = get_xp(cp)(_aliases.ones_like) +zeros = get_xp(cp)(_aliases.zeros) +zeros_like = get_xp(cp)(_aliases.zeros_like) +UniqueAllResult = get_xp(cp)(_aliases.UniqueAllResult) +UniqueCountsResult = get_xp(cp)(_aliases.UniqueCountsResult) +UniqueInverseResult = get_xp(cp)(_aliases.UniqueInverseResult) +unique_all = get_xp(cp)(_aliases.unique_all) +unique_counts = get_xp(cp)(_aliases.unique_counts) +unique_inverse = get_xp(cp)(_aliases.unique_inverse) +unique_values = get_xp(cp)(_aliases.unique_values) +astype = _aliases.astype +std = get_xp(cp)(_aliases.std) +var = get_xp(cp)(_aliases.var) +permute_dims = get_xp(cp)(_aliases.permute_dims) +reshape = get_xp(cp)(_aliases.reshape) +argsort = get_xp(cp)(_aliases.argsort) +sort = get_xp(cp)(_aliases.sort) +nonzero = get_xp(cp)(_aliases.nonzero) +sum = get_xp(cp)(_aliases.sum) +prod = get_xp(cp)(_aliases.prod) +ceil = get_xp(cp)(_aliases.ceil) +floor = get_xp(cp)(_aliases.floor) +trunc = get_xp(cp)(_aliases.trunc) +matmul = get_xp(cp)(_aliases.matmul) +matrix_transpose = get_xp(cp)(_aliases.matrix_transpose) +tensordot = get_xp(cp)(_aliases.tensordot) + +# These functions are completely new here. If the library already has them +# (i.e., numpy 2.0), use the library version instead of our wrapper. 
+if hasattr(cp, 'vecdot'): + vecdot = cp.vecdot +else: + vecdot = get_xp(cp)(_aliases.vecdot) +if hasattr(cp, 'isdtype'): + isdtype = cp.isdtype +else: + isdtype = get_xp(cp)(_aliases.isdtype) + +__all__ = _aliases.__all__ + ['asarray', 'asarray_cupy', 'bool', 'acos', + 'acosh', 'asin', 'asinh', 'atan', 'atan2', + 'atanh', 'bitwise_left_shift', 'bitwise_invert', + 'bitwise_right_shift', 'concat', 'pow'] diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..f3d9aab67e52f3300cd96c3d0e701d1604eaccbb --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +__all__ = [ + "ndarray", + "Device", + "Dtype", +] + +import sys +from typing import ( + Union, + TYPE_CHECKING, +) + +from cupy import ( + ndarray, + dtype, + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, +) + +from cupy.cuda.device import Device + +if TYPE_CHECKING or sys.version_info >= (3, 9): + Dtype = dtype[Union[ + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + ]] +else: + Dtype = dtype diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..84752e1a58ed4900ef43a99ab248342212290a43 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py @@ -0,0 +1,47 @@ +from cupy.linalg import * +# cupy.linalg doesn't have __all__. 
If it is added, replace this with +# +# from cupy.linalg import __all__ as linalg_all +_n = {} +exec('from cupy.linalg import *', _n) +del _n['__builtins__'] +linalg_all = list(_n) +del _n + +from ..common import _linalg +from .._internal import get_xp +from ._aliases import (matmul, matrix_transpose, tensordot, vecdot) + +import cupy as cp + +cross = get_xp(cp)(_linalg.cross) +outer = get_xp(cp)(_linalg.outer) +EighResult = _linalg.EighResult +QRResult = _linalg.QRResult +SlogdetResult = _linalg.SlogdetResult +SVDResult = _linalg.SVDResult +eigh = get_xp(cp)(_linalg.eigh) +qr = get_xp(cp)(_linalg.qr) +slogdet = get_xp(cp)(_linalg.slogdet) +svd = get_xp(cp)(_linalg.svd) +cholesky = get_xp(cp)(_linalg.cholesky) +matrix_rank = get_xp(cp)(_linalg.matrix_rank) +pinv = get_xp(cp)(_linalg.pinv) +matrix_norm = get_xp(cp)(_linalg.matrix_norm) +svdvals = get_xp(cp)(_linalg.svdvals) +diagonal = get_xp(cp)(_linalg.diagonal) +trace = get_xp(cp)(_linalg.trace) + +# These functions are completely new here. If the library already has them +# (i.e., numpy 2.0), use the library version instead of our wrapper. +if hasattr(cp.linalg, 'vector_norm'): + vector_norm = cp.linalg.vector_norm +else: + vector_norm = get_xp(cp)(_linalg.vector_norm) + +__all__ = linalg_all + _linalg.__all__ + +del get_xp +del cp +del linalg_all +del _linalg diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4a49f2f14bc26c3d6e9fdd115dee6499263d5cba --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py @@ -0,0 +1,22 @@ +from numpy import * + +# from numpy import * doesn't overwrite these builtin names +from numpy import abs, max, min, round + +# These imports may overwrite names from the import * above. 
+from ._aliases import * + +# Don't know why, but we have to do an absolute import to import linalg. If we +# instead do +# +# from . import linalg +# +# It doesn't overwrite np.linalg from above. The import is generated +# dynamically so that the library can be vendored. +__import__(__package__ + '.linalg') + +from .linalg import matrix_transpose, vecdot + +from ..common._helpers import * + +__array_api_version__ = '2022.12' diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42887727b7263effb2e97312517fd03971d55fa5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66941443477ae67bcd16c84069e0d2ba0745e574 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7b7e5d52a6d77ef10f667c2b55560c6602c5bf9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b94d7ae45cdb9184e0c8db21dd9158f070d6bc5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..e7d4a1be2f8001f804d338043d638c32c3958d3a --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +from functools import partial + +from ..common import _aliases + +from .._internal import get_xp + +asarray = asarray_numpy = partial(_aliases._asarray, namespace='numpy') +asarray.__doc__ = _aliases._asarray.__doc__ +del partial + +import numpy as np +bool = np.bool_ + +# Basic renames +acos = np.arccos +acosh = np.arccosh +asin = np.arcsin +asinh = np.arcsinh +atan = np.arctan +atan2 = np.arctan2 +atanh = np.arctanh +bitwise_left_shift = np.left_shift +bitwise_invert = np.invert +bitwise_right_shift = np.right_shift +concat = np.concatenate +pow = np.power + +arange = get_xp(np)(_aliases.arange) +empty = get_xp(np)(_aliases.empty) +empty_like = get_xp(np)(_aliases.empty_like) +eye = get_xp(np)(_aliases.eye) +full = get_xp(np)(_aliases.full) +full_like = get_xp(np)(_aliases.full_like) +linspace = get_xp(np)(_aliases.linspace) +ones = get_xp(np)(_aliases.ones) +ones_like = get_xp(np)(_aliases.ones_like) +zeros = get_xp(np)(_aliases.zeros) +zeros_like = get_xp(np)(_aliases.zeros_like) +UniqueAllResult = get_xp(np)(_aliases.UniqueAllResult) +UniqueCountsResult = get_xp(np)(_aliases.UniqueCountsResult) +UniqueInverseResult = get_xp(np)(_aliases.UniqueInverseResult) +unique_all = 
get_xp(np)(_aliases.unique_all) +unique_counts = get_xp(np)(_aliases.unique_counts) +unique_inverse = get_xp(np)(_aliases.unique_inverse) +unique_values = get_xp(np)(_aliases.unique_values) +astype = _aliases.astype +std = get_xp(np)(_aliases.std) +var = get_xp(np)(_aliases.var) +permute_dims = get_xp(np)(_aliases.permute_dims) +reshape = get_xp(np)(_aliases.reshape) +argsort = get_xp(np)(_aliases.argsort) +sort = get_xp(np)(_aliases.sort) +nonzero = get_xp(np)(_aliases.nonzero) +sum = get_xp(np)(_aliases.sum) +prod = get_xp(np)(_aliases.prod) +ceil = get_xp(np)(_aliases.ceil) +floor = get_xp(np)(_aliases.floor) +trunc = get_xp(np)(_aliases.trunc) +matmul = get_xp(np)(_aliases.matmul) +matrix_transpose = get_xp(np)(_aliases.matrix_transpose) +tensordot = get_xp(np)(_aliases.tensordot) + +# These functions are completely new here. If the library already has them +# (i.e., numpy 2.0), use the library version instead of our wrapper. +if hasattr(np, 'vecdot'): + vecdot = np.vecdot +else: + vecdot = get_xp(np)(_aliases.vecdot) +if hasattr(np, 'isdtype'): + isdtype = np.isdtype +else: + isdtype = get_xp(np)(_aliases.isdtype) + +__all__ = _aliases.__all__ + ['asarray', 'asarray_numpy', 'bool', 'acos', + 'acosh', 'asin', 'asinh', 'atan', 'atan2', + 'atanh', 'bitwise_left_shift', 'bitwise_invert', + 'bitwise_right_shift', 'concat', 'pow'] diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..c5ebb5abb987572be625ee864a37e61126d36d8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +__all__ = [ + "ndarray", + "Device", + "Dtype", +] + +import sys +from typing import ( + Literal, + Union, + TYPE_CHECKING, +) + +from numpy import ( + ndarray, + dtype, + int8, + int16, + int32, + int64, + 
uint8, + uint16, + uint32, + uint64, + float32, + float64, +) + +Device = Literal["cpu"] +if TYPE_CHECKING or sys.version_info >= (3, 9): + Dtype = dtype[Union[ + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + ]] +else: + Dtype = dtype diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..39997df81e171ae44ae5a149e53c9ccacb118ae9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py @@ -0,0 +1,40 @@ +from numpy.linalg import * +from numpy.linalg import __all__ as linalg_all + +from ..common import _linalg +from .._internal import get_xp +from ._aliases import (matmul, matrix_transpose, tensordot, vecdot) + +import numpy as np + +cross = get_xp(np)(_linalg.cross) +outer = get_xp(np)(_linalg.outer) +EighResult = _linalg.EighResult +QRResult = _linalg.QRResult +SlogdetResult = _linalg.SlogdetResult +SVDResult = _linalg.SVDResult +eigh = get_xp(np)(_linalg.eigh) +qr = get_xp(np)(_linalg.qr) +slogdet = get_xp(np)(_linalg.slogdet) +svd = get_xp(np)(_linalg.svd) +cholesky = get_xp(np)(_linalg.cholesky) +matrix_rank = get_xp(np)(_linalg.matrix_rank) +pinv = get_xp(np)(_linalg.pinv) +matrix_norm = get_xp(np)(_linalg.matrix_norm) +svdvals = get_xp(np)(_linalg.svdvals) +diagonal = get_xp(np)(_linalg.diagonal) +trace = get_xp(np)(_linalg.trace) + +# These functions are completely new here. If the library already has them +# (i.e., numpy 2.0), use the library version instead of our wrapper. 
+if hasattr(np.linalg, 'vector_norm'): + vector_norm = np.linalg.vector_norm +else: + vector_norm = get_xp(np)(_linalg.vector_norm) + +__all__ = linalg_all + _linalg.__all__ + +del get_xp +del np +del linalg_all +del _linalg diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae53ec52dee920bf7a7f2b4758f8ad787f7e6784 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py @@ -0,0 +1,22 @@ +from torch import * + +# Several names are not included in the above import * +import torch +for n in dir(torch): + if (n.startswith('_') + or n.endswith('_') + or 'cuda' in n + or 'cpu' in n + or 'backward' in n): + continue + exec(n + ' = torch.' + n) + +# These imports may overwrite names from the import * above. +from ._aliases import * + +# See the comment in the numpy __init__.py +__import__(__package__ + '.linalg') + +from ..common._helpers import * + +__array_api_version__ = '2022.12' diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc1afb254a2844b8ff9932200fa2f241c7f5178c Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26af6c458d93d4fd2f580aa85cd36ae4eb2be15a Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..980ed5861ee4fb50c14d7b07e1938811775c8ac4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..929d31aa81ea9f019d0601c8c69213264efa75c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py @@ -0,0 +1,707 @@ +from __future__ import annotations + +from functools import wraps +from builtins import all as builtin_all, any as builtin_any + +from ..common._aliases import (UniqueAllResult, UniqueCountsResult, + UniqueInverseResult, + matrix_transpose as _aliases_matrix_transpose, + vecdot as _aliases_vecdot) +from .._internal import get_xp + +import torch + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import List, Optional, Sequence, Tuple, Union + from ..common._typing import Device + from torch import dtype as Dtype + + array = torch.Tensor + +_int_dtypes = { + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.int64, +} + +_array_api_dtypes = { + torch.bool, + *_int_dtypes, + torch.float32, + torch.float64, + torch.complex64, + torch.complex128, +} + +_promotion_table = { + # bool + (torch.bool, torch.bool): torch.bool, + # ints + (torch.int8, torch.int8): torch.int8, + (torch.int8, torch.int16): torch.int16, + (torch.int8, torch.int32): torch.int32, + (torch.int8, 
torch.int64): torch.int64, + (torch.int16, torch.int8): torch.int16, + (torch.int16, torch.int16): torch.int16, + (torch.int16, torch.int32): torch.int32, + (torch.int16, torch.int64): torch.int64, + (torch.int32, torch.int8): torch.int32, + (torch.int32, torch.int16): torch.int32, + (torch.int32, torch.int32): torch.int32, + (torch.int32, torch.int64): torch.int64, + (torch.int64, torch.int8): torch.int64, + (torch.int64, torch.int16): torch.int64, + (torch.int64, torch.int32): torch.int64, + (torch.int64, torch.int64): torch.int64, + # uints + (torch.uint8, torch.uint8): torch.uint8, + # ints and uints (mixed sign) + (torch.int8, torch.uint8): torch.int16, + (torch.int16, torch.uint8): torch.int16, + (torch.int32, torch.uint8): torch.int32, + (torch.int64, torch.uint8): torch.int64, + (torch.uint8, torch.int8): torch.int16, + (torch.uint8, torch.int16): torch.int16, + (torch.uint8, torch.int32): torch.int32, + (torch.uint8, torch.int64): torch.int64, + # floats + (torch.float32, torch.float32): torch.float32, + (torch.float32, torch.float64): torch.float64, + (torch.float64, torch.float32): torch.float64, + (torch.float64, torch.float64): torch.float64, + # complexes + (torch.complex64, torch.complex64): torch.complex64, + (torch.complex64, torch.complex128): torch.complex128, + (torch.complex128, torch.complex64): torch.complex128, + (torch.complex128, torch.complex128): torch.complex128, + # Mixed float and complex + (torch.float32, torch.complex64): torch.complex64, + (torch.float32, torch.complex128): torch.complex128, + (torch.float64, torch.complex64): torch.complex128, + (torch.float64, torch.complex128): torch.complex128, +} + + +def _two_arg(f): + @wraps(f) + def _f(x1, x2, /, **kwargs): + x1, x2 = _fix_promotion(x1, x2) + return f(x1, x2, **kwargs) + if _f.__doc__ is None: + _f.__doc__ = f"""\ +Array API compatibility wrapper for torch.{f.__name__}. + +See the corresponding PyTorch documentation and/or the array API specification +for more details. 
+ +""" + return _f + +def _fix_promotion(x1, x2, only_scalar=True): + if x1.dtype not in _array_api_dtypes or x2.dtype not in _array_api_dtypes: + return x1, x2 + # If an argument is 0-D pytorch downcasts the other argument + if not only_scalar or x1.shape == (): + dtype = result_type(x1, x2) + x2 = x2.to(dtype) + if not only_scalar or x2.shape == (): + dtype = result_type(x1, x2) + x1 = x1.to(dtype) + return x1, x2 + +def result_type(*arrays_and_dtypes: Union[array, Dtype]) -> Dtype: + if len(arrays_and_dtypes) == 0: + raise TypeError("At least one array or dtype must be provided") + if len(arrays_and_dtypes) == 1: + x = arrays_and_dtypes[0] + if isinstance(x, torch.dtype): + return x + return x.dtype + if len(arrays_and_dtypes) > 2: + return result_type(arrays_and_dtypes[0], result_type(*arrays_and_dtypes[1:])) + + x, y = arrays_and_dtypes + xdt = x.dtype if not isinstance(x, torch.dtype) else x + ydt = y.dtype if not isinstance(y, torch.dtype) else y + + if (xdt, ydt) in _promotion_table: + return _promotion_table[xdt, ydt] + + # This doesn't result_type(dtype, dtype) for non-array API dtypes + # because torch.result_type only accepts tensors. This does however, allow + # cross-kind promotion. 
+ x = torch.tensor([], dtype=x) if isinstance(x, torch.dtype) else x + y = torch.tensor([], dtype=y) if isinstance(y, torch.dtype) else y + return torch.result_type(x, y) + +def can_cast(from_: Union[Dtype, array], to: Dtype, /) -> bool: + if not isinstance(from_, torch.dtype): + from_ = from_.dtype + return torch.can_cast(from_, to) + +# Basic renames +bitwise_invert = torch.bitwise_not +newaxis = None + +# Two-arg elementwise functions +# These require a wrapper to do the correct type promotion on 0-D tensors +add = _two_arg(torch.add) +atan2 = _two_arg(torch.atan2) +bitwise_and = _two_arg(torch.bitwise_and) +bitwise_left_shift = _two_arg(torch.bitwise_left_shift) +bitwise_or = _two_arg(torch.bitwise_or) +bitwise_right_shift = _two_arg(torch.bitwise_right_shift) +bitwise_xor = _two_arg(torch.bitwise_xor) +divide = _two_arg(torch.divide) +# Also a rename. torch.equal does not broadcast +equal = _two_arg(torch.eq) +floor_divide = _two_arg(torch.floor_divide) +greater = _two_arg(torch.greater) +greater_equal = _two_arg(torch.greater_equal) +less = _two_arg(torch.less) +less_equal = _two_arg(torch.less_equal) +logaddexp = _two_arg(torch.logaddexp) +# logical functions are not included here because they only accept bool in the +# spec, so type promotion is irrelevant. +multiply = _two_arg(torch.multiply) +not_equal = _two_arg(torch.not_equal) +pow = _two_arg(torch.pow) +remainder = _two_arg(torch.remainder) +subtract = _two_arg(torch.subtract) + +# These wrappers are mostly based on the fact that pytorch uses 'dim' instead +# of 'axis'. 
+ +# torch.min and torch.max return a tuple and don't support multiple axes https://github.com/pytorch/pytorch/issues/58745 +def max(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.clone(x) + return torch.amax(x, axis, keepdims=keepdims) + +def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.clone(x) + return torch.amin(x, axis, keepdims=keepdims) + +# torch.sort also returns a tuple +# https://github.com/pytorch/pytorch/issues/70921 +def sort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bool = True, **kwargs) -> array: + return torch.sort(x, dim=axis, descending=descending, stable=stable, **kwargs).values + +def _normalize_axes(axis, ndim): + axes = [] + if ndim == 0 and axis: + # Better error message in this case + raise IndexError(f"Dimension out of range: {axis[0]}") + lower, upper = -ndim, ndim - 1 + for a in axis: + if a < lower or a > upper: + # Match torch error message (e.g., from sum()) + raise IndexError(f"Dimension out of range (expected to be in range of [{lower}, {upper}], but got {a}") + if a < 0: + a = a + ndim + if a in axes: + # Use IndexError instead of RuntimeError, and "axis" instead of "dim" + raise IndexError(f"Axis {a} appears multiple times in the list of axes") + axes.append(a) + return sorted(axes) + +def _axis_none_keepdims(x, ndim, keepdims): + # Apply keepdims when axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + # Note that this is only valid for the axis=None case. + if keepdims: + for i in range(ndim): + x = torch.unsqueeze(x, 0) + return x + +def _reduce_multiple_axes(f, x, axis, keepdims=False, **kwargs): + # Some reductions don't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). 
+ axes = _normalize_axes(axis, x.ndim) + for a in reversed(axes): + x = torch.movedim(x, a, -1) + x = torch.flatten(x, -len(axes)) + + out = f(x, -1, **kwargs) + + if keepdims: + for a in axes: + out = torch.unsqueeze(out, a) + return out + +def prod(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + + # https://github.com/pytorch/pytorch/issues/29137. Separate from the logic + # below because it still needs to upcast. + if axis == (): + if dtype is None: + # We can't upcast uint8 according to the spec because there is no + # torch.uint64, so at least upcast to int64 which is what sum does + # when axis=None. + if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]: + return x.to(torch.int64) + return x.clone() + return x.to(dtype) + + # torch.prod doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). + if isinstance(axis, tuple): + return _reduce_multiple_axes(torch.prod, x, axis, keepdims=keepdims, dtype=dtype, **kwargs) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.prod(x, dtype=dtype, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res + + return torch.prod(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) + + +def sum(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + + # https://github.com/pytorch/pytorch/issues/29137. + # Make sure it upcasts. + if axis == (): + if dtype is None: + # We can't upcast uint8 according to the spec because there is no + # torch.uint64, so at least upcast to int64 which is what sum does + # when axis=None. 
+ if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]: + return x.to(torch.int64) + return x.clone() + return x.to(dtype) + + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.sum(x, dtype=dtype, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res + + return torch.sum(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) + +def any(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + if axis == (): + return x.to(torch.bool) + # torch.any doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). + if isinstance(axis, tuple): + res = _reduce_multiple_axes(torch.any, x, axis, keepdims=keepdims, **kwargs) + return res.to(torch.bool) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.any(x, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res.to(torch.bool) + + # torch.any doesn't return bool for uint8 + return torch.any(x, axis, keepdims=keepdims).to(torch.bool) + +def all(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + if axis == (): + return x.to(torch.bool) + # torch.all doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). 
+ if isinstance(axis, tuple): + res = _reduce_multiple_axes(torch.all, x, axis, keepdims=keepdims, **kwargs) + return res.to(torch.bool) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.all(x, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res.to(torch.bool) + + # torch.all doesn't return bool for uint8 + return torch.all(x, axis, keepdims=keepdims).to(torch.bool) + +def mean(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.clone(x) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.mean(x, **kwargs) + res = _axis_none_keepdims(res, x.ndim, keepdims) + return res + return torch.mean(x, axis, keepdims=keepdims, **kwargs) + +def std(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, + keepdims: bool = False, + **kwargs) -> array: + # Note, float correction is not supported + # https://github.com/pytorch/pytorch/issues/61492. We don't try to + # implement it here for now. 
+ + if isinstance(correction, float): + _correction = int(correction) + if correction != _correction: + raise NotImplementedError("float correction in torch std() is not yet supported") + + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.zeros_like(x) + if isinstance(axis, int): + axis = (axis,) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.std(x, tuple(range(x.ndim)), correction=_correction, **kwargs) + res = _axis_none_keepdims(res, x.ndim, keepdims) + return res + return torch.std(x, axis, correction=_correction, keepdims=keepdims, **kwargs) + +def var(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, + keepdims: bool = False, + **kwargs) -> array: + # Note, float correction is not supported + # https://github.com/pytorch/pytorch/issues/61492. We don't try to + # implement it here for now. + + # if isinstance(correction, float): + # correction = int(correction) + + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.zeros_like(x) + if isinstance(axis, int): + axis = (axis,) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.var(x, tuple(range(x.ndim)), correction=correction, **kwargs) + res = _axis_none_keepdims(res, x.ndim, keepdims) + return res + return torch.var(x, axis, correction=correction, keepdims=keepdims, **kwargs) + +# torch.concat doesn't support dim=None +# https://github.com/pytorch/pytorch/issues/70925 +def concat(arrays: Union[Tuple[array, ...], List[array]], + /, + *, + axis: Optional[int] = 0, + **kwargs) -> array: + if axis is None: + arrays = tuple(ar.flatten() for ar in arrays) + axis = 0 + return torch.concat(arrays, axis, **kwargs) + +# torch.squeeze only accepts int dim and doesn't require it +# 
https://github.com/pytorch/pytorch/issues/70924. Support for tuple dim was +# added at https://github.com/pytorch/pytorch/pull/89017. +def squeeze(x: array, /, axis: Union[int, Tuple[int, ...]]) -> array: + if isinstance(axis, int): + axis = (axis,) + for a in axis: + if x.shape[a] != 1: + raise ValueError("squeezed dimensions must be equal to 1") + axes = _normalize_axes(axis, x.ndim) + # Remove this once pytorch 1.14 is released with the above PR #89017. + sequence = [a - i for i, a in enumerate(axes)] + for a in sequence: + x = torch.squeeze(x, a) + return x + +# torch.broadcast_to uses size instead of shape +def broadcast_to(x: array, /, shape: Tuple[int, ...], **kwargs) -> array: + return torch.broadcast_to(x, shape, **kwargs) + +# torch.permute uses dims instead of axes +def permute_dims(x: array, /, axes: Tuple[int, ...]) -> array: + return torch.permute(x, axes) + +# The axis parameter doesn't work for flip() and roll() +# https://github.com/pytorch/pytorch/issues/71210. Also torch.flip() doesn't +# accept axis=None +def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> array: + if axis is None: + axis = tuple(range(x.ndim)) + # torch.flip doesn't accept dim as an int but the method does + # https://github.com/pytorch/pytorch/issues/18095 + return x.flip(axis, **kwargs) + +def roll(x: array, /, shift: Union[int, Tuple[int, ...]], *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> array: + return torch.roll(x, shift, axis, **kwargs) + +def nonzero(x: array, /, **kwargs) -> Tuple[array, ...]: + if x.ndim == 0: + raise ValueError("nonzero() does not support zero-dimensional arrays") + return torch.nonzero(x, as_tuple=True, **kwargs) + +def where(condition: array, x1: array, x2: array, /) -> array: + x1, x2 = _fix_promotion(x1, x2) + return torch.where(condition, x1, x2) + +# torch.reshape doesn't have the copy keyword +def reshape(x: array, + /, + shape: Tuple[int, ...], + copy: Optional[bool] = None, + 
**kwargs) -> array: + if copy is not None: + raise NotImplementedError("torch.reshape doesn't yet support the copy keyword") + return torch.reshape(x, shape, **kwargs) + +# torch.arange doesn't support returning empty arrays +# (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some +# keyword argument combinations +# (https://github.com/pytorch/pytorch/issues/70914) +def arange(start: Union[int, float], + /, + stop: Optional[Union[int, float]] = None, + step: Union[int, float] = 1, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if stop is None: + start, stop = 0, start + if step > 0 and stop <= start or step < 0 and stop >= start: + if dtype is None: + if builtin_all(isinstance(i, int) for i in [start, stop, step]): + dtype = torch.int64 + else: + dtype = torch.float32 + return torch.empty(0, dtype=dtype, device=device, **kwargs) + return torch.arange(start, stop, step, dtype=dtype, device=device, **kwargs) + +# torch.eye does not accept None as a default for the second argument and +# doesn't support off-diagonals (https://github.com/pytorch/pytorch/issues/70910) +def eye(n_rows: int, + n_cols: Optional[int] = None, + /, + *, + k: int = 0, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if n_cols is None: + n_cols = n_rows + z = torch.zeros(n_rows, n_cols, dtype=dtype, device=device, **kwargs) + if abs(k) <= n_rows + n_cols: + z.diagonal(k).fill_(1) + return z + +# torch.linspace doesn't have the endpoint parameter +def linspace(start: Union[int, float], + stop: Union[int, float], + /, + num: int, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + endpoint: bool = True, + **kwargs) -> array: + if not endpoint: + return torch.linspace(start, stop, num+1, dtype=dtype, device=device, **kwargs)[:-1] + return torch.linspace(start, stop, num, dtype=dtype, device=device, **kwargs) + +# torch.full does not accept an int size +# 
https://github.com/pytorch/pytorch/issues/70906 +def full(shape: Union[int, Tuple[int, ...]], + fill_value: Union[bool, int, float, complex], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if isinstance(shape, int): + shape = (shape,) + + return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs) + +# ones, zeros, and empty do not accept shape as a keyword argument +def ones(shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + return torch.ones(shape, dtype=dtype, device=device, **kwargs) + +def zeros(shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + return torch.zeros(shape, dtype=dtype, device=device, **kwargs) + +def empty(shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + return torch.empty(shape, dtype=dtype, device=device, **kwargs) + +# tril and triu do not call the keyword argument k + +def tril(x: array, /, *, k: int = 0) -> array: + return torch.tril(x, k) + +def triu(x: array, /, *, k: int = 0) -> array: + return torch.triu(x, k) + +# Functions that aren't in torch https://github.com/pytorch/pytorch/issues/58742 +def expand_dims(x: array, /, *, axis: int = 0) -> array: + return torch.unsqueeze(x, axis) + +def astype(x: array, dtype: Dtype, /, *, copy: bool = True) -> array: + return x.to(dtype, copy=copy) + +def broadcast_arrays(*arrays: array) -> List[array]: + shape = torch.broadcast_shapes(*[a.shape for a in arrays]) + return [torch.broadcast_to(a, shape) for a in arrays] + +# https://github.com/pytorch/pytorch/issues/70920 +def unique_all(x: array) -> UniqueAllResult: + # torch.unique doesn't support returning indices. + # https://github.com/pytorch/pytorch/issues/36748. 
The workaround + # suggested in that issue doesn't actually function correctly (it relies + # on non-deterministic behavior of scatter()). + raise NotImplementedError("unique_all() not yet implemented for pytorch (see https://github.com/pytorch/pytorch/issues/36748)") + + # values, inverse_indices, counts = torch.unique(x, return_counts=True, return_inverse=True) + # # torch.unique incorrectly gives a 0 count for nan values. + # # https://github.com/pytorch/pytorch/issues/94106 + # counts[torch.isnan(values)] = 1 + # return UniqueAllResult(values, indices, inverse_indices, counts) + +def unique_counts(x: array) -> UniqueCountsResult: + values, counts = torch.unique(x, return_counts=True) + + # torch.unique incorrectly gives a 0 count for nan values. + # https://github.com/pytorch/pytorch/issues/94106 + counts[torch.isnan(values)] = 1 + return UniqueCountsResult(values, counts) + +def unique_inverse(x: array) -> UniqueInverseResult: + values, inverse = torch.unique(x, return_inverse=True) + return UniqueInverseResult(values, inverse) + +def unique_values(x: array) -> array: + return torch.unique(x) + +def matmul(x1: array, x2: array, /, **kwargs) -> array: + # torch.matmul doesn't type promote (but differently from _fix_promotion) + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch.matmul(x1, x2, **kwargs) + +matrix_transpose = get_xp(torch)(_aliases_matrix_transpose) +_vecdot = get_xp(torch)(_aliases_vecdot) + +def vecdot(x1: array, x2: array, /, *, axis: int = -1) -> array: + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return _vecdot(x1, x2, axis=axis) + +# torch.tensordot uses dims instead of axes +def tensordot(x1: array, x2: array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2, **kwargs) -> array: + # Note: torch.tensordot fails with integer dtypes when there is only 1 + # element in the axis (https://github.com/pytorch/pytorch/issues/84530). 
+ x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch.tensordot(x1, x2, dims=axes, **kwargs) + + +def isdtype( + dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], + *, _tuple=True, # Disallow nested tuples +) -> bool: + """ + Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``. + + Note that outside of this function, this compat library does not yet fully + support complex numbers. + + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html + for more details + """ + if isinstance(kind, tuple) and _tuple: + return builtin_any(isdtype(dtype, k, _tuple=False) for k in kind) + elif isinstance(kind, str): + if kind == 'bool': + return dtype == torch.bool + elif kind == 'signed integer': + return dtype in _int_dtypes and dtype.is_signed + elif kind == 'unsigned integer': + return dtype in _int_dtypes and not dtype.is_signed + elif kind == 'integral': + return dtype in _int_dtypes + elif kind == 'real floating': + return dtype.is_floating_point + elif kind == 'complex floating': + return dtype.is_complex + elif kind == 'numeric': + return isdtype(dtype, ('integral', 'real floating', 'complex floating')) + else: + raise ValueError(f"Unrecognized data type kind: {kind!r}") + else: + return dtype == kind + +def take(x: array, indices: array, /, *, axis: Optional[int] = None, **kwargs) -> array: + if axis is None: + if x.ndim != 1: + raise ValueError("axis must be specified when ndim > 1") + axis = 0 + return torch.index_select(x, axis, indices, **kwargs) + +__all__ = ['result_type', 'can_cast', 'permute_dims', 'bitwise_invert', 'newaxis', + 'add', 'atan2', 'bitwise_and', 'bitwise_left_shift', 'bitwise_or', + 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', + 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', + 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', + 'subtract', 'max', 'min', 'sort', 'prod', 'sum', 'any', 'all', + 'mean', 
'std', 'var', 'concat', 'squeeze', 'broadcast_to', 'flip', 'roll', + 'nonzero', 'where', 'reshape', 'arange', 'eye', 'linspace', 'full', + 'ones', 'zeros', 'empty', 'tril', 'triu', 'expand_dims', 'astype', + 'broadcast_arrays', 'unique_all', 'unique_counts', + 'unique_inverse', 'unique_values', 'matmul', 'matrix_transpose', + 'vecdot', 'tensordot', 'isdtype', 'take'] diff --git a/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..5266739106bd419515eacf17e5a9dc0ad519589d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + import torch + array = torch.Tensor + from torch import dtype as Dtype + from typing import Optional + +from torch.linalg import * + +# torch.linalg doesn't define __all__ +# from torch.linalg import __all__ as linalg_all +from torch import linalg as torch_linalg +linalg_all = [i for i in dir(torch_linalg) if not i.startswith('_')] + +# outer is implemented in torch but aren't in the linalg namespace +from torch import outer +from ._aliases import _fix_promotion, matrix_transpose, tensordot, sum + +# Note: torch.linalg.cross does not default to axis=-1 (it defaults to the +# first axis with size 3), see https://github.com/pytorch/pytorch/issues/58743 +def cross(x1: array, x2: array, /, *, axis: int = -1) -> array: + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch_linalg.cross(x1, x2, dim=axis) + +def vecdot(x1: array, x2: array, /, *, axis: int = -1, **kwargs) -> array: + from ._aliases import isdtype + + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + + # torch.linalg.vecdot doesn't support integer dtypes + if isdtype(x1.dtype, 'integral') or isdtype(x2.dtype, 'integral'): + if kwargs: + raise 
RuntimeError("vecdot kwargs not supported for integral dtypes") + ndim = max(x1.ndim, x2.ndim) + x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape) + x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape) + if x1_shape[axis] != x2_shape[axis]: + raise ValueError("x1 and x2 must have the same size along the given axis") + + x1_, x2_ = torch.broadcast_tensors(x1, x2) + x1_ = torch.moveaxis(x1_, axis, -1) + x2_ = torch.moveaxis(x2_, axis, -1) + + res = x1_[..., None, :] @ x2_[..., None] + return res[..., 0, 0] + return torch.linalg.vecdot(x1, x2, dim=axis, **kwargs) + +def solve(x1: array, x2: array, /, **kwargs) -> array: + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch.linalg.solve(x1, x2, **kwargs) + +# torch.trace doesn't support the offset argument and doesn't support stacking +def trace(x: array, /, *, offset: int = 0, dtype: Optional[Dtype] = None) -> array: + # Use our wrapped sum to make sure it does upcasting correctly + return sum(torch.diagonal(x, offset=offset, dim1=-2, dim2=-1), axis=-1, dtype=dtype) + +__all__ = linalg_all + ['outer', 'trace', 'matrix_transpose', 'tensordot', + 'vecdot', 'solve'] + +del linalg_all diff --git a/venv/lib/python3.10/site-packages/scipy/constants/__init__.py b/venv/lib/python3.10/site-packages/scipy/constants/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2805070eef1d77567ecf094aa08049d0b0a797 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/constants/__init__.py @@ -0,0 +1,347 @@ +r""" +================================== +Constants (:mod:`scipy.constants`) +================================== + +.. currentmodule:: scipy.constants + +Physical and mathematical constants and units. 
+ + +Mathematical constants +====================== + +================ ================================================================= +``pi`` Pi +``golden`` Golden ratio +``golden_ratio`` Golden ratio +================ ================================================================= + + +Physical constants +================== + +=========================== ================================================================= +``c`` speed of light in vacuum +``speed_of_light`` speed of light in vacuum +``mu_0`` the magnetic constant :math:`\mu_0` +``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0` +``h`` the Planck constant :math:`h` +``Planck`` the Planck constant :math:`h` +``hbar`` :math:`\hbar = h/(2\pi)` +``G`` Newtonian constant of gravitation +``gravitational_constant`` Newtonian constant of gravitation +``g`` standard acceleration of gravity +``e`` elementary charge +``elementary_charge`` elementary charge +``R`` molar gas constant +``gas_constant`` molar gas constant +``alpha`` fine-structure constant +``fine_structure`` fine-structure constant +``N_A`` Avogadro constant +``Avogadro`` Avogadro constant +``k`` Boltzmann constant +``Boltzmann`` Boltzmann constant +``sigma`` Stefan-Boltzmann constant :math:`\sigma` +``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma` +``Wien`` Wien displacement law constant +``Rydberg`` Rydberg constant +``m_e`` electron mass +``electron_mass`` electron mass +``m_p`` proton mass +``proton_mass`` proton mass +``m_n`` neutron mass +``neutron_mass`` neutron mass +=========================== ================================================================= + + +Constants database +------------------ + +In addition to the above variables, :mod:`scipy.constants` also contains the +2018 CODATA recommended values [CODATA2018]_ database containing more physical +constants. + +.. 
autosummary:: + :toctree: generated/ + + value -- Value in physical_constants indexed by key + unit -- Unit in physical_constants indexed by key + precision -- Relative precision in physical_constants indexed by key + find -- Return list of physical_constant keys with a given string + ConstantWarning -- Constant sought not in newest CODATA data set + +.. data:: physical_constants + + Dictionary of physical constants, of the format + ``physical_constants[name] = (value, unit, uncertainty)``. + +Available constants: + +====================================================================== ==== +%(constant_names)s +====================================================================== ==== + + +Units +===== + +SI prefixes +----------- + +============ ================================================================= +``quetta`` :math:`10^{30}` +``ronna`` :math:`10^{27}` +``yotta`` :math:`10^{24}` +``zetta`` :math:`10^{21}` +``exa`` :math:`10^{18}` +``peta`` :math:`10^{15}` +``tera`` :math:`10^{12}` +``giga`` :math:`10^{9}` +``mega`` :math:`10^{6}` +``kilo`` :math:`10^{3}` +``hecto`` :math:`10^{2}` +``deka`` :math:`10^{1}` +``deci`` :math:`10^{-1}` +``centi`` :math:`10^{-2}` +``milli`` :math:`10^{-3}` +``micro`` :math:`10^{-6}` +``nano`` :math:`10^{-9}` +``pico`` :math:`10^{-12}` +``femto`` :math:`10^{-15}` +``atto`` :math:`10^{-18}` +``zepto`` :math:`10^{-21}` +``yocto`` :math:`10^{-24}` +``ronto`` :math:`10^{-27}` +``quecto`` :math:`10^{-30}` +============ ================================================================= + +Binary prefixes +--------------- + +============ ================================================================= +``kibi`` :math:`2^{10}` +``mebi`` :math:`2^{20}` +``gibi`` :math:`2^{30}` +``tebi`` :math:`2^{40}` +``pebi`` :math:`2^{50}` +``exbi`` :math:`2^{60}` +``zebi`` :math:`2^{70}` +``yobi`` :math:`2^{80}` +============ ================================================================= + +Mass +---- + +================= 
============================================================ +``gram`` :math:`10^{-3}` kg +``metric_ton`` :math:`10^{3}` kg +``grain`` one grain in kg +``lb`` one pound (avoirdupous) in kg +``pound`` one pound (avoirdupous) in kg +``blob`` one inch version of a slug in kg (added in 1.0.0) +``slinch`` one inch version of a slug in kg (added in 1.0.0) +``slug`` one slug in kg (added in 1.0.0) +``oz`` one ounce in kg +``ounce`` one ounce in kg +``stone`` one stone in kg +``grain`` one grain in kg +``long_ton`` one long ton in kg +``short_ton`` one short ton in kg +``troy_ounce`` one Troy ounce in kg +``troy_pound`` one Troy pound in kg +``carat`` one carat in kg +``m_u`` atomic mass constant (in kg) +``u`` atomic mass constant (in kg) +``atomic_mass`` atomic mass constant (in kg) +================= ============================================================ + +Angle +----- + +================= ============================================================ +``degree`` degree in radians +``arcmin`` arc minute in radians +``arcminute`` arc minute in radians +``arcsec`` arc second in radians +``arcsecond`` arc second in radians +================= ============================================================ + + +Time +---- + +================= ============================================================ +``minute`` one minute in seconds +``hour`` one hour in seconds +``day`` one day in seconds +``week`` one week in seconds +``year`` one year (365 days) in seconds +``Julian_year`` one Julian year (365.25 days) in seconds +================= ============================================================ + + +Length +------ + +===================== ============================================================ +``inch`` one inch in meters +``foot`` one foot in meters +``yard`` one yard in meters +``mile`` one mile in meters +``mil`` one mil in meters +``pt`` one point in meters +``point`` one point in meters +``survey_foot`` one survey foot in meters +``survey_mile`` one survey 
mile in meters +``nautical_mile`` one nautical mile in meters +``fermi`` one Fermi in meters +``angstrom`` one Angstrom in meters +``micron`` one micron in meters +``au`` one astronomical unit in meters +``astronomical_unit`` one astronomical unit in meters +``light_year`` one light year in meters +``parsec`` one parsec in meters +===================== ============================================================ + +Pressure +-------- + +================= ============================================================ +``atm`` standard atmosphere in pascals +``atmosphere`` standard atmosphere in pascals +``bar`` one bar in pascals +``torr`` one torr (mmHg) in pascals +``mmHg`` one torr (mmHg) in pascals +``psi`` one psi in pascals +================= ============================================================ + +Area +---- + +================= ============================================================ +``hectare`` one hectare in square meters +``acre`` one acre in square meters +================= ============================================================ + + +Volume +------ + +=================== ======================================================== +``liter`` one liter in cubic meters +``litre`` one liter in cubic meters +``gallon`` one gallon (US) in cubic meters +``gallon_US`` one gallon (US) in cubic meters +``gallon_imp`` one gallon (UK) in cubic meters +``fluid_ounce`` one fluid ounce (US) in cubic meters +``fluid_ounce_US`` one fluid ounce (US) in cubic meters +``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters +``bbl`` one barrel in cubic meters +``barrel`` one barrel in cubic meters +=================== ======================================================== + +Speed +----- + +================== ========================================================== +``kmh`` kilometers per hour in meters per second +``mph`` miles per hour in meters per second +``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second +``speed_of_sound`` one Mach 
(approx., at 15 C, 1 atm) in meters per second +``knot`` one knot in meters per second +================== ========================================================== + + +Temperature +----------- + +===================== ======================================================= +``zero_Celsius`` zero of Celsius scale in Kelvin +``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins +===================== ======================================================= + +.. autosummary:: + :toctree: generated/ + + convert_temperature + +Energy +------ + +==================== ======================================================= +``eV`` one electron volt in Joules +``electron_volt`` one electron volt in Joules +``calorie`` one calorie (thermochemical) in Joules +``calorie_th`` one calorie (thermochemical) in Joules +``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules +``erg`` one erg in Joules +``Btu`` one British thermal unit (International Steam Table) in Joules +``Btu_IT`` one British thermal unit (International Steam Table) in Joules +``Btu_th`` one British thermal unit (thermochemical) in Joules +``ton_TNT`` one ton of TNT in Joules +==================== ======================================================= + +Power +----- + +==================== ======================================================= +``hp`` one horsepower in watts +``horsepower`` one horsepower in watts +==================== ======================================================= + +Force +----- + +==================== ======================================================= +``dyn`` one dyne in newtons +``dyne`` one dyne in newtons +``lbf`` one pound force in newtons +``pound_force`` one pound force in newtons +``kgf`` one kilogram force in newtons +``kilogram_force`` one kilogram force in newtons +==================== ======================================================= + +Optics +------ + +.. 
autosummary:: + :toctree: generated/ + + lambda2nu + nu2lambda + +References +========== + +.. [CODATA2018] CODATA Recommended Values of the Fundamental + Physical Constants 2018. + + https://physics.nist.gov/cuu/Constants/ + +""" # noqa: E501 +# Modules contributed by BasSw (wegwerp@gmail.com) +from ._codata import * +from ._constants import * +from ._codata import _obsolete_constants, physical_constants + +# Deprecated namespaces, to be removed in v2.0.0 +from . import codata, constants + +_constant_names_list = [(_k.lower(), _k, _v) + for _k, _v in physical_constants.items() + if _k not in _obsolete_constants] +_constant_names = "\n".join(["``{}``{} {} {}".format(_x[1], " "*(66-len(_x[1])), + _x[2][0], _x[2][1]) + for _x in sorted(_constant_names_list)]) +if __doc__: + __doc__ = __doc__ % dict(constant_names=_constant_names) + +del _constant_names +del _constant_names_list + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47afcb21f7fd95c9f8b00eefd170dbc29053823e Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/_codata.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/_codata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e74d0f727acc3775246d4c6c9108a42d71079de3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/_codata.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95eb76749935fdb0f872b9278bcd6616af733dcc Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71af4538647d4347d2c6c920e6989574fd14e761 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6aed28fe1818adaff4d4c0fe2632d1d2fd3fc509 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/constants/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/constants/_codata.py b/venv/lib/python3.10/site-packages/scipy/constants/_codata.py new file mode 100644 index 0000000000000000000000000000000000000000..0f2fd4580fac82e53ed372219e77ddf843f2c68b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/constants/_codata.py @@ -0,0 +1,1748 @@ +""" +Fundamental Physical Constants +------------------------------ + +These constants are taken from CODATA Recommended Values of the Fundamental +Physical Constants 2018. + +Object +------ +physical_constants : dict + A dictionary containing physical constants. Keys are the names of physical + constants, values are tuples (value, units, precision). + +Functions +--------- +value(key): + Returns the value of the physical constant(key). 
+unit(key): + Returns the units of the physical constant(key). +precision(key): + Returns the relative precision of the physical constant(key). +find(sub): + Prints or returns list of keys containing the string sub, default is all. + +Source +------ +The values of the constants provided at this site are recommended for +international use by CODATA and are the latest available. Termed the "2018 +CODATA recommended values," they are generally recognized worldwide for use in +all fields of science and technology. The values became available on 20 May +2019 and replaced the 2014 CODATA set. Also available is an introduction to the +constants for non-experts at + +https://physics.nist.gov/cuu/Constants/introduction.html + +References +---------- +Theoretical and experimental publications relevant to the fundamental constants +and closely related precision measurements published since the mid 1980s, but +also including many older papers of particular interest, some of which date +back to the 1800s. To search the bibliography, visit + +https://physics.nist.gov/cuu/Constants/ + +""" + +# Compiled by Charles Harris, dated October 3, 2002 +# updated to 2002 values by BasSw, 2006 +# Updated to 2006 values by Vincent Davis June 2010 +# Updated to 2014 values by Joseph Booker, 2015 +# Updated to 2018 values by Jakob Jakobson, 2019 + +from __future__ import annotations + +import warnings + +from typing import Any + +__all__ = ['physical_constants', 'value', 'unit', 'precision', 'find', + 'ConstantWarning'] + +""" +Source: https://physics.nist.gov/cuu/Constants/ + +The values of the constants provided at this site are recommended for +international use by CODATA and are the latest available. Termed the "2018 +CODATA recommended values," they are generally recognized worldwide for use in +all fields of science and technology. The values became available on 20 May +2019 and replaced the 2014 CODATA set. 
+""" + +# +# Source: https://physics.nist.gov/cuu/Constants/ +# + +# Quantity Value Uncertainty Unit +# ---------------------------------------------------- --------------------- -------------------- ------------- +txt2002 = """\ +Wien displacement law constant 2.897 7685e-3 0.000 0051e-3 m K +atomic unit of 1st hyperpolarizablity 3.206 361 51e-53 0.000 000 28e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizablity 6.235 3808e-65 0.000 0011e-65 C^4 m^4 J^-3 +atomic unit of electric dipole moment 8.478 353 09e-30 0.000 000 73e-30 C m +atomic unit of electric polarizablity 1.648 777 274e-41 0.000 000 016e-41 C^2 m^2 J^-1 +atomic unit of electric quadrupole moment 4.486 551 24e-40 0.000 000 39e-40 C m^2 +atomic unit of magn. dipole moment 1.854 801 90e-23 0.000 000 16e-23 J T^-1 +atomic unit of magn. flux density 2.350 517 42e5 0.000 000 20e5 T +deuteron magn. moment 0.433 073 482e-26 0.000 000 038e-26 J T^-1 +deuteron magn. moment to Bohr magneton ratio 0.466 975 4567e-3 0.000 000 0050e-3 +deuteron magn. moment to nuclear magneton ratio 0.857 438 2329 0.000 000 0092 +deuteron-electron magn. moment ratio -4.664 345 548e-4 0.000 000 050e-4 +deuteron-proton magn. moment ratio 0.307 012 2084 0.000 000 0045 +deuteron-neutron magn. moment ratio -0.448 206 52 0.000 000 11 +electron gyromagn. ratio 1.760 859 74e11 0.000 000 15e11 s^-1 T^-1 +electron gyromagn. ratio over 2 pi 28 024.9532 0.0024 MHz T^-1 +electron magn. moment -928.476 412e-26 0.000 080e-26 J T^-1 +electron magn. moment to Bohr magneton ratio -1.001 159 652 1859 0.000 000 000 0038 +electron magn. moment to nuclear magneton ratio -1838.281 971 07 0.000 000 85 +electron magn. moment anomaly 1.159 652 1859e-3 0.000 000 0038e-3 +electron to shielded proton magn. moment ratio -658.227 5956 0.000 0071 +electron to shielded helion magn. moment ratio 864.058 255 0.000 010 +electron-deuteron magn. moment ratio -2143.923 493 0.000 023 +electron-muon magn. moment ratio 206.766 9894 0.000 0054 +electron-neutron magn. 
moment ratio 960.920 50 0.000 23 +electron-proton magn. moment ratio -658.210 6862 0.000 0066 +magn. constant 12.566 370 614...e-7 0 N A^-2 +magn. flux quantum 2.067 833 72e-15 0.000 000 18e-15 Wb +muon magn. moment -4.490 447 99e-26 0.000 000 40e-26 J T^-1 +muon magn. moment to Bohr magneton ratio -4.841 970 45e-3 0.000 000 13e-3 +muon magn. moment to nuclear magneton ratio -8.890 596 98 0.000 000 23 +muon-proton magn. moment ratio -3.183 345 118 0.000 000 089 +neutron gyromagn. ratio 1.832 471 83e8 0.000 000 46e8 s^-1 T^-1 +neutron gyromagn. ratio over 2 pi 29.164 6950 0.000 0073 MHz T^-1 +neutron magn. moment -0.966 236 45e-26 0.000 000 24e-26 J T^-1 +neutron magn. moment to Bohr magneton ratio -1.041 875 63e-3 0.000 000 25e-3 +neutron magn. moment to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron to shielded proton magn. moment ratio -0.684 996 94 0.000 000 16 +neutron-electron magn. moment ratio 1.040 668 82e-3 0.000 000 25e-3 +neutron-proton magn. moment ratio -0.684 979 34 0.000 000 16 +proton gyromagn. ratio 2.675 222 05e8 0.000 000 23e8 s^-1 T^-1 +proton gyromagn. ratio over 2 pi 42.577 4813 0.000 0037 MHz T^-1 +proton magn. moment 1.410 606 71e-26 0.000 000 12e-26 J T^-1 +proton magn. moment to Bohr magneton ratio 1.521 032 206e-3 0.000 000 015e-3 +proton magn. moment to nuclear magneton ratio 2.792 847 351 0.000 000 028 +proton magn. shielding correction 25.689e-6 0.015e-6 +proton-neutron magn. moment ratio -1.459 898 05 0.000 000 34 +shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 +shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 +shielded helion magn. moment -1.074 553 024e-26 0.000 000 093e-26 J T^-1 +shielded helion magn. moment to Bohr magneton ratio -1.158 671 474e-3 0.000 000 014e-3 +shielded helion magn. moment to nuclear magneton ratio -2.127 497 723 0.000 000 025 +shielded helion to proton magn. moment ratio -0.761 766 562 0.000 000 012 +shielded helion to shielded proton magn. 
moment ratio -0.761 786 1313 0.000 000 0033 +shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1 +shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1 +shielded proton magn. moment 1.410 570 47e-26 0.000 000 12e-26 J T^-1 +shielded proton magn. moment to Bohr magneton ratio 1.520 993 132e-3 0.000 000 016e-3 +shielded proton magn. moment to nuclear magneton ratio 2.792 775 604 0.000 000 030 +{220} lattice spacing of silicon 192.015 5965e-12 0.000 0070e-12 m""" + +txt2006 = """\ +lattice spacing of silicon 192.015 5762 e-12 0.000 0050 e-12 m +alpha particle-electron mass ratio 7294.299 5365 0.000 0031 +alpha particle mass 6.644 656 20 e-27 0.000 000 33 e-27 kg +alpha particle mass energy equivalent 5.971 919 17 e-10 0.000 000 30 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 109 0.000 093 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 062 u +alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 062 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 51 0.000 000 000 41 +Angstrom star 1.000 014 98 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 538 782 e-27 0.000 000 083 e-27 kg +atomic mass constant energy equivalent 1.492 417 830 e-10 0.000 000 074 e-10 J +atomic mass constant energy equivalent in MeV 931.494 028 0.000 023 MeV +atomic mass unit-electron volt relationship 931.494 028 e6 0.000 023 e6 eV +atomic mass unit-hartree relationship 3.423 177 7149 e7 0.000 000 0049 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7369 e23 0.000 000 0032 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 671 e14 0.000 000 011 e14 m^-1 +atomic mass unit-joule relationship 1.492 417 830 e-10 0.000 000 074 e-10 J +atomic mass unit-kelvin relationship 1.080 9527 e13 0.000 0019 e13 K +atomic mass unit-kilogram relationship 1.660 538 782 e-27 0.000 000 083 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 533 e-53 0.000 000 081 e-53 C^3 m^3 J^-2 +atomic unit of 2nd 
hyperpolarizability 6.235 380 95 e-65 0.000 000 31 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s +atomic unit of charge 1.602 176 487 e-19 0.000 000 040 e-19 C +atomic unit of charge density 1.081 202 300 e12 0.000 000 027 e12 C m^-3 +atomic unit of current 6.623 617 63 e-3 0.000 000 17 e-3 A +atomic unit of electric dipole mom. 8.478 352 81 e-30 0.000 000 21 e-30 C m +atomic unit of electric field 5.142 206 32 e11 0.000 000 13 e11 V m^-1 +atomic unit of electric field gradient 9.717 361 66 e21 0.000 000 24 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2536 e-41 0.000 000 0034 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 383 86 0.000 000 68 V +atomic unit of electric quadrupole mom. 4.486 551 07 e-40 0.000 000 11 e-40 C m^2 +atomic unit of energy 4.359 743 94 e-18 0.000 000 22 e-18 J +atomic unit of force 8.238 722 06 e-8 0.000 000 41 e-8 N +atomic unit of length 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m +atomic unit of mag. dipole mom. 1.854 801 830 e-23 0.000 000 046 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 382 e5 0.000 000 059 e5 T +atomic unit of magnetizability 7.891 036 433 e-29 0.000 000 027 e-29 J T^-2 +atomic unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +atomic unit of momentum 1.992 851 565 e-24 0.000 000 099 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... 
e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326 505 e-17 0.000 000 000 016 e-17 s +atomic unit of velocity 2.187 691 2541 e6 0.000 000 0015 e6 m s^-1 +Avogadro constant 6.022 141 79 e23 0.000 000 30 e23 mol^-1 +Bohr magneton 927.400 915 e-26 0.000 023 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 7555 e-5 0.000 000 0079 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 246 04 e9 0.000 000 35 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 4515 0.000 0012 m^-1 T^-1 +Bohr magneton in K/T 0.671 7131 0.000 0012 K T^-1 +Bohr radius 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m +Boltzmann constant 1.380 6504 e-23 0.000 0024 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 343 e-5 0.000 015 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6644 e10 0.000 0036 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 56 0.000 12 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 2894 e-15 0.000 000 0058 e-15 m +Compton wavelength 2.426 310 2175 e-12 0.000 000 0033 e-12 m +Compton wavelength over 2 pi 386.159 264 59 e-15 0.000 000 53 e-15 m +conductance quantum 7.748 091 7004 e-5 0.000 000 0053 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 99 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4 +deuteron-electron mass ratio 3670.482 9654 0.000 0016 +deuteron g factor 0.857 438 2308 0.000 000 0072 +deuteron mag. mom. 0.433 073 465 e-26 0.000 000 011 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3 +deuteron mag. mom. 
to nuclear magneton ratio 0.857 438 2308 0.000 000 0072 +deuteron mass 3.343 583 20 e-27 0.000 000 17 e-27 kg +deuteron mass energy equivalent 3.005 062 72 e-10 0.000 000 15 e-10 J +deuteron mass energy equivalent in MeV 1875.612 793 0.000 047 MeV +deuteron mass in u 2.013 553 212 724 0.000 000 000 078 u +deuteron molar mass 2.013 553 212 724 e-3 0.000 000 000 078 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024 +deuteron-proton mass ratio 1.999 007 501 08 0.000 000 000 22 +deuteron rms charge radius 2.1402 e-15 0.0028 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 150 e11 0.000 000 044 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 498 0.000 018 +electron-deuteron mass ratio 2.724 437 1093 e-4 0.000 000 0012 e-4 +electron g factor -2.002 319 304 3622 0.000 000 000 0015 +electron gyromag. ratio 1.760 859 770 e11 0.000 000 044 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.953 64 0.000 70 MHz T^-1 +electron mag. mom. -928.476 377 e-26 0.000 023 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 181 11 e-3 0.000 000 000 74 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 11 0.000 000 000 000 74 +electron mag. mom. to nuclear magneton ratio -1838.281 970 92 0.000 000 80 +electron mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +electron mass energy equivalent 8.187 104 38 e-14 0.000 000 41 e-14 J +electron mass energy equivalent in MeV 0.510 998 910 0.000 000 013 MeV +electron mass in u 5.485 799 0943 e-4 0.000 000 0023 e-4 u +electron molar mass 5.485 799 0943 e-7 0.000 000 0023 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9877 0.000 0052 +electron-muon mass ratio 4.836 331 71 e-3 0.000 000 12 e-3 +electron-neutron mag. mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4459 e-4 0.000 000 0033 e-4 +electron-proton mag. mom. 
ratio -658.210 6848 0.000 0054 +electron-proton mass ratio 5.446 170 2177 e-4 0.000 000 0024 e-4 +electron-tau mass ratio 2.875 64 e-4 0.000 47 e-4 +electron to alpha particle mass ratio 1.370 933 555 70 e-4 0.000 000 000 58 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron volt 1.602 176 487 e-19 0.000 000 040 e-19 J +electron volt-atomic mass unit relationship 1.073 544 188 e-9 0.000 000 027 e-9 u +electron volt-hartree relationship 3.674 932 540 e-2 0.000 000 092 e-2 E_h +electron volt-hertz relationship 2.417 989 454 e14 0.000 000 060 e14 Hz +electron volt-inverse meter relationship 8.065 544 65 e5 0.000 000 20 e5 m^-1 +electron volt-joule relationship 1.602 176 487 e-19 0.000 000 040 e-19 J +electron volt-kelvin relationship 1.160 4505 e4 0.000 0020 e4 K +electron volt-kilogram relationship 1.782 661 758 e-36 0.000 000 044 e-36 kg +elementary charge 1.602 176 487 e-19 0.000 000 040 e-19 C +elementary charge over h 2.417 989 454 e14 0.000 000 060 e14 A J^-1 +Faraday constant 96 485.3399 0.0024 C mol^-1 +Faraday constant for conventional electric current 96 485.3401 0.0048 C_90 mol^-1 +Fermi coupling constant 1.166 37 e-5 0.000 01 e-5 GeV^-2 +fine-structure constant 7.297 352 5376 e-3 0.000 000 0050 e-3 +first radiation constant 3.741 771 18 e-16 0.000 000 19 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 759 e-16 0.000 000 059 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 2986 e-8 0.000 000 0042 e-8 u +hartree-electron volt relationship 27.211 383 86 0.000 000 68 eV +Hartree energy 4.359 743 94 e-18 0.000 000 22 e-18 J +Hartree energy in eV 27.211 383 86 0.000 000 68 eV +hartree-hertz relationship 6.579 683 920 722 e15 0.000 000 000 044 e15 Hz +hartree-inverse meter relationship 2.194 746 313 705 e7 0.000 000 000 015 e7 m^-1 +hartree-joule relationship 4.359 743 94 e-18 0.000 000 22 e-18 J +hartree-kelvin relationship 
3.157 7465 e5 0.000 0055 e5 K +hartree-kilogram relationship 4.850 869 34 e-35 0.000 000 24 e-35 kg +helion-electron mass ratio 5495.885 2765 0.000 0052 +helion mass 5.006 411 92 e-27 0.000 000 25 e-27 kg +helion mass energy equivalent 4.499 538 64 e-10 0.000 000 22 e-10 J +helion mass energy equivalent in MeV 2808.391 383 0.000 070 MeV +helion mass in u 3.014 932 2473 0.000 000 0026 u +helion molar mass 3.014 932 2473 e-3 0.000 000 0026 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 6713 0.000 000 0026 +hertz-atomic mass unit relationship 4.439 821 6294 e-24 0.000 000 0064 e-24 u +hertz-electron volt relationship 4.135 667 33 e-15 0.000 000 10 e-15 eV +hertz-hartree relationship 1.519 829 846 006 e-16 0.000 000 000010e-16 E_h +hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1 +hertz-joule relationship 6.626 068 96 e-34 0.000 000 33 e-34 J +hertz-kelvin relationship 4.799 2374 e-11 0.000 0084 e-11 K +hertz-kilogram relationship 7.372 496 00 e-51 0.000 000 37 e-51 kg +inverse fine-structure constant 137.035 999 679 0.000 000 094 +inverse meter-atomic mass unit relationship 1.331 025 0394 e-15 0.000 000 0019 e-15 u +inverse meter-electron volt relationship 1.239 841 875 e-6 0.000 000 031 e-6 eV +inverse meter-hartree relationship 4.556 335 252 760 e-8 0.000 000 000 030 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 501 e-25 0.000 000 099 e-25 J +inverse meter-kelvin relationship 1.438 7752 e-2 0.000 0025 e-2 K +inverse meter-kilogram relationship 2.210 218 70 e-42 0.000 000 11 e-42 kg +inverse of conductance quantum 12 906.403 7787 0.000 0088 ohm +Josephson constant 483 597.891 e9 0.012 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 536 41 e9 0.000 000 33 e9 u +joule-electron volt relationship 6.241 509 65 e18 0.000 000 16 e18 eV +joule-hartree relationship 2.293 712 69 e17 0.000 000 11 e17 E_h +joule-hertz relationship 1.509 190 450 e33 0.000 000 075 e33 Hz +joule-inverse meter 
relationship 5.034 117 47 e24 0.000 000 25 e24 m^-1 +joule-kelvin relationship 7.242 963 e22 0.000 013 e22 K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 098 e-14 0.000 016 e-14 u +kelvin-electron volt relationship 8.617 343 e-5 0.000 015 e-5 eV +kelvin-hartree relationship 3.166 8153 e-6 0.000 0055 e-6 E_h +kelvin-hertz relationship 2.083 6644 e10 0.000 0036 e10 Hz +kelvin-inverse meter relationship 69.503 56 0.000 12 m^-1 +kelvin-joule relationship 1.380 6504 e-23 0.000 0024 e-23 J +kelvin-kilogram relationship 1.536 1807 e-40 0.000 0027 e-40 kg +kilogram-atomic mass unit relationship 6.022 141 79 e26 0.000 000 30 e26 u +kilogram-electron volt relationship 5.609 589 12 e35 0.000 000 14 e35 eV +kilogram-hartree relationship 2.061 486 16 e34 0.000 000 10 e34 E_h +kilogram-hertz relationship 1.356 392 733 e50 0.000 000 068 e50 Hz +kilogram-inverse meter relationship 4.524 439 15 e41 0.000 000 23 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 651 e39 0.000 011 e39 K +lattice parameter of silicon 543.102 064 e-12 0.000 014 e-12 m +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7774 e25 0.000 0047 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. 
flux quantum 2.067 833 667 e-15 0.000 000 052 e-15 Wb +molar gas constant 8.314 472 0.000 015 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 6821 e-10 0.000 000 0057 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 564 72 0.000 000 000 17 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 981 e-3 0.000 040 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 996 e-3 0.000 039 e-3 m^3 mol^-1 +molar volume of silicon 12.058 8349 e-6 0.000 0011 e-6 m^3 mol^-1 +Mo x unit 1.002 099 55 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 04 e-15 0.000 000 30 e-15 m +muon Compton wavelength over 2 pi 1.867 594 295 e-15 0.000 000 047 e-15 m +muon-electron mass ratio 206.768 2823 0.000 0052 +muon g factor -2.002 331 8414 0.000 000 0012 +muon mag. mom. -4.490 447 86 e-26 0.000 000 16 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 69 e-3 0.000 000 60 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 49 e-3 0.000 000 12 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 23 +muon mass 1.883 531 30 e-28 0.000 000 11 e-28 kg +muon mass energy equivalent 1.692 833 510 e-11 0.000 000 095 e-11 J +muon mass energy equivalent in MeV 105.658 3668 0.000 0038 MeV +muon mass in u 0.113 428 9256 0.000 000 0029 u +muon molar mass 0.113 428 9256 e-3 0.000 000 0029 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5167 0.000 000 0029 +muon-proton mag. mom. 
ratio -3.183 345 137 0.000 000 085 +muon-proton mass ratio 0.112 609 5261 0.000 000 0029 +muon-tau mass ratio 5.945 92 e-2 0.000 97 e-2 +natural unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s +natural unit of action in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s +natural unit of energy 8.187 104 38 e-14 0.000 000 41 e-14 J +natural unit of energy in MeV 0.510 998 910 0.000 000 013 MeV +natural unit of length 386.159 264 59 e-15 0.000 000 53 e-15 m +natural unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg +natural unit of momentum 2.730 924 06 e-22 0.000 000 14 e-22 kg m s^-1 +natural unit of momentum in MeV/c 0.510 998 910 0.000 000 013 MeV/c +natural unit of time 1.288 088 6570 e-21 0.000 000 0018 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 8951 e-15 0.000 000 0020 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 413 82 e-15 0.000 000 000 31 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 6605 0.000 0011 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 85 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6954 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 41 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 211 e-27 0.000 000 084 e-27 kg +neutron mass energy equivalent 1.505 349 505 e-10 0.000 000 075 e-10 J +neutron mass energy equivalent in MeV 939.565 346 0.000 023 MeV +neutron mass in u 1.008 664 915 97 0.000 000 000 43 u +neutron molar mass 1.008 664 915 97 e-3 0.000 000 000 43 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 09 0.000 000 23 +neutron-proton mag. mom. 
ratio -0.684 979 34 0.000 000 16 +neutron-proton mass ratio 1.001 378 419 18 0.000 000 000 46 +neutron-tau mass ratio 0.528 740 0.000 086 +neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 28 e-11 0.000 67 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 81 e-39 0.000 67 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 24 e-27 0.000 000 13 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2326 e-8 0.000 000 0045 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 616 e-2 0.000 000 064 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2637 e-4 0.000 0064 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 84 0.000 000 19 MHz T^-1 +Planck constant 6.626 068 96 e-34 0.000 000 33 e-34 J s +Planck constant in eV s 4.135 667 33 e-15 0.000 000 10 e-15 eV s +Planck constant over 2 pi 1.054 571 628 e-34 0.000 000 053 e-34 J s +Planck constant over 2 pi in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9631 0.000 0049 MeV fm +Planck length 1.616 252 e-35 0.000 081 e-35 m +Planck mass 2.176 44 e-8 0.000 11 e-8 kg +Planck mass energy equivalent in GeV 1.220 892 e19 0.000 061 e19 GeV +Planck temperature 1.416 785 e32 0.000 071 e32 K +Planck time 5.391 24 e-44 0.000 27 e-44 s +proton charge to mass quotient 9.578 833 92 e7 0.000 000 24 e7 C kg^-1 +proton Compton wavelength 1.321 409 8446 e-15 0.000 000 0019 e-15 m +proton Compton wavelength over 2 pi 0.210 308 908 61 e-15 0.000 000 000 30 e-15 m +proton-electron mass ratio 1836.152 672 47 0.000 000 80 +proton g factor 5.585 694 713 0.000 000 046 +proton gyromag. ratio 2.675 222 099 e8 0.000 000 070 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 4821 0.000 0011 MHz T^-1 +proton mag. mom. 1.410 606 662 e-26 0.000 000 037 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 209 e-3 0.000 000 012 e-3 +proton mag. mom. 
to nuclear magneton ratio 2.792 847 356 0.000 000 023 +proton mag. shielding correction 25.694 e-6 0.014 e-6 +proton mass 1.672 621 637 e-27 0.000 000 083 e-27 kg +proton mass energy equivalent 1.503 277 359 e-10 0.000 000 075 e-10 J +proton mass energy equivalent in MeV 938.272 013 0.000 023 MeV +proton mass in u 1.007 276 466 77 0.000 000 000 10 u +proton molar mass 1.007 276 466 77 e-3 0.000 000 000 10 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 39 0.000 000 23 +proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 24 0.000 000 000 46 +proton rms charge radius 0.8768 e-15 0.0069 e-15 m +proton-tau mass ratio 0.528 012 0.000 086 +quantum of circulation 3.636 947 5199 e-4 0.000 000 0050 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 040 e-4 0.000 000 010 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 527 0.000 073 m^-1 +Rydberg constant times c in Hz 3.289 841 960 361 e15 0.000 000 000 022 e15 Hz +Rydberg constant times hc in eV 13.605 691 93 0.000 000 34 eV +Rydberg constant times hc in J 2.179 871 97 e-18 0.000 000 11 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7047 0.000 0044 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8677 0.000 0044 +second radiation constant 1.438 7752 e-2 0.000 0025 e-2 m K +shielded helion gyromag. ratio 2.037 894 730 e8 0.000 000 056 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 101 98 0.000 000 90 MHz T^-1 +shielded helion mag. mom. -1.074 552 982 e-26 0.000 000 030 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025 +shielded helion to proton mag. mom. ratio -0.761 766 558 0.000 000 011 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 362 e8 0.000 000 073 e8 s^-1 T^-1 +shielded proton gyromag. 
ratio over 2 pi 42.576 3881 0.000 0012 MHz T^-1 +shielded proton mag. mom. 1.410 570 419 e-26 0.000 000 038 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +Stefan-Boltzmann constant 5.670 400 e-8 0.000 040 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 72 e-15 0.000 11 e-15 m +tau Compton wavelength over 2 pi 0.111 046 e-15 0.000 018 e-15 m +tau-electron mass ratio 3477.48 0.57 +tau mass 3.167 77 e-27 0.000 52 e-27 kg +tau mass energy equivalent 2.847 05 e-10 0.000 46 e-10 J +tau mass energy equivalent in MeV 1776.99 0.29 MeV +tau mass in u 1.907 68 0.000 31 u +tau molar mass 1.907 68 e-3 0.000 31 e-3 kg mol^-1 +tau-muon mass ratio 16.8183 0.0027 +tau-neutron mass ratio 1.891 29 0.000 31 +tau-proton mass ratio 1.893 90 0.000 31 +Thomson cross section 0.665 245 8558 e-28 0.000 000 0027 e-28 m^2 +triton-electron mag. mom. ratio -1.620 514 423 e-3 0.000 000 021 e-3 +triton-electron mass ratio 5496.921 5269 0.000 0051 +triton g factor 5.957 924 896 0.000 000 076 +triton mag. mom. 1.504 609 361 e-26 0.000 000 042 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3 +triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038 +triton mass 5.007 355 88 e-27 0.000 000 25 e-27 kg +triton mass energy equivalent 4.500 387 03 e-10 0.000 000 22 e-10 J +triton mass energy equivalent in MeV 2808.920 906 0.000 070 MeV +triton mass in u 3.015 500 7134 0.000 000 0025 u +triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1 +triton-neutron mag. mom. ratio -1.557 185 53 0.000 000 37 +triton-proton mag. mom. 
ratio 1.066 639 908 0.000 000 010 +triton-proton mass ratio 2.993 717 0309 0.000 000 0025 +unified atomic mass unit 1.660 538 782 e-27 0.000 000 083 e-27 kg +von Klitzing constant 25 812.807 557 0.000 018 ohm +weak mixing angle 0.222 55 0.000 56 +Wien frequency displacement law constant 5.878 933 e10 0.000 010 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7685 e-3 0.000 0051 e-3 m K""" + +txt2010 = """\ +{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m +alpha particle-electron mass ratio 7294.299 5361 0.000 0029 +alpha particle mass 6.644 656 75 e-27 0.000 000 29 e-27 kg +alpha particle mass energy equivalent 5.971 919 67 e-10 0.000 000 26 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 240 0.000 082 MeV +alpha particle mass in u 4.001 506 179 125 0.000 000 000 062 u +alpha particle molar mass 4.001 506 179 125 e-3 0.000 000 000 062 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 33 0.000 000 000 36 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 538 921 e-27 0.000 000 073 e-27 kg +atomic mass constant energy equivalent 1.492 417 954 e-10 0.000 000 066 e-10 J +atomic mass constant energy equivalent in MeV 931.494 061 0.000 021 MeV +atomic mass unit-electron volt relationship 931.494 061 e6 0.000 021 e6 eV +atomic mass unit-hartree relationship 3.423 177 6845 e7 0.000 000 0024 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7168 e23 0.000 000 0016 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6042 e14 0.000 000 0053 e14 m^-1 +atomic mass unit-joule relationship 1.492 417 954 e-10 0.000 000 066 e-10 J +atomic mass unit-kelvin relationship 1.080 954 08 e13 0.000 000 98 e13 K +atomic mass unit-kilogram relationship 1.660 538 921 e-27 0.000 000 073 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 449 e-53 0.000 000 071 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 54 e-65 0.000 000 28 e-65 C^4 m^4 J^-3 +atomic unit of 
action 1.054 571 726 e-34 0.000 000 047 e-34 J s +atomic unit of charge 1.602 176 565 e-19 0.000 000 035 e-19 C +atomic unit of charge density 1.081 202 338 e12 0.000 000 024 e12 C m^-3 +atomic unit of current 6.623 617 95 e-3 0.000 000 15 e-3 A +atomic unit of electric dipole mom. 8.478 353 26 e-30 0.000 000 19 e-30 C m +atomic unit of electric field 5.142 206 52 e11 0.000 000 11 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 00 e21 0.000 000 21 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2754 e-41 0.000 000 0016 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 385 05 0.000 000 60 V +atomic unit of electric quadrupole mom. 4.486 551 331 e-40 0.000 000 099 e-40 C m^2 +atomic unit of energy 4.359 744 34 e-18 0.000 000 19 e-18 J +atomic unit of force 8.238 722 78 e-8 0.000 000 36 e-8 N +atomic unit of length 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m +atomic unit of mag. dipole mom. 1.854 801 936 e-23 0.000 000 041 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 464 e5 0.000 000 052 e5 T +atomic unit of magnetizability 7.891 036 607 e-29 0.000 000 013 e-29 J T^-2 +atomic unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +atomic unit of mom.um 1.992 851 740 e-24 0.000 000 088 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... 
e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326 502e-17 0.000 000 000 012e-17 s +atomic unit of velocity 2.187 691 263 79 e6 0.000 000 000 71 e6 m s^-1 +Avogadro constant 6.022 141 29 e23 0.000 000 27 e23 mol^-1 +Bohr magneton 927.400 968 e-26 0.000 020 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 8066 e-5 0.000 000 0038 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 245 55 e9 0.000 000 31 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 4498 0.000 0010 m^-1 T^-1 +Bohr magneton in K/T 0.671 713 88 0.000 000 61 K T^-1 +Bohr radius 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m +Boltzmann constant 1.380 6488 e-23 0.000 0013 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 3324 e-5 0.000 0078 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6618 e10 0.000 0019 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 476 0.000 063 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 3267 e-15 0.000 000 0027 e-15 m +Compton wavelength 2.426 310 2389 e-12 0.000 000 0016 e-12 m +Compton wavelength over 2 pi 386.159 268 00 e-15 0.000 000 25 e-15 m +conductance quantum 7.748 091 7346 e-5 0.000 000 0025 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4 +deuteron-electron mass ratio 3670.482 9652 0.000 0015 +deuteron g factor 0.857 438 2308 0.000 000 0072 +deuteron mag. mom. 0.433 073 489 e-26 0.000 000 010 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3 +deuteron mag. mom. 
to nuclear magneton ratio 0.857 438 2308 0.000 000 0072 +deuteron mass 3.343 583 48 e-27 0.000 000 15 e-27 kg +deuteron mass energy equivalent 3.005 062 97 e-10 0.000 000 13 e-10 J +deuteron mass energy equivalent in MeV 1875.612 859 0.000 041 MeV +deuteron mass in u 2.013 553 212 712 0.000 000 000 077 u +deuteron molar mass 2.013 553 212 712 e-3 0.000 000 000 077 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024 +deuteron-proton mass ratio 1.999 007 500 97 0.000 000 000 18 +deuteron rms charge radius 2.1424 e-15 0.0021 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 088 e11 0.000 000 039 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 498 0.000 018 +electron-deuteron mass ratio 2.724 437 1095 e-4 0.000 000 0011 e-4 +electron g factor -2.002 319 304 361 53 0.000 000 000 000 53 +electron gyromag. ratio 1.760 859 708 e11 0.000 000 039 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.952 66 0.000 62 MHz T^-1 +electron-helion mass ratio 1.819 543 0761 e-4 0.000 000 0017 e-4 +electron mag. mom. -928.476 430 e-26 0.000 021 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 180 76 e-3 0.000 000 000 27 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 76 0.000 000 000 000 27 +electron mag. mom. to nuclear magneton ratio -1838.281 970 90 0.000 000 75 +electron mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +electron mass energy equivalent 8.187 105 06 e-14 0.000 000 36 e-14 J +electron mass energy equivalent in MeV 0.510 998 928 0.000 000 011 MeV +electron mass in u 5.485 799 0946 e-4 0.000 000 0022 e-4 u +electron molar mass 5.485 799 0946 e-7 0.000 000 0022 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9896 0.000 0052 +electron-muon mass ratio 4.836 331 66 e-3 0.000 000 12 e-3 +electron-neutron mag. mom. 
ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4461 e-4 0.000 000 0032 e-4 +electron-proton mag. mom. ratio -658.210 6848 0.000 0054 +electron-proton mass ratio 5.446 170 2178 e-4 0.000 000 0022 e-4 +electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4 +electron to alpha particle mass ratio 1.370 933 555 78 e-4 0.000 000 000 55 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 0653 e-4 0.000 000 0017 e-4 +electron volt 1.602 176 565 e-19 0.000 000 035 e-19 J +electron volt-atomic mass unit relationship 1.073 544 150 e-9 0.000 000 024 e-9 u +electron volt-hartree relationship 3.674 932 379 e-2 0.000 000 081 e-2 E_h +electron volt-hertz relationship 2.417 989 348 e14 0.000 000 053 e14 Hz +electron volt-inverse meter relationship 8.065 544 29 e5 0.000 000 18 e5 m^-1 +electron volt-joule relationship 1.602 176 565 e-19 0.000 000 035 e-19 J +electron volt-kelvin relationship 1.160 4519 e4 0.000 0011 e4 K +electron volt-kilogram relationship 1.782 661 845 e-36 0.000 000 039 e-36 kg +elementary charge 1.602 176 565 e-19 0.000 000 035 e-19 C +elementary charge over h 2.417 989 348 e14 0.000 000 053 e14 A J^-1 +Faraday constant 96 485.3365 0.0021 C mol^-1 +Faraday constant for conventional electric current 96 485.3321 0.0043 C_90 mol^-1 +Fermi coupling constant 1.166 364 e-5 0.000 005 e-5 GeV^-2 +fine-structure constant 7.297 352 5698 e-3 0.000 000 0024 e-3 +first radiation constant 3.741 771 53 e-16 0.000 000 17 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 869 e-16 0.000 000 053 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 3246 e-8 0.000 000 0021 e-8 u +hartree-electron volt relationship 27.211 385 05 0.000 000 60 eV +Hartree energy 4.359 744 34 e-18 0.000 000 19 e-18 J +Hartree energy in eV 27.211 385 05 0.000 000 60 eV +hartree-hertz relationship 6.579 683 920 729 e15 0.000 000 000 033 
e15 Hz +hartree-inverse meter relationship 2.194 746 313 708 e7 0.000 000 000 011 e7 m^-1 +hartree-joule relationship 4.359 744 34 e-18 0.000 000 19 e-18 J +hartree-kelvin relationship 3.157 7504 e5 0.000 0029 e5 K +hartree-kilogram relationship 4.850 869 79 e-35 0.000 000 21 e-35 kg +helion-electron mass ratio 5495.885 2754 0.000 0050 +helion g factor -4.255 250 613 0.000 000 050 +helion mag. mom. -1.074 617 486 e-26 0.000 000 027 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 306 0.000 000 025 +helion mass 5.006 412 34 e-27 0.000 000 22 e-27 kg +helion mass energy equivalent 4.499 539 02 e-10 0.000 000 20 e-10 J +helion mass energy equivalent in MeV 2808.391 482 0.000 062 MeV +helion mass in u 3.014 932 2468 0.000 000 0025 u +helion molar mass 3.014 932 2468 e-3 0.000 000 0025 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 6707 0.000 000 0025 +hertz-atomic mass unit relationship 4.439 821 6689 e-24 0.000 000 0031 e-24 u +hertz-electron volt relationship 4.135 667 516 e-15 0.000 000 091 e-15 eV +hertz-hartree relationship 1.519 829 8460045e-16 0.000 000 0000076e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 069 57 e-34 0.000 000 29 e-34 J +hertz-kelvin relationship 4.799 2434 e-11 0.000 0044 e-11 K +hertz-kilogram relationship 7.372 496 68 e-51 0.000 000 33 e-51 kg +inverse fine-structure constant 137.035 999 074 0.000 000 044 +inverse meter-atomic mass unit relationship 1.331 025 051 20 e-15 0.000 000 000 94 e-15 u +inverse meter-electron volt relationship 1.239 841 930 e-6 0.000 000 027 e-6 eV +inverse meter-hartree relationship 4.556 335 252 755 e-8 0.000 000 000 023 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 684 e-25 0.000 000 088 e-25 J +inverse meter-kelvin relationship 1.438 7770 e-2 0.000 0013 e-2 K +inverse meter-kilogram relationship 2.210 218 902 e-42 0.000 000 098 e-42 kg +inverse of conductance quantum 12 906.403 7217 0.000 0042 ohm +Josephson constant 483 597.870 e9 0.011 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 535 85 e9 0.000 000 30 e9 u +joule-electron volt relationship 6.241 509 34 e18 0.000 000 14 e18 eV +joule-hartree relationship 2.293 712 48 e17 0.000 000 10 e17 E_h +joule-hertz relationship 1.509 190 311 e33 0.000 000 067 e33 Hz +joule-inverse meter relationship 5.034 117 01 e24 0.000 000 22 e24 m^-1 +joule-kelvin relationship 7.242 9716 e22 0.000 0066 e22 K +joule-kilogram relationship 1.112 650 056... 
e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 0868 e-14 0.000 0084 e-14 u +kelvin-electron volt relationship 8.617 3324 e-5 0.000 0078 e-5 eV +kelvin-hartree relationship 3.166 8114 e-6 0.000 0029 e-6 E_h +kelvin-hertz relationship 2.083 6618 e10 0.000 0019 e10 Hz +kelvin-inverse meter relationship 69.503 476 0.000 063 m^-1 +kelvin-joule relationship 1.380 6488 e-23 0.000 0013 e-23 J +kelvin-kilogram relationship 1.536 1790 e-40 0.000 0014 e-40 kg +kilogram-atomic mass unit relationship 6.022 141 29 e26 0.000 000 27 e26 u +kilogram-electron volt relationship 5.609 588 85 e35 0.000 000 12 e35 eV +kilogram-hartree relationship 2.061 485 968 e34 0.000 000 091 e34 E_h +kilogram-hertz relationship 1.356 392 608 e50 0.000 000 060 e50 Hz +kilogram-inverse meter relationship 4.524 438 73 e41 0.000 000 20 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 6582 e39 0.000 0059 e39 K +lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 6462 e25 0.000 0024 e25 m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7805 e25 0.000 0024 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. 
flux quantum 2.067 833 758 e-15 0.000 000 046 e-15 Wb +molar gas constant 8.314 4621 0.000 0075 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 7176 e-10 0.000 000 0028 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 565 779 0.000 000 000 084 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 953 e-3 0.000 021 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 968 e-3 0.000 020 e-3 m^3 mol^-1 +molar volume of silicon 12.058 833 01 e-6 0.000 000 80 e-6 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 03 e-15 0.000 000 30 e-15 m +muon Compton wavelength over 2 pi 1.867 594 294 e-15 0.000 000 047 e-15 m +muon-electron mass ratio 206.768 2843 0.000 0052 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. -4.490 448 07 e-26 0.000 000 15 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 91 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 44 e-3 0.000 000 12 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 596 97 0.000 000 22 +muon mass 1.883 531 475 e-28 0.000 000 096 e-28 kg +muon mass energy equivalent 1.692 833 667 e-11 0.000 000 086 e-11 J +muon mass energy equivalent in MeV 105.658 3715 0.000 0035 MeV +muon mass in u 0.113 428 9267 0.000 000 0029 u +muon molar mass 0.113 428 9267 e-3 0.000 000 0029 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5177 0.000 000 0028 +muon-proton mag. mom. 
ratio -3.183 345 107 0.000 000 084 +muon-proton mass ratio 0.112 609 5272 0.000 000 0028 +muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2 +natural unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s +natural unit of action in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s +natural unit of energy 8.187 105 06 e-14 0.000 000 36 e-14 J +natural unit of energy in MeV 0.510 998 928 0.000 000 011 MeV +natural unit of length 386.159 268 00 e-15 0.000 000 25 e-15 m +natural unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg +natural unit of mom.um 2.730 924 29 e-22 0.000 000 12 e-22 kg m s^-1 +natural unit of mom.um in MeV/c 0.510 998 928 0.000 000 011 MeV/c +natural unit of time 1.288 088 668 33 e-21 0.000 000 000 83 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 9068 e-15 0.000 000 0011 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 415 68 e-15 0.000 000 000 17 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 6605 0.000 0011 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 79 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6943 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 47 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 72 0.000 000 45 +neutron mass 1.674 927 351 e-27 0.000 000 074 e-27 kg +neutron mass energy equivalent 1.505 349 631 e-10 0.000 000 066 e-10 J +neutron mass energy equivalent in MeV 939.565 379 0.000 021 MeV +neutron mass in u 1.008 664 916 00 0.000 000 000 43 u +neutron molar mass 1.008 664 916 00 e-3 0.000 000 000 43 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 00 0.000 000 22 +neutron-proton mag. mom. 
ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 573 92 e-30 0.000 000 76 e-30 +neutron-proton mass difference energy equivalent 2.072 146 50 e-13 0.000 000 68 e-13 +neutron-proton mass difference energy equivalent in MeV 1.293 332 17 0.000 000 42 +neutron-proton mass difference in u 0.001 388 449 19 0.000 000 000 45 +neutron-proton mass ratio 1.001 378 419 17 0.000 000 000 45 +neutron-tau mass ratio 0.528 790 0.000 048 +neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.673 84 e-11 0.000 80 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 37 e-39 0.000 80 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 53 e-27 0.000 000 11 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2605 e-8 0.000 000 0022 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 527 e-2 0.000 000 056 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2682 e-4 0.000 0033 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 57 0.000 000 17 MHz T^-1 +Planck constant 6.626 069 57 e-34 0.000 000 29 e-34 J s +Planck constant in eV s 4.135 667 516 e-15 0.000 000 091 e-15 eV s +Planck constant over 2 pi 1.054 571 726 e-34 0.000 000 047 e-34 J s +Planck constant over 2 pi in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9718 0.000 0044 MeV fm +Planck length 1.616 199 e-35 0.000 097 e-35 m +Planck mass 2.176 51 e-8 0.000 13 e-8 kg +Planck mass energy equivalent in GeV 1.220 932 e19 0.000 073 e19 GeV +Planck temperature 1.416 833 e32 0.000 085 e32 K +Planck time 5.391 06 e-44 0.000 32 e-44 s +proton charge to mass quotient 9.578 833 58 e7 0.000 000 21 e7 C kg^-1 +proton Compton wavelength 1.321 409 856 23 e-15 0.000 000 000 94 e-15 m +proton Compton wavelength over 2 pi 0.210 308 910 47 e-15 0.000 000 000 15 e-15 m +proton-electron mass ratio 1836.152 672 45 0.000 000 75 +proton g factor 5.585 694 713 0.000 000 046 +proton gyromag. 
ratio 2.675 222 005 e8 0.000 000 063 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 4806 0.000 0010 MHz T^-1 +proton mag. mom. 1.410 606 743 e-26 0.000 000 033 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 210 e-3 0.000 000 012 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023 +proton mag. shielding correction 25.694 e-6 0.014 e-6 +proton mass 1.672 621 777 e-27 0.000 000 074 e-27 kg +proton mass energy equivalent 1.503 277 484 e-10 0.000 000 066 e-10 J +proton mass energy equivalent in MeV 938.272 046 0.000 021 MeV +proton mass in u 1.007 276 466 812 0.000 000 000 090 u +proton molar mass 1.007 276 466 812 e-3 0.000 000 000 090 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 31 0.000 000 22 +proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 26 0.000 000 000 45 +proton rms charge radius 0.8775 e-15 0.0051 e-15 m +proton-tau mass ratio 0.528 063 0.000 048 +quantum of circulation 3.636 947 5520 e-4 0.000 000 0024 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 1040 e-4 0.000 000 0047 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 539 0.000 055 m^-1 +Rydberg constant times c in Hz 3.289 841 960 364 e15 0.000 000 000 017 e15 Hz +Rydberg constant times hc in eV 13.605 692 53 0.000 000 30 eV +Rydberg constant times hc in J 2.179 872 171 e-18 0.000 000 096 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7078 0.000 0023 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8708 0.000 0023 +second radiation constant 1.438 7770 e-2 0.000 0013 e-2 m K +shielded helion gyromag. ratio 2.037 894 659 e8 0.000 000 051 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 100 84 0.000 000 81 MHz T^-1 +shielded helion mag. mom. -1.074 553 044 e-26 0.000 000 027 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025 +shielded helion to proton mag. 
mom. ratio -0.761 766 558 0.000 000 011 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 268 e8 0.000 000 066 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 3866 0.000 0010 MHz T^-1 +shielded proton mag. mom. 1.410 570 499 e-26 0.000 000 035 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 373 e-8 0.000 021 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m +tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m +tau-electron mass ratio 3477.15 0.31 +tau mass 3.167 47 e-27 0.000 29 e-27 kg +tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J +tau mass energy equivalent in MeV 1776.82 0.16 MeV +tau mass in u 1.907 49 0.000 17 u +tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1 +tau-muon mass ratio 16.8167 0.0015 +tau-neutron mass ratio 1.891 11 0.000 17 +tau-proton mass ratio 1.893 72 0.000 17 +Thomson cross section 0.665 245 8734 e-28 0.000 000 0013 e-28 m^2 +triton-electron mass ratio 5496.921 5267 0.000 0050 +triton g factor 5.957 924 896 0.000 000 076 +triton mag. mom. 1.504 609 447 e-26 0.000 000 038 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3 +triton mag. mom. 
to nuclear magneton ratio 2.978 962 448 0.000 000 038 +triton mass 5.007 356 30 e-27 0.000 000 22 e-27 kg +triton mass energy equivalent 4.500 387 41 e-10 0.000 000 20 e-10 J +triton mass energy equivalent in MeV 2808.921 005 0.000 062 MeV +triton mass in u 3.015 500 7134 0.000 000 0025 u +triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 0308 0.000 000 0025 +unified atomic mass unit 1.660 538 921 e-27 0.000 000 073 e-27 kg +von Klitzing constant 25 812.807 4434 0.000 0084 ohm +weak mixing angle 0.2223 0.0021 +Wien frequency displacement law constant 5.878 9254 e10 0.000 0053 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7721 e-3 0.000 0026 e-3 m K""" + +txt2014 = """\ +{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m +alpha particle-electron mass ratio 7294.299 541 36 0.000 000 24 +alpha particle mass 6.644 657 230 e-27 0.000 000 082 e-27 kg +alpha particle mass energy equivalent 5.971 920 097 e-10 0.000 000 073 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 378 0.000 023 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 063 u +alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 063 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 689 07 0.000 000 000 36 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 539 040 e-27 0.000 000 020 e-27 kg +atomic mass constant energy equivalent 1.492 418 062 e-10 0.000 000 018 e-10 J +atomic mass constant energy equivalent in MeV 931.494 0954 0.000 0057 MeV +atomic mass unit-electron volt relationship 931.494 0954 e6 0.000 0057 e6 eV +atomic mass unit-hartree relationship 3.423 177 6902 e7 0.000 000 0016 e7 E_h +atomic mass unit-hertz relationship 2.252 342 7206 e23 0.000 000 0010 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6166 e14 0.000 000 0034 e14 m^-1 +atomic mass unit-joule relationship 1.492 418 062 e-10 0.000 000 018 e-10 J +atomic mass 
unit-kelvin relationship 1.080 954 38 e13 0.000 000 62 e13 K +atomic mass unit-kilogram relationship 1.660 539 040 e-27 0.000 000 020 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 329 e-53 0.000 000 020 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 380 085 e-65 0.000 000 077 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s +atomic unit of charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C +atomic unit of charge density 1.081 202 3770 e12 0.000 000 0067 e12 C m^-3 +atomic unit of current 6.623 618 183 e-3 0.000 000 041 e-3 A +atomic unit of electric dipole mom. 8.478 353 552 e-30 0.000 000 052 e-30 C m +atomic unit of electric field 5.142 206 707 e11 0.000 000 032 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 356 e21 0.000 000 060 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 2731 e-41 0.000 000 0011 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 386 02 0.000 000 17 V +atomic unit of electric quadrupole mom. 4.486 551 484 e-40 0.000 000 028 e-40 C m^2 +atomic unit of energy 4.359 744 650 e-18 0.000 000 054 e-18 J +atomic unit of force 8.238 723 36 e-8 0.000 000 10 e-8 N +atomic unit of length 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m +atomic unit of mag. dipole mom. 1.854 801 999 e-23 0.000 000 011 e-23 J T^-1 +atomic unit of mag. flux density 2.350 517 550 e5 0.000 000 014 e5 T +atomic unit of magnetizability 7.891 036 5886 e-29 0.000 000 0090 e-29 J T^-2 +atomic unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +atomic unit of mom.um 1.992 851 882 e-24 0.000 000 024 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 056... 
e-10 (exact) F m^-1 +atomic unit of time 2.418 884 326509e-17 0.000 000 000014e-17 s +atomic unit of velocity 2.187 691 262 77 e6 0.000 000 000 50 e6 m s^-1 +Avogadro constant 6.022 140 857 e23 0.000 000 074 e23 mol^-1 +Bohr magneton 927.400 9994 e-26 0.000 0057 e-26 J T^-1 +Bohr magneton in eV/T 5.788 381 8012 e-5 0.000 000 0026 e-5 eV T^-1 +Bohr magneton in Hz/T 13.996 245 042 e9 0.000 000 086 e9 Hz T^-1 +Bohr magneton in inverse meters per tesla 46.686 448 14 0.000 000 29 m^-1 T^-1 +Bohr magneton in K/T 0.671 714 05 0.000 000 39 K T^-1 +Bohr radius 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m +Boltzmann constant 1.380 648 52 e-23 0.000 000 79 e-23 J K^-1 +Boltzmann constant in eV/K 8.617 3303 e-5 0.000 0050 e-5 eV K^-1 +Boltzmann constant in Hz/K 2.083 6612 e10 0.000 0012 e10 Hz K^-1 +Boltzmann constant in inverse meters per kelvin 69.503 457 0.000 040 m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 461... (exact) ohm +classical electron radius 2.817 940 3227 e-15 0.000 000 0019 e-15 m +Compton wavelength 2.426 310 2367 e-12 0.000 000 0011 e-12 m +Compton wavelength over 2 pi 386.159 267 64 e-15 0.000 000 18 e-15 m +conductance quantum 7.748 091 7310 e-5 0.000 000 0018 e-5 S +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of von Klitzing constant 25 812.807 (exact) ohm +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 535 e-4 0.000 000 026 e-4 +deuteron-electron mass ratio 3670.482 967 85 0.000 000 13 +deuteron g factor 0.857 438 2311 0.000 000 0048 +deuteron mag. mom. 0.433 073 5040 e-26 0.000 000 0036 e-26 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 0.466 975 4554 e-3 0.000 000 0026 e-3 +deuteron mag. mom. 
to nuclear magneton ratio 0.857 438 2311 0.000 000 0048 +deuteron mass 3.343 583 719 e-27 0.000 000 041 e-27 kg +deuteron mass energy equivalent 3.005 063 183 e-10 0.000 000 037 e-10 J +deuteron mass energy equivalent in MeV 1875.612 928 0.000 012 MeV +deuteron mass in u 2.013 553 212 745 0.000 000 000 040 u +deuteron molar mass 2.013 553 212 745 e-3 0.000 000 000 040 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 2077 0.000 000 0015 +deuteron-proton mass ratio 1.999 007 500 87 0.000 000 000 19 +deuteron rms charge radius 2.1413 e-15 0.0025 e-15 m +electric constant 8.854 187 817... e-12 (exact) F m^-1 +electron charge to mass quotient -1.758 820 024 e11 0.000 000 011 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 499 0.000 012 +electron-deuteron mass ratio 2.724 437 107 484 e-4 0.000 000 000 096 e-4 +electron g factor -2.002 319 304 361 82 0.000 000 000 000 52 +electron gyromag. ratio 1.760 859 644 e11 0.000 000 011 e11 s^-1 T^-1 +electron gyromag. ratio over 2 pi 28 024.951 64 0.000 17 MHz T^-1 +electron-helion mass ratio 1.819 543 074 854 e-4 0.000 000 000 088 e-4 +electron mag. mom. -928.476 4620 e-26 0.000 0057 e-26 J T^-1 +electron mag. mom. anomaly 1.159 652 180 91 e-3 0.000 000 000 26 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 91 0.000 000 000 000 26 +electron mag. mom. to nuclear magneton ratio -1838.281 972 34 0.000 000 17 +electron mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +electron mass energy equivalent 8.187 105 65 e-14 0.000 000 10 e-14 J +electron mass energy equivalent in MeV 0.510 998 9461 0.000 000 0031 MeV +electron mass in u 5.485 799 090 70 e-4 0.000 000 000 16 e-4 u +electron molar mass 5.485 799 090 70 e-7 0.000 000 000 16 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9880 0.000 0046 +electron-muon mass ratio 4.836 331 70 e-3 0.000 000 11 e-3 +electron-neutron mag. mom. 
ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4428 e-4 0.000 000 0027 e-4 +electron-proton mag. mom. ratio -658.210 6866 0.000 0020 +electron-proton mass ratio 5.446 170 213 52 e-4 0.000 000 000 52 e-4 +electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4 +electron to alpha particle mass ratio 1.370 933 554 798 e-4 0.000 000 000 045 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 062 203 e-4 0.000 000 000 084 e-4 +electron volt 1.602 176 6208 e-19 0.000 000 0098 e-19 J +electron volt-atomic mass unit relationship 1.073 544 1105 e-9 0.000 000 0066 e-9 u +electron volt-hartree relationship 3.674 932 248 e-2 0.000 000 023 e-2 E_h +electron volt-hertz relationship 2.417 989 262 e14 0.000 000 015 e14 Hz +electron volt-inverse meter relationship 8.065 544 005 e5 0.000 000 050 e5 m^-1 +electron volt-joule relationship 1.602 176 6208 e-19 0.000 000 0098 e-19 J +electron volt-kelvin relationship 1.160 452 21 e4 0.000 000 67 e4 K +electron volt-kilogram relationship 1.782 661 907 e-36 0.000 000 011 e-36 kg +elementary charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C +elementary charge over h 2.417 989 262 e14 0.000 000 015 e14 A J^-1 +Faraday constant 96 485.332 89 0.000 59 C mol^-1 +Faraday constant for conventional electric current 96 485.3251 0.0012 C_90 mol^-1 +Fermi coupling constant 1.166 3787 e-5 0.000 0006 e-5 GeV^-2 +fine-structure constant 7.297 352 5664 e-3 0.000 000 0017 e-3 +first radiation constant 3.741 771 790 e-16 0.000 000 046 e-16 W m^2 +first radiation constant for spectral radiance 1.191 042 953 e-16 0.000 000 015 e-16 W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 3197 e-8 0.000 000 0013 e-8 u +hartree-electron volt relationship 27.211 386 02 0.000 000 17 eV +Hartree energy 4.359 744 650 e-18 0.000 000 054 e-18 J +Hartree energy in eV 27.211 386 02 0.000 000 17 eV +hartree-hertz relationship 
6.579 683 920 711 e15 0.000 000 000 039 e15 Hz +hartree-inverse meter relationship 2.194 746 313 702 e7 0.000 000 000 013 e7 m^-1 +hartree-joule relationship 4.359 744 650 e-18 0.000 000 054 e-18 J +hartree-kelvin relationship 3.157 7513 e5 0.000 0018 e5 K +hartree-kilogram relationship 4.850 870 129 e-35 0.000 000 060 e-35 kg +helion-electron mass ratio 5495.885 279 22 0.000 000 27 +helion g factor -4.255 250 616 0.000 000 050 +helion mag. mom. -1.074 617 522 e-26 0.000 000 014 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 308 0.000 000 025 +helion mass 5.006 412 700 e-27 0.000 000 062 e-27 kg +helion mass energy equivalent 4.499 539 341 e-10 0.000 000 055 e-10 J +helion mass energy equivalent in MeV 2808.391 586 0.000 017 MeV +helion mass in u 3.014 932 246 73 0.000 000 000 12 u +helion molar mass 3.014 932 246 73 e-3 0.000 000 000 12 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 670 46 0.000 000 000 29 +hertz-atomic mass unit relationship 4.439 821 6616 e-24 0.000 000 0020 e-24 u +hertz-electron volt relationship 4.135 667 662 e-15 0.000 000 025 e-15 eV +hertz-hartree relationship 1.5198298460088 e-16 0.0000000000090e-16 E_h +hertz-inverse meter relationship 3.335 640 951... 
e-9 (exact) m^-1 +hertz-joule relationship 6.626 070 040 e-34 0.000 000 081 e-34 J +hertz-kelvin relationship 4.799 2447 e-11 0.000 0028 e-11 K +hertz-kilogram relationship 7.372 497 201 e-51 0.000 000 091 e-51 kg +inverse fine-structure constant 137.035 999 139 0.000 000 031 +inverse meter-atomic mass unit relationship 1.331 025 049 00 e-15 0.000 000 000 61 e-15 u +inverse meter-electron volt relationship 1.239 841 9739 e-6 0.000 000 0076 e-6 eV +inverse meter-hartree relationship 4.556 335 252 767 e-8 0.000 000 000 027 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 824 e-25 0.000 000 024 e-25 J +inverse meter-kelvin relationship 1.438 777 36 e-2 0.000 000 83 e-2 K +inverse meter-kilogram relationship 2.210 219 057 e-42 0.000 000 027 e-42 kg +inverse of conductance quantum 12 906.403 7278 0.000 0029 ohm +Josephson constant 483 597.8525 e9 0.0030 e9 Hz V^-1 +joule-atomic mass unit relationship 6.700 535 363 e9 0.000 000 082 e9 u +joule-electron volt relationship 6.241 509 126 e18 0.000 000 038 e18 eV +joule-hartree relationship 2.293 712 317 e17 0.000 000 028 e17 E_h +joule-hertz relationship 1.509 190 205 e33 0.000 000 019 e33 Hz +joule-inverse meter relationship 5.034 116 651 e24 0.000 000 062 e24 m^-1 +joule-kelvin relationship 7.242 9731 e22 0.000 0042 e22 K +joule-kilogram relationship 1.112 650 056... 
e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 0842 e-14 0.000 0053 e-14 u +kelvin-electron volt relationship 8.617 3303 e-5 0.000 0050 e-5 eV +kelvin-hartree relationship 3.166 8105 e-6 0.000 0018 e-6 E_h +kelvin-hertz relationship 2.083 6612 e10 0.000 0012 e10 Hz +kelvin-inverse meter relationship 69.503 457 0.000 040 m^-1 +kelvin-joule relationship 1.380 648 52 e-23 0.000 000 79 e-23 J +kelvin-kilogram relationship 1.536 178 65 e-40 0.000 000 88 e-40 kg +kilogram-atomic mass unit relationship 6.022 140 857 e26 0.000 000 074 e26 u +kilogram-electron volt relationship 5.609 588 650 e35 0.000 000 034 e35 eV +kilogram-hartree relationship 2.061 485 823 e34 0.000 000 025 e34 E_h +kilogram-hertz relationship 1.356 392 512 e50 0.000 000 017 e50 Hz +kilogram-inverse meter relationship 4.524 438 411 e41 0.000 000 056 e41 m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 6595 e39 0.000 0037 e39 K +lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 6467 e25 0.000 0015 e25 m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7811 e25 0.000 0015 e25 m^-3 +mag. constant 12.566 370 614... e-7 (exact) N A^-2 +mag. 
flux quantum 2.067 833 831 e-15 0.000 000 013 e-15 Wb +molar gas constant 8.314 4598 0.000 0048 J mol^-1 K^-1 +molar mass constant 1 e-3 (exact) kg mol^-1 +molar mass of carbon-12 12 e-3 (exact) kg mol^-1 +molar Planck constant 3.990 312 7110 e-10 0.000 000 0018 e-10 J s mol^-1 +molar Planck constant times c 0.119 626 565 582 0.000 000 000 054 J m mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 947 e-3 0.000 013 e-3 m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 962 e-3 0.000 013 e-3 m^3 mol^-1 +molar volume of silicon 12.058 832 14 e-6 0.000 000 61 e-6 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 11.734 441 11 e-15 0.000 000 26 e-15 m +muon Compton wavelength over 2 pi 1.867 594 308 e-15 0.000 000 042 e-15 m +muon-electron mass ratio 206.768 2826 0.000 0046 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. -4.490 448 26 e-26 0.000 000 10 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 89 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 48 e-3 0.000 000 11 e-3 +muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 20 +muon mass 1.883 531 594 e-28 0.000 000 048 e-28 kg +muon mass energy equivalent 1.692 833 774 e-11 0.000 000 043 e-11 J +muon mass energy equivalent in MeV 105.658 3745 0.000 0024 MeV +muon mass in u 0.113 428 9257 0.000 000 0025 u +muon molar mass 0.113 428 9257 e-3 0.000 000 0025 e-3 kg mol^-1 +muon-neutron mass ratio 0.112 454 5167 0.000 000 0025 +muon-proton mag. mom. 
ratio -3.183 345 142 0.000 000 071 +muon-proton mass ratio 0.112 609 5262 0.000 000 0025 +muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2 +natural unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s +natural unit of action in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s +natural unit of energy 8.187 105 65 e-14 0.000 000 10 e-14 J +natural unit of energy in MeV 0.510 998 9461 0.000 000 0031 MeV +natural unit of length 386.159 267 64 e-15 0.000 000 18 e-15 m +natural unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg +natural unit of mom.um 2.730 924 488 e-22 0.000 000 034 e-22 kg m s^-1 +natural unit of mom.um in MeV/c 0.510 998 9461 0.000 000 0031 MeV/c +natural unit of time 1.288 088 667 12 e-21 0.000 000 000 58 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 904 81 e-15 0.000 000 000 88 e-15 m +neutron Compton wavelength over 2 pi 0.210 019 415 36 e-15 0.000 000 000 14 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 661 58 0.000 000 90 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 72 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio over 2 pi 29.164 6933 0.000 0069 MHz T^-1 +neutron mag. mom. -0.966 236 50 e-26 0.000 000 23 e-26 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 471 e-27 0.000 000 021 e-27 kg +neutron mass energy equivalent 1.505 349 739 e-10 0.000 000 019 e-10 J +neutron mass energy equivalent in MeV 939.565 4133 0.000 0058 MeV +neutron mass in u 1.008 664 915 88 0.000 000 000 49 u +neutron molar mass 1.008 664 915 88 e-3 0.000 000 000 49 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 08 0.000 000 20 +neutron-proton mag. mom. 
ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 573 77 e-30 0.000 000 85 e-30 +neutron-proton mass difference energy equivalent 2.072 146 37 e-13 0.000 000 76 e-13 +neutron-proton mass difference energy equivalent in MeV 1.293 332 05 0.000 000 48 +neutron-proton mass difference in u 0.001 388 449 00 0.000 000 000 51 +neutron-proton mass ratio 1.001 378 418 98 0.000 000 000 51 +neutron-tau mass ratio 0.528 790 0.000 048 +neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 08 e-11 0.000 31 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 61 e-39 0.000 31 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 699 e-27 0.000 000 031 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 2550 e-8 0.000 000 0015 e-8 eV T^-1 +nuclear magneton in inverse meters per tesla 2.542 623 432 e-2 0.000 000 016 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 2690 e-4 0.000 0021 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 285 0.000 000 047 MHz T^-1 +Planck constant 6.626 070 040 e-34 0.000 000 081 e-34 J s +Planck constant in eV s 4.135 667 662 e-15 0.000 000 025 e-15 eV s +Planck constant over 2 pi 1.054 571 800 e-34 0.000 000 013 e-34 J s +Planck constant over 2 pi in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s +Planck constant over 2 pi times c in MeV fm 197.326 9788 0.000 0012 MeV fm +Planck length 1.616 229 e-35 0.000 038 e-35 m +Planck mass 2.176 470 e-8 0.000 051 e-8 kg +Planck mass energy equivalent in GeV 1.220 910 e19 0.000 029 e19 GeV +Planck temperature 1.416 808 e32 0.000 033 e32 K +Planck time 5.391 16 e-44 0.000 13 e-44 s +proton charge to mass quotient 9.578 833 226 e7 0.000 000 059 e7 C kg^-1 +proton Compton wavelength 1.321 409 853 96 e-15 0.000 000 000 61 e-15 m +proton Compton wavelength over 2 pi 0.210 308910109e-15 0.000 000 000097e-15 m +proton-electron mass ratio 1836.152 673 89 0.000 000 17 +proton g factor 5.585 694 702 0.000 000 017 +proton gyromag. 
ratio 2.675 221 900 e8 0.000 000 018 e8 s^-1 T^-1 +proton gyromag. ratio over 2 pi 42.577 478 92 0.000 000 29 MHz T^-1 +proton mag. mom. 1.410 606 7873 e-26 0.000 000 0097 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 2053 e-3 0.000 000 0046 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 3508 0.000 000 0085 +proton mag. shielding correction 25.691 e-6 0.011 e-6 +proton mass 1.672 621 898 e-27 0.000 000 021 e-27 kg +proton mass energy equivalent 1.503 277 593 e-10 0.000 000 018 e-10 J +proton mass energy equivalent in MeV 938.272 0813 0.000 0058 MeV +proton mass in u 1.007 276 466 879 0.000 000 000 091 u +proton molar mass 1.007 276 466 879 e-3 0.000 000 000 091 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 38 0.000 000 20 +proton-neutron mag. mom. ratio -1.459 898 05 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 44 0.000 000 000 51 +proton rms charge radius 0.8751 e-15 0.0061 e-15 m +proton-tau mass ratio 0.528 063 0.000 048 +quantum of circulation 3.636 947 5486 e-4 0.000 000 0017 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 0972 e-4 0.000 000 0033 e-4 m^2 s^-1 +Rydberg constant 10 973 731.568 508 0.000 065 m^-1 +Rydberg constant times c in Hz 3.289 841 960 355 e15 0.000 000 000 019 e15 Hz +Rydberg constant times hc in eV 13.605 693 009 0.000 000 084 eV +Rydberg constant times hc in J 2.179 872 325 e-18 0.000 000 027 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7084 0.000 0014 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8714 0.000 0014 +second radiation constant 1.438 777 36 e-2 0.000 000 83 e-2 m K +shielded helion gyromag. ratio 2.037 894 585 e8 0.000 000 027 e8 s^-1 T^-1 +shielded helion gyromag. ratio over 2 pi 32.434 099 66 0.000 000 43 MHz T^-1 +shielded helion mag. mom. -1.074 553 080 e-26 0.000 000 014 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. 
to nuclear magneton ratio -2.127 497 720 0.000 000 025 +shielded helion to proton mag. mom. ratio -0.761 766 5603 0.000 000 0092 +shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 171 e8 0.000 000 033 e8 s^-1 T^-1 +shielded proton gyromag. ratio over 2 pi 42.576 385 07 0.000 000 53 MHz T^-1 +shielded proton mag. mom. 1.410 570 547 e-26 0.000 000 018 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 600 0.000 000 030 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 367 e-8 0.000 013 e-8 W m^-2 K^-4 +tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m +tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m +tau-electron mass ratio 3477.15 0.31 +tau mass 3.167 47 e-27 0.000 29 e-27 kg +tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J +tau mass energy equivalent in MeV 1776.82 0.16 MeV +tau mass in u 1.907 49 0.000 17 u +tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1 +tau-muon mass ratio 16.8167 0.0015 +tau-neutron mass ratio 1.891 11 0.000 17 +tau-proton mass ratio 1.893 72 0.000 17 +Thomson cross section 0.665 245 871 58 e-28 0.000 000 000 91 e-28 m^2 +triton-electron mass ratio 5496.921 535 88 0.000 000 26 +triton g factor 5.957 924 920 0.000 000 028 +triton mag. mom. 1.504 609 503 e-26 0.000 000 012 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 6616 e-3 0.000 000 0076 e-3 +triton mag. mom. 
to nuclear magneton ratio 2.978 962 460 0.000 000 014 +triton mass 5.007 356 665 e-27 0.000 000 062 e-27 kg +triton mass energy equivalent 4.500 387 735 e-10 0.000 000 055 e-10 J +triton mass energy equivalent in MeV 2808.921 112 0.000 017 MeV +triton mass in u 3.015 500 716 32 0.000 000 000 11 u +triton molar mass 3.015 500 716 32 e-3 0.000 000 000 11 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 033 48 0.000 000 000 22 +unified atomic mass unit 1.660 539 040 e-27 0.000 000 020 e-27 kg +von Klitzing constant 25 812.807 4555 0.000 0059 ohm +weak mixing angle 0.2223 0.0021 +Wien frequency displacement law constant 5.878 9238 e10 0.000 0034 e10 Hz K^-1 +Wien wavelength displacement law constant 2.897 7729 e-3 0.000 0017 e-3 m K""" + +txt2018 = """\ +alpha particle-electron mass ratio 7294.299 541 42 0.000 000 24 +alpha particle mass 6.644 657 3357 e-27 0.000 000 0020 e-27 kg +alpha particle mass energy equivalent 5.971 920 1914 e-10 0.000 000 0018 e-10 J +alpha particle mass energy equivalent in MeV 3727.379 4066 0.000 0011 MeV +alpha particle mass in u 4.001 506 179 127 0.000 000 000 063 u +alpha particle molar mass 4.001 506 1777 e-3 0.000 000 0012 e-3 kg mol^-1 +alpha particle-proton mass ratio 3.972 599 690 09 0.000 000 000 22 +alpha particle relative atomic mass 4.001 506 179 127 0.000 000 000 063 +Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m +atomic mass constant 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg +atomic mass constant energy equivalent 1.492 418 085 60 e-10 0.000 000 000 45 e-10 J +atomic mass constant energy equivalent in MeV 931.494 102 42 0.000 000 28 MeV +atomic mass unit-electron volt relationship 9.314 941 0242 e8 0.000 000 0028 e8 eV +atomic mass unit-hartree relationship 3.423 177 6874 e7 0.000 000 0010 e7 E_h +atomic mass unit-hertz relationship 2.252 342 718 71 e23 0.000 000 000 68 e23 Hz +atomic mass unit-inverse meter relationship 7.513 006 6104 e14 0.000 000 0023 e14 m^-1 +atomic mass unit-joule relationship 1.492 418 085 60 
e-10 0.000 000 000 45 e-10 J +atomic mass unit-kelvin relationship 1.080 954 019 16 e13 0.000 000 000 33 e13 K +atomic mass unit-kilogram relationship 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg +atomic unit of 1st hyperpolarizability 3.206 361 3061 e-53 0.000 000 0015 e-53 C^3 m^3 J^-2 +atomic unit of 2nd hyperpolarizability 6.235 379 9905 e-65 0.000 000 0038 e-65 C^4 m^4 J^-3 +atomic unit of action 1.054 571 817... e-34 (exact) J s +atomic unit of charge 1.602 176 634 e-19 (exact) C +atomic unit of charge density 1.081 202 384 57 e12 0.000 000 000 49 e12 C m^-3 +atomic unit of current 6.623 618 237 510 e-3 0.000 000 000 013 e-3 A +atomic unit of electric dipole mom. 8.478 353 6255 e-30 0.000 000 0013 e-30 C m +atomic unit of electric field 5.142 206 747 63 e11 0.000 000 000 78 e11 V m^-1 +atomic unit of electric field gradient 9.717 362 4292 e21 0.000 000 0029 e21 V m^-2 +atomic unit of electric polarizability 1.648 777 274 36 e-41 0.000 000 000 50 e-41 C^2 m^2 J^-1 +atomic unit of electric potential 27.211 386 245 988 0.000 000 000 053 V +atomic unit of electric quadrupole mom. 4.486 551 5246 e-40 0.000 000 0014 e-40 C m^2 +atomic unit of energy 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J +atomic unit of force 8.238 723 4983 e-8 0.000 000 0012 e-8 N +atomic unit of length 5.291 772 109 03 e-11 0.000 000 000 80 e-11 m +atomic unit of mag. dipole mom. 1.854 802 015 66 e-23 0.000 000 000 56 e-23 J T^-1 +atomic unit of mag. 
flux density 2.350 517 567 58 e5 0.000 000 000 71 e5 T +atomic unit of magnetizability 7.891 036 6008 e-29 0.000 000 0048 e-29 J T^-2 +atomic unit of mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg +atomic unit of momentum 1.992 851 914 10 e-24 0.000 000 000 30 e-24 kg m s^-1 +atomic unit of permittivity 1.112 650 055 45 e-10 0.000 000 000 17 e-10 F m^-1 +atomic unit of time 2.418 884 326 5857 e-17 0.000 000 000 0047 e-17 s +atomic unit of velocity 2.187 691 263 64 e6 0.000 000 000 33 e6 m s^-1 +Avogadro constant 6.022 140 76 e23 (exact) mol^-1 +Bohr magneton 9.274 010 0783 e-24 0.000 000 0028 e-24 J T^-1 +Bohr magneton in eV/T 5.788 381 8060 e-5 0.000 000 0017 e-5 eV T^-1 +Bohr magneton in Hz/T 1.399 624 493 61 e10 0.000 000 000 42 e10 Hz T^-1 +Bohr magneton in inverse meter per tesla 46.686 447 783 0.000 000 014 m^-1 T^-1 +Bohr magneton in K/T 0.671 713 815 63 0.000 000 000 20 K T^-1 +Bohr radius 5.291 772 109 03 e-11 0.000 000 000 80 e-11 m +Boltzmann constant 1.380 649 e-23 (exact) J K^-1 +Boltzmann constant in eV/K 8.617 333 262... e-5 (exact) eV K^-1 +Boltzmann constant in Hz/K 2.083 661 912... e10 (exact) Hz K^-1 +Boltzmann constant in inverse meter per kelvin 69.503 480 04... (exact) m^-1 K^-1 +characteristic impedance of vacuum 376.730 313 668 0.000 000 057 ohm +classical electron radius 2.817 940 3262 e-15 0.000 000 0013 e-15 m +Compton wavelength 2.426 310 238 67 e-12 0.000 000 000 73 e-12 m +conductance quantum 7.748 091 729... e-5 (exact) S +conventional value of ampere-90 1.000 000 088 87... (exact) A +conventional value of coulomb-90 1.000 000 088 87... (exact) C +conventional value of farad-90 0.999 999 982 20... (exact) F +conventional value of henry-90 1.000 000 017 79... (exact) H +conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1 +conventional value of ohm-90 1.000 000 017 79... (exact) ohm +conventional value of volt-90 1.000 000 106 66... 
(exact) V +conventional value of von Klitzing constant 25 812.807 (exact) ohm +conventional value of watt-90 1.000 000 195 53... (exact) W +Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m +deuteron-electron mag. mom. ratio -4.664 345 551 e-4 0.000 000 012 e-4 +deuteron-electron mass ratio 3670.482 967 88 0.000 000 13 +deuteron g factor 0.857 438 2338 0.000 000 0022 +deuteron mag. mom. 4.330 735 094 e-27 0.000 000 011 e-27 J T^-1 +deuteron mag. mom. to Bohr magneton ratio 4.669 754 570 e-4 0.000 000 012 e-4 +deuteron mag. mom. to nuclear magneton ratio 0.857 438 2338 0.000 000 0022 +deuteron mass 3.343 583 7724 e-27 0.000 000 0010 e-27 kg +deuteron mass energy equivalent 3.005 063 231 02 e-10 0.000 000 000 91 e-10 J +deuteron mass energy equivalent in MeV 1875.612 942 57 0.000 000 57 MeV +deuteron mass in u 2.013 553 212 745 0.000 000 000 040 u +deuteron molar mass 2.013 553 212 05 e-3 0.000 000 000 61 e-3 kg mol^-1 +deuteron-neutron mag. mom. ratio -0.448 206 53 0.000 000 11 +deuteron-proton mag. mom. ratio 0.307 012 209 39 0.000 000 000 79 +deuteron-proton mass ratio 1.999 007 501 39 0.000 000 000 11 +deuteron relative atomic mass 2.013 553 212 745 0.000 000 000 040 +deuteron rms charge radius 2.127 99 e-15 0.000 74 e-15 m +electron charge to mass quotient -1.758 820 010 76 e11 0.000 000 000 53 e11 C kg^-1 +electron-deuteron mag. mom. ratio -2143.923 4915 0.000 0056 +electron-deuteron mass ratio 2.724 437 107 462 e-4 0.000 000 000 096 e-4 +electron g factor -2.002 319 304 362 56 0.000 000 000 000 35 +electron gyromag. ratio 1.760 859 630 23 e11 0.000 000 000 53 e11 s^-1 T^-1 +electron gyromag. ratio in MHz/T 28 024.951 4242 0.000 0085 MHz T^-1 +electron-helion mass ratio 1.819 543 074 573 e-4 0.000 000 000 079 e-4 +electron mag. mom. -9.284 764 7043 e-24 0.000 000 0028 e-24 J T^-1 +electron mag. mom. anomaly 1.159 652 181 28 e-3 0.000 000 000 18 e-3 +electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 28 0.000 000 000 000 18 +electron mag. mom. 
to nuclear magneton ratio -1838.281 971 88 0.000 000 11 +electron mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg +electron mass energy equivalent 8.187 105 7769 e-14 0.000 000 0025 e-14 J +electron mass energy equivalent in MeV 0.510 998 950 00 0.000 000 000 15 MeV +electron mass in u 5.485 799 090 65 e-4 0.000 000 000 16 e-4 u +electron molar mass 5.485 799 0888 e-7 0.000 000 0017 e-7 kg mol^-1 +electron-muon mag. mom. ratio 206.766 9883 0.000 0046 +electron-muon mass ratio 4.836 331 69 e-3 0.000 000 11 e-3 +electron-neutron mag. mom. ratio 960.920 50 0.000 23 +electron-neutron mass ratio 5.438 673 4424 e-4 0.000 000 0026 e-4 +electron-proton mag. mom. ratio -658.210 687 89 0.000 000 20 +electron-proton mass ratio 5.446 170 214 87 e-4 0.000 000 000 33 e-4 +electron relative atomic mass 5.485 799 090 65 e-4 0.000 000 000 16 e-4 +electron-tau mass ratio 2.875 85 e-4 0.000 19 e-4 +electron to alpha particle mass ratio 1.370 933 554 787 e-4 0.000 000 000 045 e-4 +electron to shielded helion mag. mom. ratio 864.058 257 0.000 010 +electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072 +electron-triton mass ratio 1.819 200 062 251 e-4 0.000 000 000 090 e-4 +electron volt 1.602 176 634 e-19 (exact) J +electron volt-atomic mass unit relationship 1.073 544 102 33 e-9 0.000 000 000 32 e-9 u +electron volt-hartree relationship 3.674 932 217 5655 e-2 0.000 000 000 0071 e-2 E_h +electron volt-hertz relationship 2.417 989 242... e14 (exact) Hz +electron volt-inverse meter relationship 8.065 543 937... e5 (exact) m^-1 +electron volt-joule relationship 1.602 176 634 e-19 (exact) J +electron volt-kelvin relationship 1.160 451 812... e4 (exact) K +electron volt-kilogram relationship 1.782 661 921... e-36 (exact) kg +elementary charge 1.602 176 634 e-19 (exact) C +elementary charge over h-bar 1.519 267 447... e15 (exact) A J^-1 +Faraday constant 96 485.332 12... 
(exact) C mol^-1 +Fermi coupling constant 1.166 3787 e-5 0.000 0006 e-5 GeV^-2 +fine-structure constant 7.297 352 5693 e-3 0.000 000 0011 e-3 +first radiation constant 3.741 771 852... e-16 (exact) W m^2 +first radiation constant for spectral radiance 1.191 042 972... e-16 (exact) W m^2 sr^-1 +hartree-atomic mass unit relationship 2.921 262 322 05 e-8 0.000 000 000 88 e-8 u +hartree-electron volt relationship 27.211 386 245 988 0.000 000 000 053 eV +Hartree energy 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J +Hartree energy in eV 27.211 386 245 988 0.000 000 000 053 eV +hartree-hertz relationship 6.579 683 920 502 e15 0.000 000 000 013 e15 Hz +hartree-inverse meter relationship 2.194 746 313 6320 e7 0.000 000 000 0043 e7 m^-1 +hartree-joule relationship 4.359 744 722 2071 e-18 0.000 000 000 0085 e-18 J +hartree-kelvin relationship 3.157 750 248 0407 e5 0.000 000 000 0061 e5 K +hartree-kilogram relationship 4.850 870 209 5432 e-35 0.000 000 000 0094 e-35 kg +helion-electron mass ratio 5495.885 280 07 0.000 000 24 +helion g factor -4.255 250 615 0.000 000 050 +helion mag. mom. -1.074 617 532 e-26 0.000 000 013 e-26 J T^-1 +helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3 +helion mag. mom. to nuclear magneton ratio -2.127 625 307 0.000 000 025 +helion mass 5.006 412 7796 e-27 0.000 000 0015 e-27 kg +helion mass energy equivalent 4.499 539 4125 e-10 0.000 000 0014 e-10 J +helion mass energy equivalent in MeV 2808.391 607 43 0.000 000 85 MeV +helion mass in u 3.014 932 247 175 0.000 000 000 097 u +helion molar mass 3.014 932 246 13 e-3 0.000 000 000 91 e-3 kg mol^-1 +helion-proton mass ratio 2.993 152 671 67 0.000 000 000 13 +helion relative atomic mass 3.014 932 247 175 0.000 000 000 097 +helion shielding shift 5.996 743 e-5 0.000 010 e-5 +hertz-atomic mass unit relationship 4.439 821 6652 e-24 0.000 000 0013 e-24 u +hertz-electron volt relationship 4.135 667 696... 
e-15 (exact) eV +hertz-hartree relationship 1.519 829 846 0570 e-16 0.000 000 000 0029 e-16 E_h +hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1 +hertz-joule relationship 6.626 070 15 e-34 (exact) J +hertz-kelvin relationship 4.799 243 073... e-11 (exact) K +hertz-kilogram relationship 7.372 497 323... e-51 (exact) kg +hyperfine transition frequency of Cs-133 9 192 631 770 (exact) Hz +inverse fine-structure constant 137.035 999 084 0.000 000 021 +inverse meter-atomic mass unit relationship 1.331 025 050 10 e-15 0.000 000 000 40 e-15 u +inverse meter-electron volt relationship 1.239 841 984... e-6 (exact) eV +inverse meter-hartree relationship 4.556 335 252 9120 e-8 0.000 000 000 0088 e-8 E_h +inverse meter-hertz relationship 299 792 458 (exact) Hz +inverse meter-joule relationship 1.986 445 857... e-25 (exact) J +inverse meter-kelvin relationship 1.438 776 877... e-2 (exact) K +inverse meter-kilogram relationship 2.210 219 094... e-42 (exact) kg +inverse of conductance quantum 12 906.403 72... (exact) ohm +Josephson constant 483 597.848 4... e9 (exact) Hz V^-1 +joule-atomic mass unit relationship 6.700 535 2565 e9 0.000 000 0020 e9 u +joule-electron volt relationship 6.241 509 074... e18 (exact) eV +joule-hartree relationship 2.293 712 278 3963 e17 0.000 000 000 0045 e17 E_h +joule-hertz relationship 1.509 190 179... e33 (exact) Hz +joule-inverse meter relationship 5.034 116 567... e24 (exact) m^-1 +joule-kelvin relationship 7.242 970 516... e22 (exact) K +joule-kilogram relationship 1.112 650 056... e-17 (exact) kg +kelvin-atomic mass unit relationship 9.251 087 3014 e-14 0.000 000 0028 e-14 u +kelvin-electron volt relationship 8.617 333 262... e-5 (exact) eV +kelvin-hartree relationship 3.166 811 563 4556 e-6 0.000 000 000 0061 e-6 E_h +kelvin-hertz relationship 2.083 661 912... e10 (exact) Hz +kelvin-inverse meter relationship 69.503 480 04... 
(exact) m^-1 +kelvin-joule relationship 1.380 649 e-23 (exact) J +kelvin-kilogram relationship 1.536 179 187... e-40 (exact) kg +kilogram-atomic mass unit relationship 6.022 140 7621 e26 0.000 000 0018 e26 u +kilogram-electron volt relationship 5.609 588 603... e35 (exact) eV +kilogram-hartree relationship 2.061 485 788 7409 e34 0.000 000 000 0040 e34 E_h +kilogram-hertz relationship 1.356 392 489... e50 (exact) Hz +kilogram-inverse meter relationship 4.524 438 335... e41 (exact) m^-1 +kilogram-joule relationship 8.987 551 787... e16 (exact) J +kilogram-kelvin relationship 6.509 657 260... e39 (exact) K +lattice parameter of silicon 5.431 020 511 e-10 0.000 000 089 e-10 m +lattice spacing of ideal Si (220) 1.920 155 716 e-10 0.000 000 032 e-10 m +Loschmidt constant (273.15 K, 100 kPa) 2.651 645 804... e25 (exact) m^-3 +Loschmidt constant (273.15 K, 101.325 kPa) 2.686 780 111... e25 (exact) m^-3 +luminous efficacy 683 (exact) lm W^-1 +mag. flux quantum 2.067 833 848... e-15 (exact) Wb +molar gas constant 8.314 462 618... (exact) J mol^-1 K^-1 +molar mass constant 0.999 999 999 65 e-3 0.000 000 000 30 e-3 kg mol^-1 +molar mass of carbon-12 11.999 999 9958 e-3 0.000 000 0036 e-3 kg mol^-1 +molar Planck constant 3.990 312 712... e-10 (exact) J Hz^-1 mol^-1 +molar volume of ideal gas (273.15 K, 100 kPa) 22.710 954 64... e-3 (exact) m^3 mol^-1 +molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 969 54... e-3 (exact) m^3 mol^-1 +molar volume of silicon 1.205 883 199 e-5 0.000 000 060 e-5 m^3 mol^-1 +Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m +muon Compton wavelength 1.173 444 110 e-14 0.000 000 026 e-14 m +muon-electron mass ratio 206.768 2830 0.000 0046 +muon g factor -2.002 331 8418 0.000 000 0013 +muon mag. mom. -4.490 448 30 e-26 0.000 000 10 e-26 J T^-1 +muon mag. mom. anomaly 1.165 920 89 e-3 0.000 000 63 e-3 +muon mag. mom. to Bohr magneton ratio -4.841 970 47 e-3 0.000 000 11 e-3 +muon mag. mom. 
to nuclear magneton ratio -8.890 597 03 0.000 000 20 +muon mass 1.883 531 627 e-28 0.000 000 042 e-28 kg +muon mass energy equivalent 1.692 833 804 e-11 0.000 000 038 e-11 J +muon mass energy equivalent in MeV 105.658 3755 0.000 0023 MeV +muon mass in u 0.113 428 9259 0.000 000 0025 u +muon molar mass 1.134 289 259 e-4 0.000 000 025 e-4 kg mol^-1 +muon-neutron mass ratio 0.112 454 5170 0.000 000 0025 +muon-proton mag. mom. ratio -3.183 345 142 0.000 000 071 +muon-proton mass ratio 0.112 609 5264 0.000 000 0025 +muon-tau mass ratio 5.946 35 e-2 0.000 40 e-2 +natural unit of action 1.054 571 817... e-34 (exact) J s +natural unit of action in eV s 6.582 119 569... e-16 (exact) eV s +natural unit of energy 8.187 105 7769 e-14 0.000 000 0025 e-14 J +natural unit of energy in MeV 0.510 998 950 00 0.000 000 000 15 MeV +natural unit of length 3.861 592 6796 e-13 0.000 000 0012 e-13 m +natural unit of mass 9.109 383 7015 e-31 0.000 000 0028 e-31 kg +natural unit of momentum 2.730 924 530 75 e-22 0.000 000 000 82 e-22 kg m s^-1 +natural unit of momentum in MeV/c 0.510 998 950 00 0.000 000 000 15 MeV/c +natural unit of time 1.288 088 668 19 e-21 0.000 000 000 39 e-21 s +natural unit of velocity 299 792 458 (exact) m s^-1 +neutron Compton wavelength 1.319 590 905 81 e-15 0.000 000 000 75 e-15 m +neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3 +neutron-electron mass ratio 1838.683 661 73 0.000 000 89 +neutron g factor -3.826 085 45 0.000 000 90 +neutron gyromag. ratio 1.832 471 71 e8 0.000 000 43 e8 s^-1 T^-1 +neutron gyromag. ratio in MHz/T 29.164 6931 0.000 0069 MHz T^-1 +neutron mag. mom. -9.662 3651 e-27 0.000 0023 e-27 J T^-1 +neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3 +neutron mag. mom. 
to nuclear magneton ratio -1.913 042 73 0.000 000 45 +neutron mass 1.674 927 498 04 e-27 0.000 000 000 95 e-27 kg +neutron mass energy equivalent 1.505 349 762 87 e-10 0.000 000 000 86 e-10 J +neutron mass energy equivalent in MeV 939.565 420 52 0.000 000 54 MeV +neutron mass in u 1.008 664 915 95 0.000 000 000 49 u +neutron molar mass 1.008 664 915 60 e-3 0.000 000 000 57 e-3 kg mol^-1 +neutron-muon mass ratio 8.892 484 06 0.000 000 20 +neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16 +neutron-proton mass difference 2.305 574 35 e-30 0.000 000 82 e-30 kg +neutron-proton mass difference energy equivalent 2.072 146 89 e-13 0.000 000 74 e-13 J +neutron-proton mass difference energy equivalent in MeV 1.293 332 36 0.000 000 46 MeV +neutron-proton mass difference in u 1.388 449 33 e-3 0.000 000 49 e-3 u +neutron-proton mass ratio 1.001 378 419 31 0.000 000 000 49 +neutron relative atomic mass 1.008 664 915 95 0.000 000 000 49 +neutron-tau mass ratio 0.528 779 0.000 036 +neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16 +Newtonian constant of gravitation 6.674 30 e-11 0.000 15 e-11 m^3 kg^-1 s^-2 +Newtonian constant of gravitation over h-bar c 6.708 83 e-39 0.000 15 e-39 (GeV/c^2)^-2 +nuclear magneton 5.050 783 7461 e-27 0.000 000 0015 e-27 J T^-1 +nuclear magneton in eV/T 3.152 451 258 44 e-8 0.000 000 000 96 e-8 eV T^-1 +nuclear magneton in inverse meter per tesla 2.542 623 413 53 e-2 0.000 000 000 78 e-2 m^-1 T^-1 +nuclear magneton in K/T 3.658 267 7756 e-4 0.000 000 0011 e-4 K T^-1 +nuclear magneton in MHz/T 7.622 593 2291 0.000 000 0023 MHz T^-1 +Planck constant 6.626 070 15 e-34 (exact) J Hz^-1 +Planck constant in eV/Hz 4.135 667 696... 
e-15 (exact) eV Hz^-1 +Planck length 1.616 255 e-35 0.000 018 e-35 m +Planck mass 2.176 434 e-8 0.000 024 e-8 kg +Planck mass energy equivalent in GeV 1.220 890 e19 0.000 014 e19 GeV +Planck temperature 1.416 784 e32 0.000 016 e32 K +Planck time 5.391 247 e-44 0.000 060 e-44 s +proton charge to mass quotient 9.578 833 1560 e7 0.000 000 0029 e7 C kg^-1 +proton Compton wavelength 1.321 409 855 39 e-15 0.000 000 000 40 e-15 m +proton-electron mass ratio 1836.152 673 43 0.000 000 11 +proton g factor 5.585 694 6893 0.000 000 0016 +proton gyromag. ratio 2.675 221 8744 e8 0.000 000 0011 e8 s^-1 T^-1 +proton gyromag. ratio in MHz/T 42.577 478 518 0.000 000 018 MHz T^-1 +proton mag. mom. 1.410 606 797 36 e-26 0.000 000 000 60 e-26 J T^-1 +proton mag. mom. to Bohr magneton ratio 1.521 032 202 30 e-3 0.000 000 000 46 e-3 +proton mag. mom. to nuclear magneton ratio 2.792 847 344 63 0.000 000 000 82 +proton mag. shielding correction 2.5689 e-5 0.0011 e-5 +proton mass 1.672 621 923 69 e-27 0.000 000 000 51 e-27 kg +proton mass energy equivalent 1.503 277 615 98 e-10 0.000 000 000 46 e-10 J +proton mass energy equivalent in MeV 938.272 088 16 0.000 000 29 MeV +proton mass in u 1.007 276 466 621 0.000 000 000 053 u +proton molar mass 1.007 276 466 27 e-3 0.000 000 000 31 e-3 kg mol^-1 +proton-muon mass ratio 8.880 243 37 0.000 000 20 +proton-neutron mag. mom. 
ratio -1.459 898 05 0.000 000 34 +proton-neutron mass ratio 0.998 623 478 12 0.000 000 000 49 +proton relative atomic mass 1.007 276 466 621 0.000 000 000 053 +proton rms charge radius 8.414 e-16 0.019 e-16 m +proton-tau mass ratio 0.528 051 0.000 036 +quantum of circulation 3.636 947 5516 e-4 0.000 000 0011 e-4 m^2 s^-1 +quantum of circulation times 2 7.273 895 1032 e-4 0.000 000 0022 e-4 m^2 s^-1 +reduced Compton wavelength 3.861 592 6796 e-13 0.000 000 0012 e-13 m +reduced muon Compton wavelength 1.867 594 306 e-15 0.000 000 042 e-15 m +reduced neutron Compton wavelength 2.100 194 1552 e-16 0.000 000 0012 e-16 m +reduced Planck constant 1.054 571 817... e-34 (exact) J s +reduced Planck constant in eV s 6.582 119 569... e-16 (exact) eV s +reduced Planck constant times c in MeV fm 197.326 980 4... (exact) MeV fm +reduced proton Compton wavelength 2.103 089 103 36 e-16 0.000 000 000 64 e-16 m +reduced tau Compton wavelength 1.110 538 e-16 0.000 075 e-16 m +Rydberg constant 10 973 731.568 160 0.000 021 m^-1 +Rydberg constant times c in Hz 3.289 841 960 2508 e15 0.000 000 000 0064 e15 Hz +Rydberg constant times hc in eV 13.605 693 122 994 0.000 000 000 026 eV +Rydberg constant times hc in J 2.179 872 361 1035 e-18 0.000 000 000 0042 e-18 J +Sackur-Tetrode constant (1 K, 100 kPa) -1.151 707 537 06 0.000 000 000 45 +Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 870 523 58 0.000 000 000 45 +second radiation constant 1.438 776 877... e-2 (exact) m K +shielded helion gyromag. ratio 2.037 894 569 e8 0.000 000 024 e8 s^-1 T^-1 +shielded helion gyromag. ratio in MHz/T 32.434 099 42 0.000 000 38 MHz T^-1 +shielded helion mag. mom. -1.074 553 090 e-26 0.000 000 013 e-26 J T^-1 +shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3 +shielded helion mag. mom. to nuclear magneton ratio -2.127 497 719 0.000 000 025 +shielded helion to proton mag. mom. ratio -0.761 766 5618 0.000 000 0089 +shielded helion to shielded proton mag. mom. 
ratio -0.761 786 1313 0.000 000 0033 +shielded proton gyromag. ratio 2.675 153 151 e8 0.000 000 029 e8 s^-1 T^-1 +shielded proton gyromag. ratio in MHz/T 42.576 384 74 0.000 000 46 MHz T^-1 +shielded proton mag. mom. 1.410 570 560 e-26 0.000 000 015 e-26 J T^-1 +shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3 +shielded proton mag. mom. to nuclear magneton ratio 2.792 775 599 0.000 000 030 +shielding difference of d and p in HD 2.0200 e-8 0.0020 e-8 +shielding difference of t and p in HT 2.4140 e-8 0.0020 e-8 +speed of light in vacuum 299 792 458 (exact) m s^-1 +standard acceleration of gravity 9.806 65 (exact) m s^-2 +standard atmosphere 101 325 (exact) Pa +standard-state pressure 100 000 (exact) Pa +Stefan-Boltzmann constant 5.670 374 419... e-8 (exact) W m^-2 K^-4 +tau Compton wavelength 6.977 71 e-16 0.000 47 e-16 m +tau-electron mass ratio 3477.23 0.23 +tau energy equivalent 1776.86 0.12 MeV +tau mass 3.167 54 e-27 0.000 21 e-27 kg +tau mass energy equivalent 2.846 84 e-10 0.000 19 e-10 J +tau mass in u 1.907 54 0.000 13 u +tau molar mass 1.907 54 e-3 0.000 13 e-3 kg mol^-1 +tau-muon mass ratio 16.8170 0.0011 +tau-neutron mass ratio 1.891 15 0.000 13 +tau-proton mass ratio 1.893 76 0.000 13 +Thomson cross section 6.652 458 7321 e-29 0.000 000 0060 e-29 m^2 +triton-electron mass ratio 5496.921 535 73 0.000 000 27 +triton g factor 5.957 924 931 0.000 000 012 +triton mag. mom. 1.504 609 5202 e-26 0.000 000 0030 e-26 J T^-1 +triton mag. mom. to Bohr magneton ratio 1.622 393 6651 e-3 0.000 000 0032 e-3 +triton mag. mom. 
to nuclear magneton ratio 2.978 962 4656 0.000 000 0059 +triton mass 5.007 356 7446 e-27 0.000 000 0015 e-27 kg +triton mass energy equivalent 4.500 387 8060 e-10 0.000 000 0014 e-10 J +triton mass energy equivalent in MeV 2808.921 132 98 0.000 000 85 MeV +triton mass in u 3.015 500 716 21 0.000 000 000 12 u +triton molar mass 3.015 500 715 17 e-3 0.000 000 000 92 e-3 kg mol^-1 +triton-proton mass ratio 2.993 717 034 14 0.000 000 000 15 +triton relative atomic mass 3.015 500 716 21 0.000 000 000 12 +triton to proton mag. mom. ratio 1.066 639 9191 0.000 000 0021 +unified atomic mass unit 1.660 539 066 60 e-27 0.000 000 000 50 e-27 kg +vacuum electric permittivity 8.854 187 8128 e-12 0.000 000 0013 e-12 F m^-1 +vacuum mag. permeability 1.256 637 062 12 e-6 0.000 000 000 19 e-6 N A^-2 +von Klitzing constant 25 812.807 45... (exact) ohm +weak mixing angle 0.222 90 0.000 30 +Wien frequency displacement law constant 5.878 925 757... e10 (exact) Hz K^-1 +Wien wavelength displacement law constant 2.897 771 955... 
e-3 (exact) m K +W to Z mass ratio 0.881 53 0.000 17 """ + +# ----------------------------------------------------------------------------- + +physical_constants: dict[str, tuple[float, str, float]] = {} + + +def parse_constants_2002to2014(d: str) -> dict[str, tuple[float, str, float]]: + constants = {} + for line in d.split('\n'): + name = line[:55].rstrip() + val = float(line[55:77].replace(' ', '').replace('...', '')) + uncert = float(line[77:99].replace(' ', '').replace('(exact)', '0')) + units = line[99:].rstrip() + constants[name] = (val, units, uncert) + return constants + + +def parse_constants_2018toXXXX(d: str) -> dict[str, tuple[float, str, float]]: + constants = {} + for line in d.split('\n'): + name = line[:60].rstrip() + val = float(line[60:85].replace(' ', '').replace('...', '')) + uncert = float(line[85:110].replace(' ', '').replace('(exact)', '0')) + units = line[110:].rstrip() + constants[name] = (val, units, uncert) + return constants + + +_physical_constants_2002 = parse_constants_2002to2014(txt2002) +_physical_constants_2006 = parse_constants_2002to2014(txt2006) +_physical_constants_2010 = parse_constants_2002to2014(txt2010) +_physical_constants_2014 = parse_constants_2002to2014(txt2014) +_physical_constants_2018 = parse_constants_2018toXXXX(txt2018) + + +physical_constants.update(_physical_constants_2002) +physical_constants.update(_physical_constants_2006) +physical_constants.update(_physical_constants_2010) +physical_constants.update(_physical_constants_2014) +physical_constants.update(_physical_constants_2018) +_current_constants = _physical_constants_2018 +_current_codata = "CODATA 2018" + +# check obsolete values +_obsolete_constants = {} +for k in physical_constants: + if k not in _current_constants: + _obsolete_constants[k] = True + +# generate some additional aliases +_aliases = {} +for k in _physical_constants_2002: + if 'magn.' 
in k: + _aliases[k] = k.replace('magn.', 'mag.') +for k in _physical_constants_2006: + if 'momentum' in k: + _aliases[k] = k.replace('momentum', 'mom.um') +for k in _physical_constants_2018: + if 'momentum' in k: + _aliases[k] = k.replace('momentum', 'mom.um') + +# CODATA 2018: renamed and no longer exact; use as aliases +_aliases['mag. constant'] = 'vacuum mag. permeability' +_aliases['electric constant'] = 'vacuum electric permittivity' + + +class ConstantWarning(DeprecationWarning): + """Accessing a constant no longer in current CODATA data set""" + pass + + +def _check_obsolete(key: str) -> None: + if key in _obsolete_constants and key not in _aliases: + warnings.warn(f"Constant '{key}' is not in current {_current_codata} data set", + ConstantWarning, stacklevel=3) + + +def value(key: str) -> float: + """ + Value in physical_constants indexed by key + + Parameters + ---------- + key : Python string + Key in dictionary `physical_constants` + + Returns + ------- + value : float + Value in `physical_constants` corresponding to `key` + + Examples + -------- + >>> from scipy import constants + >>> constants.value('elementary charge') + 1.602176634e-19 + + """ + _check_obsolete(key) + return physical_constants[key][0] + + +def unit(key: str) -> str: + """ + Unit in physical_constants indexed by key + + Parameters + ---------- + key : Python string + Key in dictionary `physical_constants` + + Returns + ------- + unit : Python string + Unit in `physical_constants` corresponding to `key` + + Examples + -------- + >>> from scipy import constants + >>> constants.unit('proton mass') + 'kg' + + """ + _check_obsolete(key) + return physical_constants[key][1] + + +def precision(key: str) -> float: + """ + Relative precision in physical_constants indexed by key + + Parameters + ---------- + key : Python string + Key in dictionary `physical_constants` + + Returns + ------- + prec : float + Relative precision in `physical_constants` corresponding to `key` + + Examples + -------- 
+ >>> from scipy import constants + >>> constants.precision('proton mass') + 5.1e-37 + + """ + _check_obsolete(key) + return physical_constants[key][2] / physical_constants[key][0] + + +def find(sub: str | None = None, disp: bool = False) -> Any: + """ + Return list of physical_constant keys containing a given string. + + Parameters + ---------- + sub : str + Sub-string to search keys for. By default, return all keys. + disp : bool + If True, print the keys that are found and return None. + Otherwise, return the list of keys without printing anything. + + Returns + ------- + keys : list or None + If `disp` is False, the list of keys is returned. + Otherwise, None is returned. + + Examples + -------- + >>> from scipy.constants import find, physical_constants + + Which keys in the ``physical_constants`` dictionary contain 'boltzmann'? + + >>> find('boltzmann') + ['Boltzmann constant', + 'Boltzmann constant in Hz/K', + 'Boltzmann constant in eV/K', + 'Boltzmann constant in inverse meter per kelvin', + 'Stefan-Boltzmann constant'] + + Get the constant called 'Boltzmann constant in Hz/K': + + >>> physical_constants['Boltzmann constant in Hz/K'] + (20836619120.0, 'Hz K^-1', 0.0) + + Find constants with 'radius' in the key: + + >>> find('radius') + ['Bohr radius', + 'classical electron radius', + 'deuteron rms charge radius', + 'proton rms charge radius'] + >>> physical_constants['classical electron radius'] + (2.8179403262e-15, 'm', 1.3e-24) + + """ + if sub is None: + result = list(_current_constants.keys()) + else: + result = [key for key in _current_constants + if sub.lower() in key.lower()] + + result.sort() + if disp: + for key in result: + print(key) + return + else: + return result + + +c = value('speed of light in vacuum') +mu0 = value('vacuum mag. 
permeability') +epsilon0 = value('vacuum electric permittivity') + +# Table is lacking some digits for exact values: calculate from definition +exact_values = { + 'joule-kilogram relationship': (1 / (c * c), 'kg', 0.0), + 'kilogram-joule relationship': (c * c, 'J', 0.0), + 'hertz-inverse meter relationship': (1 / c, 'm^-1', 0.0), +} + +# sanity check +for key in exact_values: + val = physical_constants[key][0] + if abs(exact_values[key][0] - val) / val > 1e-9: + raise ValueError("Constants.codata: exact values too far off.") + if exact_values[key][2] == 0 and physical_constants[key][2] != 0: + raise ValueError("Constants.codata: value not exact") + +physical_constants.update(exact_values) + +_tested_keys = ['natural unit of velocity', + 'natural unit of action', + 'natural unit of action in eV s', + 'natural unit of mass', + 'natural unit of energy', + 'natural unit of energy in MeV', + 'natural unit of mom.um', + 'natural unit of mom.um in MeV/c', + 'natural unit of length', + 'natural unit of time'] + +# finally, insert aliases for values +for k, v in list(_aliases.items()): + if v in _current_constants or v in _tested_keys: + physical_constants[k] = physical_constants[v] + else: + del _aliases[k] diff --git a/venv/lib/python3.10/site-packages/scipy/constants/_constants.py b/venv/lib/python3.10/site-packages/scipy/constants/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..fa379828ddd62bedc92f2e0e81b51ce550ca90fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/constants/_constants.py @@ -0,0 +1,362 @@ +""" +Collection of physical constants and conversion factors. + +Most constants are in SI units, so you can do +print '10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots' + +The list is not meant to be comprehensive, but just convenient for everyday use. 
+""" + +from __future__ import annotations + +import math as _math +from typing import TYPE_CHECKING, Any + +from ._codata import value as _cd +import numpy as _np + +if TYPE_CHECKING: + import numpy.typing as npt + +""" +BasSw 2006 +physical constants: imported from CODATA +unit conversion: see e.g., NIST special publication 811 +Use at own risk: double-check values before calculating your Mars orbit-insertion burn. +Some constants exist in a few variants, which are marked with suffixes. +The ones without any suffix should be the most common ones. +""" + +__all__ = [ + 'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G', + 'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg', + 'Stefan_Boltzmann', 'Wien', 'acre', 'alpha', + 'angstrom', 'arcmin', 'arcminute', 'arcsec', + 'arcsecond', 'astronomical_unit', 'atm', + 'atmosphere', 'atomic_mass', 'atto', 'au', 'bar', + 'barrel', 'bbl', 'blob', 'c', 'calorie', + 'calorie_IT', 'calorie_th', 'carat', 'centi', + 'convert_temperature', 'day', 'deci', 'degree', + 'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e', + 'eV', 'electron_mass', 'electron_volt', + 'elementary_charge', 'epsilon_0', 'erg', + 'exa', 'exbi', 'femto', 'fermi', 'fine_structure', + 'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp', + 'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp', + 'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio', + 'grain', 'gram', 'gravitational_constant', 'h', 'hbar', + 'hectare', 'hecto', 'horsepower', 'hour', 'hp', + 'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force', + 'kmh', 'knot', 'lambda2nu', 'lb', 'lbf', + 'light_year', 'liter', 'litre', 'long_ton', 'm_e', + 'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega', + 'metric_ton', 'micro', 'micron', 'mil', 'mile', + 'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano', + 'nautical_mile', 'neutron_mass', 'nu2lambda', + 'ounce', 'oz', 'parsec', 'pebi', 'peta', + 'pi', 'pico', 'point', 'pound', 'pound_force', + 'proton_mass', 'psi', 'pt', 'quecto', 'quetta', 'ronna', 'ronto', + 
'short_ton', 'sigma', 'slinch', 'slug', 'speed_of_light', + 'speed_of_sound', 'stone', 'survey_foot', + 'survey_mile', 'tebi', 'tera', 'ton_TNT', + 'torr', 'troy_ounce', 'troy_pound', 'u', + 'week', 'yard', 'year', 'yobi', 'yocto', + 'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta' +] + + +# mathematical constants +pi = _math.pi +golden = golden_ratio = (1 + _math.sqrt(5)) / 2 + +# SI prefixes +quetta = 1e30 +ronna = 1e27 +yotta = 1e24 +zetta = 1e21 +exa = 1e18 +peta = 1e15 +tera = 1e12 +giga = 1e9 +mega = 1e6 +kilo = 1e3 +hecto = 1e2 +deka = 1e1 +deci = 1e-1 +centi = 1e-2 +milli = 1e-3 +micro = 1e-6 +nano = 1e-9 +pico = 1e-12 +femto = 1e-15 +atto = 1e-18 +zepto = 1e-21 +yocto = 1e-24 +ronto = 1e-27 +quecto = 1e-30 + +# binary prefixes +kibi = 2**10 +mebi = 2**20 +gibi = 2**30 +tebi = 2**40 +pebi = 2**50 +exbi = 2**60 +zebi = 2**70 +yobi = 2**80 + +# physical constants +c = speed_of_light = _cd('speed of light in vacuum') +mu_0 = _cd('vacuum mag. permeability') +epsilon_0 = _cd('vacuum electric permittivity') +h = Planck = _cd('Planck constant') +hbar = h / (2 * pi) +G = gravitational_constant = _cd('Newtonian constant of gravitation') +g = _cd('standard acceleration of gravity') +e = elementary_charge = _cd('elementary charge') +R = gas_constant = _cd('molar gas constant') +alpha = fine_structure = _cd('fine-structure constant') +N_A = Avogadro = _cd('Avogadro constant') +k = Boltzmann = _cd('Boltzmann constant') +sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant') +Wien = _cd('Wien wavelength displacement law constant') +Rydberg = _cd('Rydberg constant') + +# mass in kg +gram = 1e-3 +metric_ton = 1e3 +grain = 64.79891e-6 +lb = pound = 7000 * grain # avoirdupois +blob = slinch = pound * g / 0.0254 # lbf*s**2/in (added in 1.0.0) +slug = blob / 12 # lbf*s**2/foot (added in 1.0.0) +oz = ounce = pound / 16 +stone = 14 * pound +long_ton = 2240 * pound +short_ton = 2000 * pound + +troy_ounce = 480 * grain # only for metals / gems +troy_pound = 12 * troy_ounce 
+carat = 200e-6 + +m_e = electron_mass = _cd('electron mass') +m_p = proton_mass = _cd('proton mass') +m_n = neutron_mass = _cd('neutron mass') +m_u = u = atomic_mass = _cd('atomic mass constant') + +# angle in rad +degree = pi / 180 +arcmin = arcminute = degree / 60 +arcsec = arcsecond = arcmin / 60 + +# time in second +minute = 60.0 +hour = 60 * minute +day = 24 * hour +week = 7 * day +year = 365 * day +Julian_year = 365.25 * day + +# length in meter +inch = 0.0254 +foot = 12 * inch +yard = 3 * foot +mile = 1760 * yard +mil = inch / 1000 +pt = point = inch / 72 # typography +survey_foot = 1200.0 / 3937 +survey_mile = 5280 * survey_foot +nautical_mile = 1852.0 +fermi = 1e-15 +angstrom = 1e-10 +micron = 1e-6 +au = astronomical_unit = 149597870700.0 +light_year = Julian_year * c +parsec = au / arcsec + +# pressure in pascal +atm = atmosphere = _cd('standard atmosphere') +bar = 1e5 +torr = mmHg = atm / 760 +psi = pound * g / (inch * inch) + +# area in meter**2 +hectare = 1e4 +acre = 43560 * foot**2 + +# volume in meter**3 +litre = liter = 1e-3 +gallon = gallon_US = 231 * inch**3 # US +# pint = gallon_US / 8 +fluid_ounce = fluid_ounce_US = gallon_US / 128 +bbl = barrel = 42 * gallon_US # for oil + +gallon_imp = 4.54609e-3 # UK +fluid_ounce_imp = gallon_imp / 160 + +# speed in meter per second +kmh = 1e3 / hour +mph = mile / hour +# approx value of mach at 15 degrees in 1 atm. Is this a common value? 
+mach = speed_of_sound = 340.5 +knot = nautical_mile / hour + +# temperature in kelvin +zero_Celsius = 273.15 +degree_Fahrenheit = 1/1.8 # only for differences + +# energy in joule +eV = electron_volt = elementary_charge # * 1 Volt +calorie = calorie_th = 4.184 +calorie_IT = 4.1868 +erg = 1e-7 +Btu_th = pound * degree_Fahrenheit * calorie_th / gram +Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram +ton_TNT = 1e9 * calorie_th +# Wh = watt_hour + +# power in watt +hp = horsepower = 550 * foot * pound * g + +# force in newton +dyn = dyne = 1e-5 +lbf = pound_force = pound * g +kgf = kilogram_force = g # * 1 kg + +# functions for conversions that are not linear + + +def convert_temperature( + val: npt.ArrayLike, + old_scale: str, + new_scale: str, +) -> Any: + """ + Convert from a temperature scale to another one among Celsius, Kelvin, + Fahrenheit, and Rankine scales. + + Parameters + ---------- + val : array_like + Value(s) of the temperature(s) to be converted expressed in the + original scale. + old_scale : str + Specifies as a string the original scale from which the temperature + value(s) will be converted. Supported scales are Celsius ('Celsius', + 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), + Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine + ('Rankine', 'rankine', 'R', 'r'). + new_scale : str + Specifies as a string the new scale to which the temperature + value(s) will be converted. Supported scales are Celsius ('Celsius', + 'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'), + Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine + ('Rankine', 'rankine', 'R', 'r'). + + Returns + ------- + res : float or array of floats + Value(s) of the converted temperature(s) expressed in the new scale. + + Notes + ----- + .. 
versionadded:: 0.18.0 + + Examples + -------- + >>> from scipy.constants import convert_temperature + >>> import numpy as np + >>> convert_temperature(np.array([-40, 40]), 'Celsius', 'Kelvin') + array([ 233.15, 313.15]) + + """ + # Convert from `old_scale` to Kelvin + if old_scale.lower() in ['celsius', 'c']: + tempo = _np.asanyarray(val) + zero_Celsius + elif old_scale.lower() in ['kelvin', 'k']: + tempo = _np.asanyarray(val) + elif old_scale.lower() in ['fahrenheit', 'f']: + tempo = (_np.asanyarray(val) - 32) * 5 / 9 + zero_Celsius + elif old_scale.lower() in ['rankine', 'r']: + tempo = _np.asanyarray(val) * 5 / 9 + else: + raise NotImplementedError("%s scale is unsupported: supported scales " + "are Celsius, Kelvin, Fahrenheit, and " + "Rankine" % old_scale) + # and from Kelvin to `new_scale`. + if new_scale.lower() in ['celsius', 'c']: + res = tempo - zero_Celsius + elif new_scale.lower() in ['kelvin', 'k']: + res = tempo + elif new_scale.lower() in ['fahrenheit', 'f']: + res = (tempo - zero_Celsius) * 9 / 5 + 32 + elif new_scale.lower() in ['rankine', 'r']: + res = tempo * 9 / 5 + else: + raise NotImplementedError("'%s' scale is unsupported: supported " + "scales are 'Celsius', 'Kelvin', " + "'Fahrenheit', and 'Rankine'" % new_scale) + + return res + + +# optics + + +def lambda2nu(lambda_: npt.ArrayLike) -> Any: + """ + Convert wavelength to optical frequency + + Parameters + ---------- + lambda_ : array_like + Wavelength(s) to be converted. + + Returns + ------- + nu : float or array of floats + Equivalent optical frequency. + + Notes + ----- + Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the + (vacuum) speed of light in meters/second. 
+ + Examples + -------- + >>> from scipy.constants import lambda2nu, speed_of_light + >>> import numpy as np + >>> lambda2nu(np.array((1, speed_of_light))) + array([ 2.99792458e+08, 1.00000000e+00]) + + """ + return c / _np.asanyarray(lambda_) + + +def nu2lambda(nu: npt.ArrayLike) -> Any: + """ + Convert optical frequency to wavelength. + + Parameters + ---------- + nu : array_like + Optical frequency to be converted. + + Returns + ------- + lambda : float or array of floats + Equivalent wavelength(s). + + Notes + ----- + Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the + (vacuum) speed of light in meters/second. + + Examples + -------- + >>> from scipy.constants import nu2lambda, speed_of_light + >>> import numpy as np + >>> nu2lambda(np.array((1, speed_of_light))) + array([ 2.99792458e+08, 1.00000000e+00]) + + """ + return c / _np.asanyarray(nu) diff --git a/venv/lib/python3.10/site-packages/scipy/constants/codata.py b/venv/lib/python3.10/site-packages/scipy/constants/codata.py new file mode 100644 index 0000000000000000000000000000000000000000..72177f20545d673d5bbb179c705f72cdbb1afcc4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/constants/codata.py @@ -0,0 +1,24 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.constants` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'physical_constants', 'value', 'unit', 'precision', 'find', + 'ConstantWarning', 'txt2002', 'txt2006', 'txt2010', 'txt2014', + 'txt2018', 'parse_constants_2002to2014', + 'parse_constants_2018toXXXX', 'k', 'c', 'mu0', 'epsilon0', + 'exact_values', 'key', 'val', 'v' + +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="constants", module="codata", + private_modules=["_codata"], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/constants/constants.py b/venv/lib/python3.10/site-packages/scipy/constants/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..855901ba802881090b99b7e8972de741331c7ab9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/constants/constants.py @@ -0,0 +1,53 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.constants` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G', + 'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg', + 'Stefan_Boltzmann', 'Wien', 'acre', 'alpha', + 'angstrom', 'arcmin', 'arcminute', 'arcsec', + 'arcsecond', 'astronomical_unit', 'atm', + 'atmosphere', 'atomic_mass', 'atto', 'au', 'bar', + 'barrel', 'bbl', 'blob', 'c', 'calorie', + 'calorie_IT', 'calorie_th', 'carat', 'centi', + 'convert_temperature', 'day', 'deci', 'degree', + 'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e', + 'eV', 'electron_mass', 'electron_volt', + 'elementary_charge', 'epsilon_0', 'erg', + 'exa', 'exbi', 'femto', 'fermi', 'fine_structure', + 'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp', + 'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp', + 'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio', + 'grain', 'gram', 'gravitational_constant', 'h', 'hbar', + 'hectare', 'hecto', 'horsepower', 'hour', 'hp', + 'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force', + 'kmh', 'knot', 'lambda2nu', 'lb', 'lbf', + 'light_year', 'liter', 'litre', 'long_ton', 'm_e', + 'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega', + 'metric_ton', 'micro', 'micron', 'mil', 'mile', + 'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano', + 'nautical_mile', 'neutron_mass', 'nu2lambda', + 'ounce', 'oz', 'parsec', 'pebi', 'peta', + 'pi', 'pico', 'point', 'pound', 'pound_force', + 'proton_mass', 'psi', 'pt', 'short_ton', + 'sigma', 'slinch', 'slug', 'speed_of_light', + 'speed_of_sound', 'stone', 'survey_foot', + 'survey_mile', 'tebi', 'tera', 'ton_TNT', + 'torr', 'troy_ounce', 'troy_pound', 'u', + 'week', 'yard', 'year', 'yobi', 'yocto', + 'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="constants", module="constants", + private_modules=["_constants"], all=__all__, + attribute=name) diff --git 
from scipy.constants import find, value, ConstantWarning, c, speed_of_light
from numpy.testing import (assert_equal, assert_, assert_almost_equal,
                           suppress_warnings)
import scipy.constants._codata as _cd


def test_find():
    # Exact, empty, and multi-key searches through the constants table.
    keys = find('weak mixing', disp=False)
    assert_equal(keys, ['weak mixing angle'])

    keys = find('qwertyuiop', disp=False)
    assert_equal(keys, [])

    keys = find('natural unit', disp=False)
    assert_equal(keys, sorted(['natural unit of velocity',
                               'natural unit of action',
                               'natural unit of action in eV s',
                               'natural unit of mass',
                               'natural unit of energy',
                               'natural unit of energy in MeV',
                               'natural unit of momentum',
                               'natural unit of momentum in MeV/c',
                               'natural unit of length',
                               'natural unit of time']))


def test_basic_table_parse():
    # The table value must match the module-level convenience constants.
    c_s = 'speed of light in vacuum'
    assert_equal(value(c_s), c)
    assert_equal(value(c_s), speed_of_light)


def test_basic_lookup():
    # Value and unit lookup straight from the private table module.
    assert_equal('%d %s' % (_cd.c, _cd.unit('speed of light in vacuum')),
                 '299792458 m s^-1')


def test_find_all():
    # The full table should contain well over 300 entries.
    assert_(len(find(disp=False)) > 300)


def test_find_single():
    assert_equal(find('Wien freq', disp=False)[0],
                 'Wien frequency displacement law constant')


def test_2002_vs_2006():
    # Old-style alias ('magn.') must resolve to the same value as 'mag.'.
    assert_almost_equal(value('magn. flux quantum'),
                        value('mag. flux quantum'))


def test_exact_values():
    # Check that updating stored values with exact ones worked.
    with suppress_warnings() as sup:
        sup.filter(ConstantWarning)
        for key in _cd.exact_values:
            assert_((_cd.exact_values[key][0] - value(key)) / value(key) == 0)


# ---------------------------------------------------------------------------
# tests for scipy.constants conversion helpers (originally
# scipy/constants/tests/test_constants.py)
# ---------------------------------------------------------------------------

from numpy.testing import assert_allclose
import scipy.constants as sc


def test_convert_temperature():
    # Round-trips between Celsius, Kelvin, Fahrenheit, and Rankine.
    assert_equal(sc.convert_temperature(32, 'f', 'Celsius'), 0)
    assert_equal(sc.convert_temperature([0, 0], 'celsius', 'Kelvin'),
                 [273.15, 273.15])
    assert_equal(sc.convert_temperature([0, 0], 'kelvin', 'c'),
                 [-273.15, -273.15])
    assert_equal(sc.convert_temperature([32, 32], 'f', 'k'), [273.15, 273.15])
    assert_equal(sc.convert_temperature([273.15, 273.15], 'kelvin', 'F'),
                 [32, 32])
    assert_equal(sc.convert_temperature([0, 0], 'C', 'fahrenheit'), [32, 32])
    assert_allclose(sc.convert_temperature([0, 0], 'c', 'r'), [491.67, 491.67],
                    rtol=0., atol=1e-13)
    assert_allclose(sc.convert_temperature([491.67, 491.67], 'Rankine', 'C'),
                    [0., 0.], rtol=0., atol=1e-13)
    assert_allclose(sc.convert_temperature([491.67, 491.67], 'r', 'F'),
                    [32., 32.], rtol=0., atol=1e-13)
    assert_allclose(sc.convert_temperature([32, 32], 'fahrenheit', 'R'),
                    [491.67, 491.67], rtol=0., atol=1e-13)
    assert_allclose(sc.convert_temperature([273.15, 273.15], 'K', 'R'),
                    [491.67, 491.67], rtol=0., atol=1e-13)
    assert_allclose(sc.convert_temperature([491.67, 0.], 'rankine', 'kelvin'),
                    [273.15, 0.], rtol=0., atol=1e-13)


def test_lambda_to_nu():
    # nu = c / lambda, so the pair (c, 1) maps to (1, c).
    assert_equal(sc.lambda2nu([sc.speed_of_light, 1]), [1, sc.speed_of_light])


def test_nu_to_lambda():
    # lambda = c / nu, the inverse mapping of lambda2nu.
    assert_equal(sc.nu2lambda([sc.speed_of_light, 1]), [1, sc.speed_of_light])
autosummary:: + :toctree: generated/ + + affine_transform - Apply an affine transformation + geometric_transform - Apply an arbitrary geometric transform + map_coordinates - Map input array to new coordinates by interpolation + rotate - Rotate an array + shift - Shift an array + spline_filter + spline_filter1d + zoom - Zoom an array + +Measurements +============ + +.. autosummary:: + :toctree: generated/ + + center_of_mass - The center of mass of the values of an array at labels + extrema - Min's and max's of an array at labels, with their positions + find_objects - Find objects in a labeled array + histogram - Histogram of the values of an array, optionally at labels + label - Label features in an array + labeled_comprehension + maximum + maximum_position + mean - Mean of the values of an array at labels + median + minimum + minimum_position + standard_deviation - Standard deviation of an N-D image array + sum_labels - Sum of the values of the array + value_indices - Find indices of each distinct value in given array + variance - Variance of the values of an N-D image array + watershed_ift + +Morphology +========== + +.. autosummary:: + :toctree: generated/ + + binary_closing + binary_dilation + binary_erosion + binary_fill_holes + binary_hit_or_miss + binary_opening + binary_propagation + black_tophat + distance_transform_bf + distance_transform_cdt + distance_transform_edt + generate_binary_structure + grey_closing + grey_dilation + grey_erosion + grey_opening + iterate_structure + morphological_gradient + morphological_laplace + white_tophat + +""" + +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from ._filters import * +from ._fourier import * +from ._interpolation import * +from ._measurements import * +from ._morphology import * + +# Deprecated namespaces, to be removed in v2.0.0 +from . import filters +from . import fourier +from . import interpolation +from . import measurements +from . 
import morphology + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0d05e123ba1f7f45c1f37795e7ba5cd0257018b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/ndimage/_ctest.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..af816c8e2ce5740bd2b524a4bca51b4d0ccdac20 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/ndimage/_cytest.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/_filters.py b/venv/lib/python3.10/site-packages/scipy/ndimage/_filters.py new file mode 100644 index 0000000000000000000000000000000000000000..a2907614d5acffcd8dcfaf054d84d69b438e7923 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/ndimage/_filters.py @@ -0,0 +1,1852 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. 
The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from collections.abc import Iterable +import numbers +import warnings +import numpy +import operator + +from scipy._lib._util import normalize_axis_index +from . import _ni_support +from . import _nd_image +from . 
import _ni_docstrings + +__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', + 'prewitt', 'sobel', 'generic_laplace', 'laplace', + 'gaussian_laplace', 'generic_gradient_magnitude', + 'gaussian_gradient_magnitude', 'correlate', 'convolve', + 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', + 'maximum_filter1d', 'minimum_filter', 'maximum_filter', + 'rank_filter', 'median_filter', 'percentile_filter', + 'generic_filter1d', 'generic_filter'] + + +def _invalid_origin(origin, lenw): + return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2) + + +def _complex_via_real_components(func, input, weights, output, cval, **kwargs): + """Complex convolution via a linear combination of real convolutions.""" + complex_input = input.dtype.kind == 'c' + complex_weights = weights.dtype.kind == 'c' + if complex_input and complex_weights: + # real component of the output + func(input.real, weights.real, output=output.real, + cval=numpy.real(cval), **kwargs) + output.real -= func(input.imag, weights.imag, output=None, + cval=numpy.imag(cval), **kwargs) + # imaginary component of the output + func(input.real, weights.imag, output=output.imag, + cval=numpy.real(cval), **kwargs) + output.imag += func(input.imag, weights.real, output=None, + cval=numpy.imag(cval), **kwargs) + elif complex_input: + func(input.real, weights, output=output.real, cval=numpy.real(cval), + **kwargs) + func(input.imag, weights, output=output.imag, cval=numpy.imag(cval), + **kwargs) + else: + if numpy.iscomplexobj(cval): + raise ValueError("Cannot provide a complex-valued cval when the " + "input is real.") + func(input, weights.real, output=output.real, cval=cval, **kwargs) + func(input, weights.imag, output=output.imag, cval=cval, **kwargs) + return output + + +@_ni_docstrings.docfiller +def correlate1d(input, weights, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """Calculate a 1-D correlation along the given axis. 
+ + The lines of the array along the given axis are correlated with the + given weights. + + Parameters + ---------- + %(input)s + weights : array + 1-D sequence of numbers. + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + result : ndarray + Correlation result. Has the same shape as `input`. + + Examples + -------- + >>> from scipy.ndimage import correlate1d + >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) + array([ 8, 26, 8, 12, 7, 28, 36, 9]) + """ + input = numpy.asarray(input) + weights = numpy.asarray(weights) + complex_input = input.dtype.kind == 'c' + complex_weights = weights.dtype.kind == 'c' + if complex_input or complex_weights: + if complex_weights: + weights = weights.conj() + weights = weights.astype(numpy.complex128, copy=False) + kwargs = dict(axis=axis, mode=mode, origin=origin) + output = _ni_support._get_output(output, input, complex_output=True) + return _complex_via_real_components(correlate1d, input, weights, + output, cval, **kwargs) + + output = _ni_support._get_output(output, input) + weights = numpy.asarray(weights, dtype=numpy.float64) + if weights.ndim != 1 or weights.shape[0] < 1: + raise RuntimeError('no filter weights given') + if not weights.flags.contiguous: + weights = weights.copy() + axis = normalize_axis_index(axis, input.ndim) + if _invalid_origin(origin, len(weights)): + raise ValueError('Invalid origin; origin must satisfy ' + '-(len(weights) // 2) <= origin <= ' + '(len(weights)-1) // 2') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.correlate1d(input, weights, axis, output, mode, cval, + origin) + return output + + +@_ni_docstrings.docfiller +def convolve1d(input, weights, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """Calculate a 1-D convolution along the given axis. + + The lines of the array along the given axis are convolved with the + given weights. + + Parameters + ---------- + %(input)s + weights : ndarray + 1-D sequence of numbers. 
+ %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + convolve1d : ndarray + Convolved array with same shape as input + + Examples + -------- + >>> from scipy.ndimage import convolve1d + >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) + array([14, 24, 4, 13, 12, 36, 27, 0]) + """ + weights = weights[::-1] + origin = -origin + if not len(weights) & 1: + origin -= 1 + weights = numpy.asarray(weights) + if weights.dtype.kind == 'c': + # pre-conjugate here to counteract the conjugation in correlate1d + weights = weights.conj() + return correlate1d(input, weights, axis, output, mode, cval, origin) + + +def _gaussian_kernel1d(sigma, order, radius): + """ + Computes a 1-D Gaussian convolution kernel. + """ + if order < 0: + raise ValueError('order must be non-negative') + exponent_range = numpy.arange(order + 1) + sigma2 = sigma * sigma + x = numpy.arange(-radius, radius+1) + phi_x = numpy.exp(-0.5 / sigma2 * x ** 2) + phi_x = phi_x / phi_x.sum() + + if order == 0: + return phi_x + else: + # f(x) = q(x) * phi(x) = q(x) * exp(p(x)) + # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x) + # p'(x) = -1 / sigma ** 2 + # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the + # coefficients of q(x) + q = numpy.zeros(order + 1) + q[0] = 1 + D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x) + P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x) + Q_deriv = D + P + for _ in range(order): + q = Q_deriv.dot(q) + q = (x[:, None] ** exponent_range).dot(q) + return q * phi_x + + +@_ni_docstrings.docfiller +def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, + mode="reflect", cval=0.0, truncate=4.0, *, radius=None): + """1-D Gaussian filter. + + Parameters + ---------- + %(input)s + sigma : scalar + standard deviation for Gaussian kernel + %(axis)s + order : int, optional + An order of 0 corresponds to convolution with a Gaussian + kernel. 
A positive order corresponds to convolution with + that derivative of a Gaussian. + %(output)s + %(mode_reflect)s + %(cval)s + truncate : float, optional + Truncate the filter at this many standard deviations. + Default is 4.0. + radius : None or int, optional + Radius of the Gaussian kernel. If specified, the size of + the kernel will be ``2*radius + 1``, and `truncate` is ignored. + Default is None. + + Returns + ------- + gaussian_filter1d : ndarray + + Notes + ----- + The Gaussian kernel will have size ``2*radius + 1`` along each axis. If + `radius` is None, a default ``radius = round(truncate * sigma)`` will be + used. + + Examples + -------- + >>> from scipy.ndimage import gaussian_filter1d + >>> import numpy as np + >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1) + array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905]) + >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4) + array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657]) + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> x = rng.standard_normal(101).cumsum() + >>> y3 = gaussian_filter1d(x, 3) + >>> y6 = gaussian_filter1d(x, 6) + >>> plt.plot(x, 'k', label='original data') + >>> plt.plot(y3, '--', label='filtered, sigma=3') + >>> plt.plot(y6, ':', label='filtered, sigma=6') + >>> plt.legend() + >>> plt.grid() + >>> plt.show() + + """ + sd = float(sigma) + # make the radius of the filter equal to truncate standard deviations + lw = int(truncate * sd + 0.5) + if radius is not None: + lw = radius + if not isinstance(lw, numbers.Integral) or lw < 0: + raise ValueError('Radius must be a nonnegative integer.') + # Since we are calling correlate, not convolve, revert the kernel + weights = _gaussian_kernel1d(sigma, order, lw)[::-1] + return correlate1d(input, weights, axis, output, mode, cval, 0) + + +@_ni_docstrings.docfiller +def gaussian_filter(input, sigma, order=0, output=None, + mode="reflect", cval=0.0, truncate=4.0, *, radius=None, + axes=None): + 
@_ni_docstrings.docfiller
def gaussian_filter(input, sigma, order=0, output=None,
                    mode="reflect", cval=0.0, truncate=4.0, *, radius=None,
                    axes=None):
    """Multidimensional Gaussian filter.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        Standard deviation for the Gaussian kernel, given per axis as a
        sequence or as a single number for all axes.
    order : int or sequence of ints, optional
        The order of the filter along each axis. An order of 0 is plain
        Gaussian smoothing; a positive order convolves with that
        derivative of a Gaussian.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    truncate : float, optional
        Truncate the filter at this many standard deviations.
        Default is 4.0.
    radius : None or int or sequence of ints, optional
        Radius of the Gaussian kernel per axis; the kernel size along an
        axis is ``2*radius + 1`` and `truncate` is ignored when given.
        Default is None.
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise it is
        filtered along the specified axes; any tuples used for `sigma`,
        `order`, `mode` and/or `radius` must then match the length of
        `axes`.

    Returns
    -------
    gaussian_filter : ndarray
        Returned array of same shape as `input`.

    Notes
    -----
    Implemented as a sequence of 1-D convolution filters; intermediate
    results are stored in the output dtype, so limited-precision outputs
    may lose precision. The kernel size is ``2*radius + 1`` per axis,
    with a default ``radius = round(truncate * sigma)``.
    """
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input)

    axes = _ni_support._check_axes(axes, input.ndim)
    n = len(axes)
    per_axis = zip(axes,
                   _ni_support._normalize_sequence(sigma, n),
                   _ni_support._normalize_sequence(order, n),
                   _ni_support._normalize_sequence(mode, n),
                   _ni_support._normalize_sequence(radius, n))
    # Skip axes with (numerically) zero sigma — nothing to smooth there.
    todo = [(ax, sig, ordr, md, rad) for ax, sig, ordr, md, rad in per_axis
            if sig > 1e-15]
    if todo:
        for ax, sig, ordr, md, rad in todo:
            gaussian_filter1d(input, sig, ax, ordr, output,
                              md, cval, truncate, radius=rad)
            input = output
    else:
        output[...] = input[...]
    return output
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Prewitt filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Returns
    -------
    prewitt : ndarray
        Filtered array. Has the same shape as `input`.

    See Also
    --------
    sobel: Sobel filter

    Notes
    -----
    This function computes the one-dimensional Prewitt filter.
    Horizontal edges are emphasised with the horizontal transform
    (axis=0), vertical edges with the vertical transform (axis=1), and
    so on for higher dimensions. These can be combined to give the
    magnitude.
    """
    input = numpy.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    output = _ni_support._get_output(output, input)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # Difference along the requested axis ...
    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # ... then uniform smoothing along every other axis.
    for smooth_axis in (ii for ii in range(input.ndim) if ii != axis):
        correlate1d(output, [1, 1, 1], smooth_axis, output,
                    modes[smooth_axis], cval, 0)
    return output
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Sobel filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Returns
    -------
    sobel : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function computes the axis-specific Sobel gradient.
    The horizontal edges can be emphasised with the horizontal transform
    (axis=0), the vertical edges with the vertical transform (axis=1)
    and so on for higher dimensions. These can be combined to give the
    magnitude.
    """
    input = numpy.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    output = _ni_support._get_output(output, input)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # Central difference along the gradient axis ...
    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # ... then triangular (1, 2, 1) smoothing along the remaining axes.
    for smooth_axis in (ii for ii in range(input.ndim) if ii != axis):
        correlate1d(output, [1, 2, 1], smooth_axis, output,
                    modes[smooth_axis], cval, 0)
    return output
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
    """N-D Laplace filter based on approximate second derivatives.

    Parameters
    ----------
    %(input)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Returns
    -------
    laplace : ndarray
        Filtered array. Has the same shape as `input`.
    """
    # Discrete second derivative along one axis: the [1, -2, 1] stencil.
    def second_difference(arr, axis, out, md, cv):
        return correlate1d(arr, [1, -2, 1], axis, out, md, cv, 0)

    return generic_laplace(input, second_difference, output, mode, cval)
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
                     cval=0.0, **kwargs):
    """Multidimensional Laplace filter using Gaussian second derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter, given per axis
        as a sequence or as a single number for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Extra keyword arguments will be passed to gaussian_filter().

    Returns
    -------
    gaussian_laplace : ndarray
        Filtered array. Has the same shape as `input`.
    """
    input = numpy.asarray(input)

    # Second Gaussian derivative along a single axis; generic_laplace
    # sums these over all axes.
    def second_derivative(arr, axis, out, md, cv, sig, **kw):
        orders = [0] * arr.ndim
        orders[axis] = 2
        return gaussian_filter(arr, sig, orders, out, md, cv, **kw)

    return generic_laplace(input, second_derivative, output, mode, cval,
                           extra_arguments=(sigma,),
                           extra_keywords=kwargs)
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
                               mode="reflect", cval=0.0,
                               extra_arguments=(), extra_keywords=None):
    """Gradient magnitude using a provided gradient function.

    Parameters
    ----------
    %(input)s
    derivative : callable
        Callable with the following signature::

            derivative(input, axis, output, mode, cval,
                       *extra_arguments, **extra_keywords)

        See `extra_arguments`, `extra_keywords` below.
        `derivative` can assume that `input` and `output` are ndarrays.
        Note that the output from `derivative` is modified inplace;
        be careful to copy important inputs before returning them.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s

    Returns
    -------
    generic_gradient_matnitude : ndarray
        Filtered array. Has the same shape as `input`.
    """
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input)
    ndim = input.ndim
    if ndim == 0:
        # Zero-dimensional input: nothing to differentiate, just copy.
        output[...] = input[...]
        return output
    modes = _ni_support._normalize_sequence(mode, ndim)
    # First axis writes directly into `output`, then is squared in place.
    derivative(input, 0, output, modes[0], cval,
               *extra_arguments, **extra_keywords)
    numpy.multiply(output, output, output)
    for ax in range(1, ndim):
        # Passing the dtype asks `derivative` for a fresh array.
        tmp = derivative(input, ax, output.dtype, modes[ax], cval,
                         *extra_arguments, **extra_keywords)
        numpy.multiply(tmp, tmp, tmp)
        output += tmp
    # This allows the sqrt to work with a different default casting.
    numpy.sqrt(output, output, casting='unsafe')
    return output
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
                                mode="reflect", cval=0.0, **kwargs):
    """Multidimensional gradient magnitude using Gaussian derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter, given per axis
        as a sequence or as a single number for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Extra keyword arguments will be passed to gaussian_filter().

    Returns
    -------
    gaussian_gradient_magnitude : ndarray
        Filtered array. Has the same shape as `input`.
    """
    input = numpy.asarray(input)

    # First Gaussian derivative along one axis; the generic helper
    # combines these into the Euclidean magnitude.
    def first_derivative(arr, axis, out, md, cv, sig, **kw):
        orders = [0] * arr.ndim
        orders[axis] = 1
        return gaussian_filter(arr, sig, orders, out, md, cv, **kw)

    return generic_gradient_magnitude(input, first_derivative, output, mode,
                                      cval, extra_arguments=(sigma,),
                                      extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
                           convolution):
    """Shared implementation behind `correlate` and `convolve`."""
    input = numpy.asarray(input)
    weights = numpy.asarray(weights)
    if input.dtype.kind == 'c' or weights.dtype.kind == 'c':
        if weights.dtype.kind == 'c' and not convolution:
            # As for numpy.correlate, conjugate weights rather than input.
            weights = weights.conj()
        output = _ni_support._get_output(output, input, complex_output=True)
        # Process real and imaginary parts through the real-valued path.
        return _complex_via_real_components(
            _correlate_or_convolve, input, weights, output, cval,
            mode=mode, origin=origin, convolution=convolution)

    origins = _ni_support._normalize_sequence(origin, input.ndim)
    weights = numpy.asarray(weights, dtype=numpy.float64)
    wshape = [dim for dim in weights.shape if dim > 0]
    if len(wshape) != input.ndim:
        raise RuntimeError('filter weights array has incorrect shape.')
    if convolution:
        # Convolution == correlation with a fully reversed kernel and
        # mirrored origins (with an extra shift for even-sized axes).
        weights = weights[(slice(None, None, -1),) * weights.ndim]
        for ii in range(len(origins)):
            origins[ii] = -origins[ii]
            if not weights.shape[ii] & 1:
                origins[ii] -= 1
    for shift, wlen in zip(origins, wshape):
        if _invalid_origin(shift, wlen):
            raise ValueError('Invalid origin; origin must satisfy '
                             '-(weights.shape[k] // 2) <= origin[k] <= '
                             '(weights.shape[k]-1) // 2')

    if not weights.flags.contiguous:
        weights = weights.copy()
    output = _ni_support._get_output(output, input)
    needs_temp = numpy.may_share_memory(input, output)
    if needs_temp:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    if not isinstance(mode, str) and isinstance(mode, Iterable):
        raise RuntimeError("A sequence of modes is not supported")
    _nd_image.correlate(input, weights, output,
                        _ni_support._extend_mode_to_code(mode), cval, origins)
    if needs_temp:
        temp[...] = output
        output = temp
    return output
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
              origin=0):
    """
    Multidimensional correlation.

    The array is correlated with the given kernel.

    Parameters
    ----------
    %(input)s
    weights : ndarray
        array of weights, same number of dimensions as input
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    result : ndarray
        The result of correlation of `input` with `weights`.

    See Also
    --------
    convolve : Convolve an image with a kernel.

    Examples
    --------
    >>> from scipy.ndimage import correlate
    >>> import numpy as np
    >>> input_img = np.arange(25).reshape(5,5)
    >>> weights = [[0, 1, 0],
    ...            [1, 1, 1],
    ...            [0, 1, 0]]
    >>> correlate(input_img, weights)
    array([[  6,  10,  15,  20,  24],
           [ 26,  30,  35,  40,  44],
           [ 51,  55,  60,  65,  69],
           [ 76,  80,  85,  90,  94],
           [ 96, 100, 105, 110, 114]])
    """
    # Correlation: the kernel is applied as-is (convolution would flip it).
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, convolution=False)
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
             origin=0):
    """
    Multidimensional convolution.

    The array is convolved with the given kernel.

    Parameters
    ----------
    %(input)s
    weights : array_like
        Array of weights, same number of dimensions as input
    %(output)s
    %(mode_reflect)s
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0
    origin : int, optional
        Controls the origin of the input signal, which is where the
        filter is centered to produce the first element of the output.
        Positive values shift the filter to the right, and negative values
        shift the filter to the left. Default is 0.

    Returns
    -------
    result : ndarray
        The result of convolution of `input` with `weights`.

    See Also
    --------
    correlate : Correlate an image with a kernel.

    Notes
    -----
    Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
    W is the `weights` kernel, j is the N-D spatial index over :math:`W`,
    I is the `input` and k is the coordinate of the center of W,
    specified by `origin` in the input parameters.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
    array([[11, 10,  7,  4],
           [10,  3, 11, 11],
           [15, 12, 14,  7],
           [12,  3,  7,  0]])
    """
    # Convolution: the shared helper flips the kernel and adjusts origins.
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, convolution=True)
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D uniform filter along the given axis.

    The lines of the array along the given axis are filtered with a
    uniform filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length of uniform filter
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    result : ndarray
        Filtered array. Has same shape as `input`.
    """
    input = numpy.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    is_complex = input.dtype.kind == 'c'
    output = _ni_support._get_output(output, input,
                                     complex_output=is_complex)
    if not 0 <= size // 2 + origin < size:
        raise ValueError('invalid origin')
    mode_code = _ni_support._extend_mode_to_code(mode)
    if is_complex:
        # Filter the real and imaginary components independently.
        _nd_image.uniform_filter1d(input.real, size, axis, output.real,
                                   mode_code, numpy.real(cval), origin)
        _nd_image.uniform_filter1d(input.imag, size, axis, output.imag,
                                   mode_code, numpy.imag(cval), origin)
    else:
        _nd_image.uniform_filter1d(input, size, axis, output, mode_code,
                                   cval, origin)
    return output
@_ni_docstrings.docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
                   cval=0.0, origin=0, *, axes=None):
    """Multidimensional uniform filter.

    Parameters
    ----------
    %(input)s
    size : int or sequence of ints, optional
        The sizes of the uniform filter, given per axis as a sequence or
        as a single number for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise it is
        filtered along the specified axes; any tuples used for `size`,
        `origin`, and/or `mode` must then match the length of `axes`.

    Returns
    -------
    uniform_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    Implemented as a sequence of 1-D uniform filters; intermediate
    results are stored in the output dtype, so limited-precision outputs
    may lose precision.
    """
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=input.dtype.kind == 'c')
    axes = _ni_support._check_axes(axes, input.ndim)
    n = len(axes)
    per_axis = zip(axes,
                   _ni_support._normalize_sequence(size, n),
                   _ni_support._normalize_sequence(origin, n),
                   _ni_support._normalize_sequence(mode, n))
    # A size-1 window is the identity, so such axes are skipped.
    todo = [(ax, sz, og, md) for ax, sz, og, md in per_axis if sz > 1]
    if todo:
        for ax, sz, og, md in todo:
            uniform_filter1d(input, int(sz), ax, output, md, cval, og)
            input = output
    else:
        output[...] = input[...]
    return output
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D minimum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    minimum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length along which to calculate 1D minimum
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    result : ndarray.
        Filtered image. Has the same shape as `input`.

    Notes
    -----
    This function implements the MINLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, input)
    if not 0 <= size // 2 + origin < size:
        raise ValueError('invalid origin')
    # Final flag 1 selects the minimum variant of the C implementation.
    _nd_image.min_or_max_filter1d(input, size, axis, output,
                                  _ni_support._extend_mode_to_code(mode),
                                  cval, origin, 1)
    return output
@_ni_docstrings.docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D maximum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    maximum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        Length along which to calculate the 1-D maximum.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s

    Returns
    -------
    maximum1d : ndarray, None
        Maximum-filtered array with same shape as input.
        None if `output` is not None

    Notes
    -----
    This function implements the MAXLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, input)
    if not 0 <= size // 2 + origin < size:
        raise ValueError('invalid origin')
    # Final flag 0 selects the maximum variant of the C implementation.
    _nd_image.min_or_max_filter1d(input, size, axis, output,
                                  _ni_support._extend_mode_to_code(mode),
                                  cval, origin, 0)
    return output
def _min_or_max_filter(input, size, footprint, structure, output, mode,
                       cval, origin, minimum, axes=None):
    """Shared implementation behind `minimum_filter` and `maximum_filter`.

    Dispatches to separable 1-D passes when the footprint is a full
    (or implicit) hyper-rectangle, otherwise to the generic C kernel.
    """
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set",
                      UserWarning, stacklevel=3)
    if structure is None:
        if footprint is None:
            if size is None:
                raise RuntimeError("no footprint provided")
            separable = True
        else:
            footprint = numpy.asarray(footprint, dtype=bool)
            if not footprint.any():
                raise ValueError("All-zero footprint is not supported.")
            if footprint.all():
                # A full footprint is just a box filter — use the
                # separable fast path instead.
                size = footprint.shape
                footprint = None
                separable = True
            else:
                separable = False
    else:
        structure = numpy.asarray(structure, dtype=numpy.float64)
        separable = False
        if footprint is None:
            footprint = numpy.ones(structure.shape, bool)
        else:
            footprint = numpy.asarray(footprint, dtype=bool)
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output = _ni_support._get_output(output, input)
    temp_needed = numpy.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    axes = _ni_support._check_axes(axes, input.ndim)
    num_axes = len(axes)
    if separable:
        origins = _ni_support._normalize_sequence(origin, num_axes)
        sizes = _ni_support._normalize_sequence(size, num_axes)
        modes = _ni_support._normalize_sequence(mode, num_axes)
        todo = [(axes[i], sizes[i], origins[i], modes[i])
                for i in range(len(axes)) if sizes[i] > 1]
        filter1d = minimum_filter1d if minimum else maximum_filter1d
        if todo:
            for ax, sz, og, md in todo:
                filter1d(input, int(sz), ax, output, md, cval, og)
                input = output
        else:
            output[...] = input[...]
    else:
        origins = _ni_support._normalize_sequence(origin, input.ndim)
        if num_axes < input.ndim:
            if footprint.ndim != num_axes:
                raise RuntimeError("footprint array has incorrect shape")
            # Insert singleton dimensions along any non-filtered axes.
            footprint = numpy.expand_dims(
                footprint,
                tuple(ax for ax in range(input.ndim) if ax not in axes)
            )
        fshape = [dim for dim in footprint.shape if dim > 0]
        if len(fshape) != input.ndim:
            raise RuntimeError('footprint array has incorrect shape.')
        for og, flen in zip(origins, fshape):
            if (flen // 2 + og < 0) or (flen // 2 + og >= flen):
                raise ValueError('invalid origin')
        if not footprint.flags.contiguous:
            footprint = footprint.copy()
        if structure is not None:
            if len(structure.shape) != input.ndim:
                raise RuntimeError('structure array has incorrect shape')
            if num_axes != structure.ndim:
                structure = numpy.expand_dims(
                    structure,
                    tuple(ax for ax in range(structure.ndim)
                          if ax not in axes)
                )
            if not structure.flags.contiguous:
                structure = structure.copy()
        if not isinstance(mode, str) and isinstance(mode, Iterable):
            raise RuntimeError(
                "A sequence of modes is not supported for non-separable "
                "footprints")
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.min_or_max_filter(input, footprint, structure, output,
                                    mode, cval, origins, minimum)
    if temp_needed:
        temp[...] = output
        output = temp
    return output
@_ni_docstrings.docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0, *, axes=None):
    """Calculate a multidimensional minimum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s
    axes : tuple of int or None, optional
        If None, `input` is filtered along all axes. Otherwise it is
        filtered along the specified axes; any tuples used for `size`,
        `origin`, and/or `mode` must then match the length of `axes`.

    Returns
    -------
    minimum_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    A sequence of modes (one per axis) is only supported when the
    footprint is separable. Otherwise, a single mode string must be
    provided.
    """
    # minimum=1 selects the minimum variant of the shared helper.
    return _min_or_max_filter(input, size, footprint, None, output, mode,
                              cval, origin, minimum=1, axes=axes)
When `axes` is + specified, any tuples used for `size`, `origin`, and/or `mode` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + maximum_filter : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + A sequence of modes (one per axis) is only supported when the footprint is + separable. Otherwise, a single mode string must be provided. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.maximum_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _min_or_max_filter(input, size, footprint, None, output, mode, + cval, origin, 0, axes) + + +@_ni_docstrings.docfiller +def _rank_filter(input, rank, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, operation='rank', + axes=None): + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=3) + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + origins = _ni_support._normalize_sequence(origin, num_axes) + if footprint is None: + if size is None: + raise RuntimeError("no footprint or filter size provided") + sizes = _ni_support._normalize_sequence(size, num_axes) + footprint = numpy.ones(sizes, dtype=bool) + else: + footprint = numpy.asarray(footprint, dtype=bool) + if num_axes < input.ndim: + # set origin = 0 for any axes not being filtered + origins_temp = [0,] * input.ndim + for o, ax in zip(origins, axes): + origins_temp[ax] = o + origins = 
origins_temp

        # A sequence `mode` only makes sense per *filtered* axis; expand it
        # to one entry per array dimension, using 'constant' for the axes
        # that are not being filtered.
        if not isinstance(mode, str) and isinstance(mode, Iterable):
            # set mode = 'constant' for any axes not being filtered
            modes = _ni_support._normalize_sequence(mode, num_axes)
            modes_temp = ['constant'] * input.ndim
            for m, ax in zip(modes, axes):
                modes_temp[ax] = m
            mode = modes_temp

        # insert singleton dimension along any non-filtered axes
        if footprint.ndim != num_axes:
            raise RuntimeError("footprint array has incorrect shape")
        footprint = numpy.expand_dims(
            footprint,
            tuple(ax for ax in range(input.ndim) if ax not in axes)
        )
    # At this point the footprint must have exactly one dimension per input
    # dimension and no empty axes.
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError('footprint array has incorrect shape.')
    for origin, lenf in zip(origins, fshape):
        # The shifted footprint centre (lenf // 2 + origin) must stay inside
        # the footprint along every axis.
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        # the C implementation requires a contiguous footprint buffer
        footprint = footprint.copy()
    # Number of True entries in the footprint == number of values ranked at
    # each output position.
    filter_size = numpy.where(footprint, 1, 0).sum()
    if operation == 'median':
        # median is the middle rank of the footprint
        rank = filter_size // 2
    elif operation == 'percentile':
        percentile = rank
        if percentile < 0.0:
            # negative percentiles count from the top (e.g. -20 -> 80)
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError('invalid percentile')
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    if rank < 0:
        # negative ranks count back from the largest element (rank -1 == max)
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError('rank not within filter footprint size')
    if rank == 0:
        # rank 0 is exactly a minimum filter; delegate to the specialised
        # (separable-capable) implementation
        return minimum_filter(input, None, footprint, output, mode, cval,
                              origins, axes=None)
    elif rank == filter_size - 1:
        # the top rank is exactly a maximum filter
        return maximum_filter(input, None, footprint, output, mode, cval,
                              origins, axes=None)
    else:
        output = _ni_support._get_output(output, input)
        temp_needed = numpy.may_share_memory(input, output)
        if temp_needed:
            # input and output arrays cannot share memory
            temp = output
            output = _ni_support._get_output(output.dtype, input)
        if not isinstance(mode, str) and isinstance(mode,
Iterable): + raise RuntimeError( + "A sequence of modes is not supported by non-separable rank " + "filters") + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.rank_filter(input, rank, footprint, output, mode, cval, + origins) + if temp_needed: + temp[...] = output + output = temp + return output + + +@_ni_docstrings.docfiller +def rank_filter(input, rank, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """Calculate a multidimensional rank filter. + + Parameters + ---------- + %(input)s + rank : int + The rank parameter may be less than zero, i.e., rank = -1 + indicates the largest element. + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. + + Returns + ------- + rank_filter : ndarray + Filtered array. Has the same shape as `input`. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.rank_filter(ascent, rank=42, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + rank = operator.index(rank) + return _rank_filter(input, rank, size, footprint, output, mode, cval, + origin, 'rank', axes=axes) + + +@_ni_docstrings.docfiller +def median_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """ + Calculate a multidimensional median filter. + + Parameters + ---------- + %(input)s + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. 
Otherwise, + `input` is filtered along the specified axes. + + Returns + ------- + median_filter : ndarray + Filtered array. Has the same shape as `input`. + + See Also + -------- + scipy.signal.medfilt2d + + Notes + ----- + For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes + the specialised function `scipy.signal.medfilt2d` may be faster. It is + however limited to constant mode with ``cval=0``. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.median_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _rank_filter(input, 0, size, footprint, output, mode, cval, + origin, 'median', axes=axes) + + +@_ni_docstrings.docfiller +def percentile_filter(input, percentile, size=None, footprint=None, + output=None, mode="reflect", cval=0.0, origin=0, *, + axes=None): + """Calculate a multidimensional percentile filter. + + Parameters + ---------- + %(input)s + percentile : scalar + The percentile parameter may be less than zero, i.e., + percentile = -20 equals percentile = 80 + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. + + Returns + ------- + percentile_filter : ndarray + Filtered array. Has the same shape as `input`. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _rank_filter(input, percentile, size, footprint, output, mode, + cval, origin, 'percentile', axes=axes) + + +@_ni_docstrings.docfiller +def generic_filter1d(input, function, filter_size, axis=-1, + output=None, mode="reflect", cval=0.0, origin=0, + extra_arguments=(), extra_keywords=None): + """Calculate a 1-D filter along the given axis. + + `generic_filter1d` iterates over the lines of the array, calling the + given function at each line. The arguments of the line are the + input line, and the output line. The input and output lines are 1-D + double arrays. The input line is extended appropriately according + to the filter size and origin. The output line must be modified + in-place with the result. + + Parameters + ---------- + %(input)s + function : {callable, scipy.LowLevelCallable} + Function to apply along given axis. + filter_size : scalar + Length of the filter. + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + %(extra_arguments)s + %(extra_keywords)s + + Returns + ------- + generic_filter1d : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + This function also accepts low-level callback functions with one of + the following signatures and wrapped in `scipy.LowLevelCallable`: + + .. 
code:: c + + int function(double *input_line, npy_intp input_length, + double *output_line, npy_intp output_length, + void *user_data) + int function(double *input_line, intptr_t input_length, + double *output_line, intptr_t output_length, + void *user_data) + + The calling function iterates over the lines of the input and output + arrays, calling the callback function at each line. The current line + is extended according to the border conditions set by the calling + function, and the result is copied into the array that is passed + through ``input_line``. The length of the input line (after extension) + is passed through ``input_length``. The callback function should apply + the filter and store the result in the array passed through + ``output_line``. The length of the output line is passed through + ``output_length``. ``user_data`` is the data pointer provided + to `scipy.LowLevelCallable` as-is. + + The callback function must return an integer error status that is zero + if something went wrong and one otherwise. If an error occurs, you should + normally set the python error status with an informative message + before returning, otherwise a default error message is set by the + calling function. + + In addition, some other low-level function pointer specifications + are accepted, but these are for backward compatibility only and should + not be used in new code. 
+ + """ + if extra_keywords is None: + extra_keywords = {} + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + output = _ni_support._get_output(output, input) + if filter_size < 1: + raise RuntimeError('invalid filter size') + axis = normalize_axis_index(axis, input.ndim) + if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >= + filter_size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.generic_filter1d(input, function, filter_size, axis, output, + mode, cval, origin, extra_arguments, + extra_keywords) + return output + + +@_ni_docstrings.docfiller +def generic_filter(input, function, size=None, footprint=None, + output=None, mode="reflect", cval=0.0, origin=0, + extra_arguments=(), extra_keywords=None): + """Calculate a multidimensional filter using the given function. + + At each element the provided function is called. The input values + within the filter footprint at that element are passed to the function + as a 1-D array of double values. + + Parameters + ---------- + %(input)s + function : {callable, scipy.LowLevelCallable} + Function to apply at each element. + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + %(extra_arguments)s + %(extra_keywords)s + + Returns + ------- + generic_filter : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + This function also accepts low-level callback functions with one of + the following signatures and wrapped in `scipy.LowLevelCallable`: + + .. code:: c + + int callback(double *buffer, npy_intp filter_size, + double *return_value, void *user_data) + int callback(double *buffer, intptr_t filter_size, + double *return_value, void *user_data) + + The calling function iterates over the elements of the input and + output arrays, calling the callback function at each element. 
The
    elements within the footprint of the filter at the current element are
    passed through the ``buffer`` parameter, and the number of elements
    within the footprint through ``filter_size``. The calculated value is
    returned in ``return_value``. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the Python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.

    Examples
    --------
    Import the necessary modules and load the example image used for
    filtering.

    >>> import numpy as np
    >>> from scipy import datasets
    >>> from scipy.ndimage import generic_filter
    >>> import matplotlib.pyplot as plt
    >>> ascent = datasets.ascent()

    Compute a maximum filter with kernel size 10 by passing a simple NumPy
    aggregation function as argument to `function`.

    >>> maximum_filter_result = generic_filter(ascent, np.amax, [10, 10])

    While a maximum filter could also directly be obtained using
    `maximum_filter`, `generic_filter` allows a generic Python function or
    `scipy.LowLevelCallable` to be used as a filter. Here, we compute the
    range between maximum and minimum value as an example for a kernel size
    of 5.

    >>> def custom_filter(image):
    ...     return np.amax(image) - np.amin(image)
    >>> custom_filter_result = generic_filter(ascent, custom_filter, [5, 5])

    Plot the original and filtered images.

    >>> fig, axes = plt.subplots(3, 1, figsize=(4, 12))
    >>> plt.gray()  # show the filtered result in grayscale
    >>> top, middle, bottom = axes
    >>> for ax in axes:
    ...
ax.set_axis_off() # remove coordinate system + >>> top.imshow(ascent) + >>> top.set_title("Original image") + >>> middle.imshow(maximum_filter_result) + >>> middle.set_title("Maximum filter, Kernel: 10x10") + >>> bottom.imshow(custom_filter_result) + >>> bottom.set_title("Custom filter, Kernel: 5x5") + >>> fig.tight_layout() + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + if extra_keywords is None: + extra_keywords = {} + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + origins = _ni_support._normalize_sequence(origin, input.ndim) + if footprint is None: + if size is None: + raise RuntimeError("no footprint or filter size provided") + sizes = _ni_support._normalize_sequence(size, input.ndim) + footprint = numpy.ones(sizes, dtype=bool) + else: + footprint = numpy.asarray(footprint, dtype=bool) + fshape = [ii for ii in footprint.shape if ii > 0] + if len(fshape) != input.ndim: + raise RuntimeError('filter footprint array has incorrect shape.') + for origin, lenf in zip(origins, fshape): + if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): + raise ValueError('invalid origin') + if not footprint.flags.contiguous: + footprint = footprint.copy() + output = _ni_support._get_output(output, input) + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.generic_filter(input, function, footprint, output, mode, + cval, origins, extra_arguments, extra_keywords) + return output diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/_fourier.py b/venv/lib/python3.10/site-packages/scipy/ndimage/_fourier.py new file mode 100644 index 0000000000000000000000000000000000000000..8966dd6d9a94341f3b68561d8f6d7f8e73e074e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/ndimage/_fourier.py @@ -0,0 +1,307 @@ +# Copyright (C) 2003-2005 Peter J. 
Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy +from scipy._lib._util import normalize_axis_index +from . import _ni_support +from . 
import _nd_image

__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
           'fourier_shift']


def _get_output_fourier(output, input):
    # Allocate or validate the output array for the Fourier filter functions
    # in this module. `output` may be:
    #   * None         -> allocate; complex64/complex128/float32 inputs keep
    #                     their dtype, everything else gets float64;
    #   * a type/dtype -> allocate with that dtype, which must be one of the
    #                     four supported float/complex scalar types;
    #   * an ndarray   -> used in place; its shape must match the input.
    if output is None:
        if input.dtype.type in [numpy.complex64, numpy.complex128,
                                numpy.float32]:
            output = numpy.zeros(input.shape, dtype=input.dtype)
        else:
            output = numpy.zeros(input.shape, dtype=numpy.float64)
    elif type(output) is type:
        if output not in [numpy.complex64, numpy.complex128,
                          numpy.float32, numpy.float64]:
            raise RuntimeError("output type not supported")
        output = numpy.zeros(input.shape, dtype=output)
    elif output.shape != input.shape:
        raise RuntimeError("output shape not correct")
    return output


def _get_output_fourier_complex(output, input):
    # Same contract as _get_output_fourier, but restricted to complex dtypes
    # (complex64/complex128); non-complex inputs default to complex128.
    # Used by fourier_shift below.
    if output is None:
        if input.dtype.type in [numpy.complex64, numpy.complex128]:
            output = numpy.zeros(input.shape, dtype=input.dtype)
        else:
            output = numpy.zeros(input.shape, dtype=numpy.complex128)
    elif type(output) is type:
        if output not in [numpy.complex64, numpy.complex128]:
            raise RuntimeError("output type not supported")
        output = numpy.zeros(input.shape, dtype=output)
    elif output.shape != input.shape:
        raise RuntimeError("output shape not correct")
    return output


def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
    """
    Multidimensional Gaussian fourier filter.

    The array is multiplied with the fourier transform of a Gaussian
    kernel.

    Parameters
    ----------
    input : array_like
        The input array.
    sigma : float or sequence
        The sigma of the Gaussian kernel. If a float, `sigma` is the same for
        all axes. If a sequence, `sigma` has to contain one value for each
        axis.
    n : int, optional
        If `n` is negative (default), then the input is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the input is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
+ axis : int, optional + The axis of the real transform. + output : ndarray, optional + If given, the result of filtering the input is placed in this array. + + Returns + ------- + fourier_gaussian : ndarray + The filtered input. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import numpy.fft + >>> import matplotlib.pyplot as plt + >>> fig, (ax1, ax2) = plt.subplots(1, 2) + >>> plt.gray() # show the filtered result in grayscale + >>> ascent = datasets.ascent() + >>> input_ = numpy.fft.fft2(ascent) + >>> result = ndimage.fourier_gaussian(input_, sigma=4) + >>> result = numpy.fft.ifft2(result) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result.real) # the imaginary part is an artifact + >>> plt.show() + """ + input = numpy.asarray(input) + output = _get_output_fourier(output, input) + axis = normalize_axis_index(axis, input.ndim) + sigmas = _ni_support._normalize_sequence(sigma, input.ndim) + sigmas = numpy.asarray(sigmas, dtype=numpy.float64) + if not sigmas.flags.contiguous: + sigmas = sigmas.copy() + + _nd_image.fourier_filter(input, sigmas, n, axis, output, 0) + return output + + +def fourier_uniform(input, size, n=-1, axis=-1, output=None): + """ + Multidimensional uniform fourier filter. + + The array is multiplied with the Fourier transform of a box of given + size. + + Parameters + ---------- + input : array_like + The input array. + size : float or sequence + The size of the box used for filtering. + If a float, `size` is the same for all axes. If a sequence, `size` has + to contain one value for each axis. + n : int, optional + If `n` is negative (default), then the input is assumed to be the + result of a complex fft. + If `n` is larger than or equal to zero, the input is assumed to be the + result of a real fft, and `n` gives the length of the array before + transformation along the real transform direction. + axis : int, optional + The axis of the real transform. 
+ output : ndarray, optional + If given, the result of filtering the input is placed in this array. + + Returns + ------- + fourier_uniform : ndarray + The filtered input. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import numpy.fft + >>> import matplotlib.pyplot as plt + >>> fig, (ax1, ax2) = plt.subplots(1, 2) + >>> plt.gray() # show the filtered result in grayscale + >>> ascent = datasets.ascent() + >>> input_ = numpy.fft.fft2(ascent) + >>> result = ndimage.fourier_uniform(input_, size=20) + >>> result = numpy.fft.ifft2(result) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result.real) # the imaginary part is an artifact + >>> plt.show() + """ + input = numpy.asarray(input) + output = _get_output_fourier(output, input) + axis = normalize_axis_index(axis, input.ndim) + sizes = _ni_support._normalize_sequence(size, input.ndim) + sizes = numpy.asarray(sizes, dtype=numpy.float64) + if not sizes.flags.contiguous: + sizes = sizes.copy() + _nd_image.fourier_filter(input, sizes, n, axis, output, 1) + return output + + +def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None): + """ + Multidimensional ellipsoid Fourier filter. + + The array is multiplied with the fourier transform of an ellipsoid of + given sizes. + + Parameters + ---------- + input : array_like + The input array. + size : float or sequence + The size of the box used for filtering. + If a float, `size` is the same for all axes. If a sequence, `size` has + to contain one value for each axis. + n : int, optional + If `n` is negative (default), then the input is assumed to be the + result of a complex fft. + If `n` is larger than or equal to zero, the input is assumed to be the + result of a real fft, and `n` gives the length of the array before + transformation along the real transform direction. + axis : int, optional + The axis of the real transform. + output : ndarray, optional + If given, the result of filtering the input is placed in this array. 
+ + Returns + ------- + fourier_ellipsoid : ndarray + The filtered input. + + Notes + ----- + This function is implemented for arrays of rank 1, 2, or 3. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import numpy.fft + >>> import matplotlib.pyplot as plt + >>> fig, (ax1, ax2) = plt.subplots(1, 2) + >>> plt.gray() # show the filtered result in grayscale + >>> ascent = datasets.ascent() + >>> input_ = numpy.fft.fft2(ascent) + >>> result = ndimage.fourier_ellipsoid(input_, size=20) + >>> result = numpy.fft.ifft2(result) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result.real) # the imaginary part is an artifact + >>> plt.show() + """ + input = numpy.asarray(input) + if input.ndim > 3: + raise NotImplementedError("Only 1d, 2d and 3d inputs are supported") + output = _get_output_fourier(output, input) + if output.size == 0: + # The C code has a bug that can result in a segfault with arrays + # that have size 0 (gh-17270), so check here. + return output + axis = normalize_axis_index(axis, input.ndim) + sizes = _ni_support._normalize_sequence(size, input.ndim) + sizes = numpy.asarray(sizes, dtype=numpy.float64) + if not sizes.flags.contiguous: + sizes = sizes.copy() + _nd_image.fourier_filter(input, sizes, n, axis, output, 2) + return output + + +def fourier_shift(input, shift, n=-1, axis=-1, output=None): + """ + Multidimensional Fourier shift filter. + + The array is multiplied with the Fourier transform of a shift operation. + + Parameters + ---------- + input : array_like + The input array. + shift : float or sequence + The size of the box used for filtering. + If a float, `shift` is the same for all axes. If a sequence, `shift` + has to contain one value for each axis. + n : int, optional + If `n` is negative (default), then the input is assumed to be the + result of a complex fft. 
+ If `n` is larger than or equal to zero, the input is assumed to be the + result of a real fft, and `n` gives the length of the array before + transformation along the real transform direction. + axis : int, optional + The axis of the real transform. + output : ndarray, optional + If given, the result of shifting the input is placed in this array. + + Returns + ------- + fourier_shift : ndarray + The shifted input. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> import numpy.fft + >>> fig, (ax1, ax2) = plt.subplots(1, 2) + >>> plt.gray() # show the filtered result in grayscale + >>> ascent = datasets.ascent() + >>> input_ = numpy.fft.fft2(ascent) + >>> result = ndimage.fourier_shift(input_, shift=200) + >>> result = numpy.fft.ifft2(result) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result.real) # the imaginary part is an artifact + >>> plt.show() + """ + input = numpy.asarray(input) + output = _get_output_fourier_complex(output, input) + axis = normalize_axis_index(axis, input.ndim) + shifts = _ni_support._normalize_sequence(shift, input.ndim) + shifts = numpy.asarray(shifts, dtype=numpy.float64) + if not shifts.flags.contiguous: + shifts = shifts.copy() + _nd_image.fourier_shift(input, shifts, n, axis, output) + return output diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py b/venv/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py new file mode 100644 index 0000000000000000000000000000000000000000..b87e32ef60215b8ec7b189bc5e7a579a4095b1f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/ndimage/_interpolation.py @@ -0,0 +1,1010 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import itertools +import warnings + +import numpy +from scipy._lib._util import normalize_axis_index + +from scipy import special +from . import _ni_support +from . import _nd_image +from ._ni_docstrings import docfiller + + +__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform', + 'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate'] + + +@docfiller +def spline_filter1d(input, order=3, axis=-1, output=numpy.float64, + mode='mirror'): + """ + Calculate a 1-D spline filter along the given axis. + + The lines of the array along the given axis are filtered by a + spline filter. The order of the spline must be >= 2 and <= 5. 
+ + Parameters + ---------- + %(input)s + order : int, optional + The order of the spline, default is 3. + axis : int, optional + The axis along which the spline filter is applied. Default is the last + axis. + output : ndarray or dtype, optional + The array in which to place the output, or the dtype of the returned + array. Default is ``numpy.float64``. + %(mode_interp_mirror)s + + Returns + ------- + spline_filter1d : ndarray + The filtered input. + + See Also + -------- + spline_filter : Multidimensional spline filter. + + Notes + ----- + All of the interpolation functions in `ndimage` do spline interpolation of + the input image. If using B-splines of `order > 1`, the input image + values have to be converted to B-spline coefficients first, which is + done by applying this 1-D filter sequentially along all + axes of the input. All functions that require B-spline coefficients + will automatically filter their inputs, a behavior controllable with + the `prefilter` keyword argument. For functions that accept a `mode` + parameter, the result will only be correct if it matches the `mode` + used when filtering. + + For complex-valued `input`, this function processes the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + Examples + -------- + We can filter an image using 1-D spline along the given axis: + + >>> from scipy.ndimage import spline_filter1d + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> orig_img = np.eye(20) # create an image + >>> orig_img[10, :] = 1.0 + >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0) + >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1) + >>> f, ax = plt.subplots(1, 3, sharex=True) + >>> for ind, data in enumerate([[orig_img, "original image"], + ... [sp_filter_axis_0, "spline filter (axis=0)"], + ... [sp_filter_axis_1, "spline filter (axis=1)"]]): + ... ax[ind].imshow(data[0], cmap='gray_r') + ... 
ax[ind].set_title(data[1]) + >>> plt.tight_layout() + >>> plt.show() + + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + complex_output = numpy.iscomplexobj(input) + output = _ni_support._get_output(output, input, + complex_output=complex_output) + if complex_output: + spline_filter1d(input.real, order, axis, output.real, mode) + spline_filter1d(input.imag, order, axis, output.imag, mode) + return output + if order in [0, 1]: + output[...] = numpy.array(input) + else: + mode = _ni_support._extend_mode_to_code(mode) + axis = normalize_axis_index(axis, input.ndim) + _nd_image.spline_filter1d(input, order, axis, output, mode) + return output + +@docfiller +def spline_filter(input, order=3, output=numpy.float64, mode='mirror'): + """ + Multidimensional spline filter. + + Parameters + ---------- + %(input)s + order : int, optional + The order of the spline, default is 3. + output : ndarray or dtype, optional + The array in which to place the output, or the dtype of the returned + array. Default is ``numpy.float64``. + %(mode_interp_mirror)s + + Returns + ------- + spline_filter : ndarray + Filtered array. Has the same shape as `input`. + + See Also + -------- + spline_filter1d : Calculate a 1-D spline filter along the given axis. + + Notes + ----- + The multidimensional filter is implemented as a sequence of + 1-D spline filters. The intermediate arrays are stored + in the same data type as the output. Therefore, for output types + with a limited precision, the results may be imprecise because + intermediate results may be stored with insufficient precision. + + For complex-valued `input`, this function processes the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. 
+ + Examples + -------- + We can filter an image using multidimentional splines: + + >>> from scipy.ndimage import spline_filter + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> orig_img = np.eye(20) # create an image + >>> orig_img[10, :] = 1.0 + >>> sp_filter = spline_filter(orig_img, order=3) + >>> f, ax = plt.subplots(1, 2, sharex=True) + >>> for ind, data in enumerate([[orig_img, "original image"], + ... [sp_filter, "spline filter"]]): + ... ax[ind].imshow(data[0], cmap='gray_r') + ... ax[ind].set_title(data[1]) + >>> plt.tight_layout() + >>> plt.show() + + """ + if order < 2 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + complex_output = numpy.iscomplexobj(input) + output = _ni_support._get_output(output, input, + complex_output=complex_output) + if complex_output: + spline_filter(input.real, order, output.real, mode) + spline_filter(input.imag, order, output.imag, mode) + return output + if order not in [0, 1] and input.ndim > 0: + for axis in range(input.ndim): + spline_filter1d(input, order, axis, output=output, mode=mode) + input = output + else: + output[...] = input[...] + return output + + +def _prepad_for_spline_filter(input, mode, cval): + if mode in ['nearest', 'grid-constant']: + npad = 12 + if mode == 'grid-constant': + padded = numpy.pad(input, npad, mode='constant', + constant_values=cval) + elif mode == 'nearest': + padded = numpy.pad(input, npad, mode='edge') + else: + # other modes have exact boundary conditions implemented so + # no prepadding is needed + npad = 0 + padded = input + return padded, npad + + +@docfiller +def geometric_transform(input, mapping, output_shape=None, + output=None, order=3, + mode='constant', cval=0.0, prefilter=True, + extra_arguments=(), extra_keywords={}): + """ + Apply an arbitrary geometric transform. + + The given mapping function is used to find, for each point in the + output, the corresponding coordinates in the input. 
The value of the + input at those coordinates is determined by spline interpolation of + the requested order. + + Parameters + ---------- + %(input)s + mapping : {callable, scipy.LowLevelCallable} + A callable object that accepts a tuple of length equal to the output + array rank, and returns the corresponding input coordinates as a tuple + of length equal to the input array rank. + output_shape : tuple of ints, optional + Shape tuple. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + extra_arguments : tuple, optional + Extra arguments passed to `mapping`. + extra_keywords : dict, optional + Extra keywords passed to `mapping`. + + Returns + ------- + output : ndarray + The filtered input. + + See Also + -------- + map_coordinates, affine_transform, spline_filter1d + + + Notes + ----- + This function also accepts low-level callback functions with one + the following signatures and wrapped in `scipy.LowLevelCallable`: + + .. code:: c + + int mapping(npy_intp *output_coordinates, double *input_coordinates, + int output_rank, int input_rank, void *user_data) + int mapping(intptr_t *output_coordinates, double *input_coordinates, + int output_rank, int input_rank, void *user_data) + + The calling function iterates over the elements of the output array, + calling the callback function at each element. The coordinates of the + current output element are passed through ``output_coordinates``. The + callback function must return the coordinates at which the input must + be interpolated in ``input_coordinates``. The rank of the input and + output arrays are given by ``input_rank`` and ``output_rank`` + respectively. ``user_data`` is the data pointer provided + to `scipy.LowLevelCallable` as-is. + + The callback function must return an integer error status that is zero + if something went wrong and one otherwise. 
If an error occurs, you should + normally set the Python error status with an informative message + before returning, otherwise a default error message is set by the + calling function. + + In addition, some other low-level function pointer specifications + are accepted, but these are for backward compatibility only and should + not be used in new code. + + For complex-valued `input`, this function transforms the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + Examples + -------- + >>> import numpy as np + >>> from scipy.ndimage import geometric_transform + >>> a = np.arange(12.).reshape((4, 3)) + >>> def shift_func(output_coords): + ... return (output_coords[0] - 0.5, output_coords[1] - 0.5) + ... + >>> geometric_transform(a, shift_func) + array([[ 0. , 0. , 0. ], + [ 0. , 1.362, 2.738], + [ 0. , 4.812, 6.187], + [ 0. , 8.263, 9.637]]) + + >>> b = [1, 2, 3, 4, 5] + >>> def shift_func(output_coords): + ... return (output_coords[0] - 3,) + ... 
+ >>> geometric_transform(b, shift_func, mode='constant') + array([0, 0, 0, 1, 2]) + >>> geometric_transform(b, shift_func, mode='nearest') + array([1, 1, 1, 1, 2]) + >>> geometric_transform(b, shift_func, mode='reflect') + array([3, 2, 1, 1, 2]) + >>> geometric_transform(b, shift_func, mode='wrap') + array([2, 3, 4, 1, 2]) + + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if output_shape is None: + output_shape = input.shape + if input.ndim < 1 or len(output_shape) < 1: + raise RuntimeError('input and output rank must be > 0') + complex_output = numpy.iscomplexobj(input) + output = _ni_support._get_output(output, input, shape=output_shape, + complex_output=complex_output) + if complex_output: + kwargs = dict(order=order, mode=mode, prefilter=prefilter, + output_shape=output_shape, + extra_arguments=extra_arguments, + extra_keywords=extra_keywords) + geometric_transform(input.real, mapping, output=output.real, + cval=numpy.real(cval), **kwargs) + geometric_transform(input.imag, mapping, output=output.imag, + cval=numpy.imag(cval), **kwargs) + return output + + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=numpy.float64, + mode=mode) + else: + npad = 0 + filtered = input + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.geometric_transform(filtered, mapping, None, None, None, output, + order, mode, cval, npad, extra_arguments, + extra_keywords) + return output + + +@docfiller +def map_coordinates(input, coordinates, output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """ + Map the input array to new coordinates by interpolation. + + The array of coordinates is used to find, for each point in the output, + the corresponding coordinates in the input. The value of the input at + those coordinates is determined by spline interpolation of the + requested order. 
+ + The shape of the output is derived from that of the coordinate + array by dropping the first axis. The values of the array along + the first axis are the coordinates in the input array at which the + output value is found. + + Parameters + ---------- + %(input)s + coordinates : array_like + The coordinates at which `input` is evaluated. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + + Returns + ------- + map_coordinates : ndarray + The result of transforming the input. The shape of the output is + derived from that of `coordinates` by dropping the first axis. + + See Also + -------- + spline_filter, geometric_transform, scipy.interpolate + + Notes + ----- + For complex-valued `input`, this function maps the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.arange(12.).reshape((4, 3)) + >>> a + array([[ 0., 1., 2.], + [ 3., 4., 5.], + [ 6., 7., 8.], + [ 9., 10., 11.]]) + >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1) + array([ 2., 7.]) + + Above, the interpolated value of a[0.5, 0.5] gives output[0], while + a[2, 1] is output[1]. + + >>> inds = np.array([[0.5, 2], [0.5, 4]]) + >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3) + array([ 2. 
, -33.3]) + >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest') + array([ 2., 8.]) + >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool) + array([ True, False], dtype=bool) + + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + coordinates = numpy.asarray(coordinates) + if numpy.iscomplexobj(coordinates): + raise TypeError('Complex type not supported') + output_shape = coordinates.shape[1:] + if input.ndim < 1 or len(output_shape) < 1: + raise RuntimeError('input and output rank must be > 0') + if coordinates.shape[0] != input.ndim: + raise RuntimeError('invalid shape for coordinate array') + complex_output = numpy.iscomplexobj(input) + output = _ni_support._get_output(output, input, shape=output_shape, + complex_output=complex_output) + if complex_output: + kwargs = dict(order=order, mode=mode, prefilter=prefilter) + map_coordinates(input.real, coordinates, output=output.real, + cval=numpy.real(cval), **kwargs) + map_coordinates(input.imag, coordinates, output=output.imag, + cval=numpy.imag(cval), **kwargs) + return output + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=numpy.float64, + mode=mode) + else: + npad = 0 + filtered = input + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.geometric_transform(filtered, None, coordinates, None, None, + output, order, mode, cval, npad, None, None) + return output + + +@docfiller +def affine_transform(input, matrix, offset=0.0, output_shape=None, + output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """ + Apply an affine transformation. + + Given an output image pixel index vector ``o``, the pixel value + is determined from the input image at position + ``np.dot(matrix, o) + offset``. + + This does 'pull' (or 'backward') resampling, transforming the output space + to the input to locate data. 
Affine transformations are often described in + the 'push' (or 'forward') direction, transforming input to output. If you + have a matrix for the 'push' transformation, use its inverse + (:func:`numpy.linalg.inv`) in this function. + + Parameters + ---------- + %(input)s + matrix : ndarray + The inverse coordinate transformation matrix, mapping output + coordinates to input coordinates. If ``ndim`` is the number of + dimensions of ``input``, the given matrix must have one of the + following shapes: + + - ``(ndim, ndim)``: the linear transformation matrix for each + output coordinate. + - ``(ndim,)``: assume that the 2-D transformation matrix is + diagonal, with the diagonal specified by the given value. A more + efficient algorithm is then used that exploits the separability + of the problem. + - ``(ndim + 1, ndim + 1)``: assume that the transformation is + specified using homogeneous coordinates [1]_. In this case, any + value passed to ``offset`` is ignored. + - ``(ndim, ndim + 1)``: as above, but the bottom row of a + homogeneous transformation matrix is always ``[0, 0, ..., 1]``, + and may be omitted. + + offset : float or sequence, optional + The offset into the array where the transform is applied. If a float, + `offset` is the same for each axis. If a sequence, `offset` should + contain one value for each axis. + output_shape : tuple of ints, optional + Shape tuple. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + + Returns + ------- + affine_transform : ndarray + The transformed input. + + Notes + ----- + The given matrix and offset are used to find for each point in the + output the corresponding coordinates in the input by an affine + transformation. The value of the input at those coordinates is + determined by spline interpolation of the requested order. 
Points + outside the boundaries of the input are filled according to the given + mode. + + .. versionchanged:: 0.18.0 + Previously, the exact interpretation of the affine transformation + depended on whether the matrix was supplied as a 1-D or a + 2-D array. If a 1-D array was supplied + to the matrix parameter, the output pixel value at index ``o`` + was determined from the input image at position + ``matrix * (o + offset)``. + + For complex-valued `input`, this function transforms the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if output_shape is None: + if isinstance(output, numpy.ndarray): + output_shape = output.shape + else: + output_shape = input.shape + if input.ndim < 1 or len(output_shape) < 1: + raise RuntimeError('input and output rank must be > 0') + complex_output = numpy.iscomplexobj(input) + output = _ni_support._get_output(output, input, shape=output_shape, + complex_output=complex_output) + if complex_output: + kwargs = dict(offset=offset, output_shape=output_shape, order=order, + mode=mode, prefilter=prefilter) + affine_transform(input.real, matrix, output=output.real, + cval=numpy.real(cval), **kwargs) + affine_transform(input.imag, matrix, output=output.imag, + cval=numpy.imag(cval), **kwargs) + return output + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=numpy.float64, + mode=mode) + else: + npad = 0 + filtered = input + mode = _ni_support._extend_mode_to_code(mode) + matrix = numpy.asarray(matrix, dtype=numpy.float64) + if matrix.ndim not in [1, 2] or matrix.shape[0] < 1: + raise RuntimeError('no proper affine matrix provided') + if (matrix.ndim == 2 and matrix.shape[1] == 
input.ndim + 1 and + (matrix.shape[0] in [input.ndim, input.ndim + 1])): + if matrix.shape[0] == input.ndim + 1: + exptd = [0] * input.ndim + [1] + if not numpy.all(matrix[input.ndim] == exptd): + msg = ('Expected homogeneous transformation matrix with ' + 'shape {} for image shape {}, but bottom row was ' + 'not equal to {}'.format(matrix.shape, input.shape, exptd)) + raise ValueError(msg) + # assume input is homogeneous coordinate transformation matrix + offset = matrix[:input.ndim, input.ndim] + matrix = matrix[:input.ndim, :input.ndim] + if matrix.shape[0] != input.ndim: + raise RuntimeError('affine matrix has wrong number of rows') + if matrix.ndim == 2 and matrix.shape[1] != output.ndim: + raise RuntimeError('affine matrix has wrong number of columns') + if not matrix.flags.contiguous: + matrix = matrix.copy() + offset = _ni_support._normalize_sequence(offset, input.ndim) + offset = numpy.asarray(offset, dtype=numpy.float64) + if offset.ndim != 1 or offset.shape[0] < 1: + raise RuntimeError('no proper offset provided') + if not offset.flags.contiguous: + offset = offset.copy() + if matrix.ndim == 1: + warnings.warn( + "The behavior of affine_transform with a 1-D " + "array supplied for the matrix parameter has changed in " + "SciPy 0.18.0.", + stacklevel=2 + ) + _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order, + mode, cval, npad, False) + else: + _nd_image.geometric_transform(filtered, None, None, matrix, offset, + output, order, mode, cval, npad, None, + None) + return output + + +@docfiller +def shift(input, shift, output=None, order=3, mode='constant', cval=0.0, + prefilter=True): + """ + Shift an array. + + The array is shifted using spline interpolation of the requested order. + Points outside the boundaries of the input are filled according to the + given mode. + + Parameters + ---------- + %(input)s + shift : float or sequence + The shift along the axes. If a float, `shift` is the same for each + axis. 
If a sequence, `shift` should contain one value for each axis. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + + Returns + ------- + shift : ndarray + The shifted input. + + See Also + -------- + affine_transform : Affine transformations + + Notes + ----- + For complex-valued `input`, this function shifts the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + Examples + -------- + Import the necessary modules and an exemplary image. + + >>> from scipy.ndimage import shift + >>> import matplotlib.pyplot as plt + >>> from scipy import datasets + >>> image = datasets.ascent() + + Shift the image vertically by 20 pixels. + + >>> image_shifted_vertically = shift(image, (20, 0)) + + Shift the image vertically by -200 pixels and horizontally by 100 pixels. + + >>> image_shifted_both_directions = shift(image, (-200, 100)) + + Plot the original and the shifted images. + + >>> fig, axes = plt.subplots(3, 1, figsize=(4, 12)) + >>> plt.gray() # show the filtered result in grayscale + >>> top, middle, bottom = axes + >>> for ax in axes: + ... 
ax.set_axis_off() # remove coordinate system + >>> top.imshow(image) + >>> top.set_title("Original image") + >>> middle.imshow(image_shifted_vertically) + >>> middle.set_title("Vertically shifted image") + >>> bottom.imshow(image_shifted_both_directions) + >>> bottom.set_title("Image shifted in both directions") + >>> fig.tight_layout() + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if input.ndim < 1: + raise RuntimeError('input and output rank must be > 0') + complex_output = numpy.iscomplexobj(input) + output = _ni_support._get_output(output, input, + complex_output=complex_output) + if complex_output: + # import under different name to avoid confusion with shift parameter + from scipy.ndimage._interpolation import shift as _shift + + kwargs = dict(order=order, mode=mode, prefilter=prefilter) + _shift(input.real, shift, output=output.real, cval=numpy.real(cval), + **kwargs) + _shift(input.imag, shift, output=output.imag, cval=numpy.imag(cval), + **kwargs) + return output + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=numpy.float64, + mode=mode) + else: + npad = 0 + filtered = input + mode = _ni_support._extend_mode_to_code(mode) + shift = _ni_support._normalize_sequence(shift, input.ndim) + shift = [-ii for ii in shift] + shift = numpy.asarray(shift, dtype=numpy.float64) + if not shift.flags.contiguous: + shift = shift.copy() + _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval, + npad, False) + return output + + +@docfiller +def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, + prefilter=True, *, grid_mode=False): + """ + Zoom an array. + + The array is zoomed using spline interpolation of the requested order. + + Parameters + ---------- + %(input)s + zoom : float or sequence + The zoom factor along the axes. If a float, `zoom` is the same for each + axis. 
If a sequence, `zoom` should contain one value for each axis. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + grid_mode : bool, optional + If False, the distance from the pixel centers is zoomed. Otherwise, the + distance including the full pixel extent is used. For example, a 1d + signal of length 5 is considered to have length 4 when `grid_mode` is + False, but length 5 when `grid_mode` is True. See the following + visual illustration: + + .. code-block:: text + + | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 | + |<-------------------------------------->| + vs. + |<----------------------------------------------->| + + The starting point of the arrow in the diagram above corresponds to + coordinate location 0 in each mode. + + Returns + ------- + zoom : ndarray + The zoomed input. + + Notes + ----- + For complex-valued `input`, this function zooms the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.zoom(ascent, 3.0) + >>> ax1.imshow(ascent, vmin=0, vmax=255) + >>> ax2.imshow(result, vmin=0, vmax=255) + >>> plt.show() + + >>> print(ascent.shape) + (512, 512) + + >>> print(result.shape) + (1536, 1536) + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = numpy.asarray(input) + if input.ndim < 1: + raise RuntimeError('input and output rank must be > 0') + zoom = _ni_support._normalize_sequence(zoom, input.ndim) + output_shape = tuple( + [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)]) + complex_output = numpy.iscomplexobj(input) + output = _ni_support._get_output(output, input, shape=output_shape, + complex_output=complex_output) + if complex_output: + # import under different name to avoid confusion with zoom parameter + from scipy.ndimage._interpolation import zoom as _zoom + + kwargs = dict(order=order, mode=mode, prefilter=prefilter) + _zoom(input.real, zoom, output=output.real, cval=numpy.real(cval), + **kwargs) + _zoom(input.imag, zoom, output=output.imag, cval=numpy.imag(cval), + **kwargs) + return output + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=numpy.float64, + mode=mode) + else: + npad = 0 + filtered = input + if grid_mode: + # warn about modes that may have surprising behavior + suggest_mode = None + if mode == 'constant': + suggest_mode = 'grid-constant' + elif mode == 'wrap': + suggest_mode = 'grid-wrap' + if suggest_mode is not None: + warnings.warn( + ("It is recommended to use mode = {} instead of {} when " + "grid_mode is True.").format(suggest_mode, mode), + stacklevel=2 + ) + mode = _ni_support._extend_mode_to_code(mode) 
+ + zoom_div = numpy.array(output_shape) + zoom_nominator = numpy.array(input.shape) + if not grid_mode: + zoom_div -= 1 + zoom_nominator -= 1 + + # Zooming to infinite values is unpredictable, so just choose + # zoom factor 1 instead + zoom = numpy.divide(zoom_nominator, zoom_div, + out=numpy.ones_like(input.shape, dtype=numpy.float64), + where=zoom_div != 0) + zoom = numpy.ascontiguousarray(zoom) + _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad, + grid_mode) + return output + + +@docfiller +def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """ + Rotate an array. + + The array is rotated in the plane defined by the two axes given by the + `axes` parameter using spline interpolation of the requested order. + + Parameters + ---------- + %(input)s + angle : float + The rotation angle in degrees. + axes : tuple of 2 ints, optional + The two axes that define the plane of rotation. Default is the first + two axes. + reshape : bool, optional + If `reshape` is true, the output shape is adapted so that the input + array is contained completely in the output. Default is True. + %(output)s + order : int, optional + The order of the spline interpolation, default is 3. + The order has to be in the range 0-5. + %(mode_interp_constant)s + %(cval)s + %(prefilter)s + + Returns + ------- + rotate : ndarray + The rotated input. + + Notes + ----- + For complex-valued `input`, this function rotates the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. 
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure(figsize=(10, 3)) + >>> ax1, ax2, ax3 = fig.subplots(1, 3) + >>> img = datasets.ascent() + >>> img_45 = ndimage.rotate(img, 45, reshape=False) + >>> full_img_45 = ndimage.rotate(img, 45, reshape=True) + >>> ax1.imshow(img, cmap='gray') + >>> ax1.set_axis_off() + >>> ax2.imshow(img_45, cmap='gray') + >>> ax2.set_axis_off() + >>> ax3.imshow(full_img_45, cmap='gray') + >>> ax3.set_axis_off() + >>> fig.set_layout_engine('tight') + >>> plt.show() + >>> print(img.shape) + (512, 512) + >>> print(img_45.shape) + (512, 512) + >>> print(full_img_45.shape) + (724, 724) + + """ + input_arr = numpy.asarray(input) + ndim = input_arr.ndim + + if ndim < 2: + raise ValueError('input array should be at least 2D') + + axes = list(axes) + + if len(axes) != 2: + raise ValueError('axes should contain exactly two values') + + if not all([float(ax).is_integer() for ax in axes]): + raise ValueError('axes should contain only integer values') + + if axes[0] < 0: + axes[0] += ndim + if axes[1] < 0: + axes[1] += ndim + if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim: + raise ValueError('invalid rotation plane specified') + + axes.sort() + + c, s = special.cosdg(angle), special.sindg(angle) + + rot_matrix = numpy.array([[c, s], + [-s, c]]) + + img_shape = numpy.asarray(input_arr.shape) + in_plane_shape = img_shape[axes] + if reshape: + # Compute transformed input bounds + iy, ix = in_plane_shape + out_bounds = rot_matrix @ [[0, 0, iy, iy], + [0, ix, 0, ix]] + # Compute the shape of the transformed input plane + out_plane_shape = (numpy.ptp(out_bounds, axis=1) + 0.5).astype(int) + else: + out_plane_shape = img_shape[axes] + + out_center = rot_matrix @ ((out_plane_shape - 1) / 2) + in_center = (in_plane_shape - 1) / 2 + offset = in_center - out_center + + output_shape = img_shape + output_shape[axes] = out_plane_shape + output_shape = 
tuple(output_shape) + + complex_output = numpy.iscomplexobj(input_arr) + output = _ni_support._get_output(output, input_arr, shape=output_shape, + complex_output=complex_output) + + if ndim <= 2: + affine_transform(input_arr, rot_matrix, offset, output_shape, output, + order, mode, cval, prefilter) + else: + # If ndim > 2, the rotation is applied over all the planes + # parallel to axes + planes_coord = itertools.product( + *[[slice(None)] if ax in axes else range(img_shape[ax]) + for ax in range(ndim)]) + + out_plane_shape = tuple(out_plane_shape) + + for coordinates in planes_coord: + ia = input_arr[coordinates] + oa = output[coordinates] + affine_transform(ia, rot_matrix, offset, out_plane_shape, + oa, order, mode, cval, prefilter) + + return output diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/_measurements.py b/venv/lib/python3.10/site-packages/scipy/ndimage/_measurements.py new file mode 100644 index 0000000000000000000000000000000000000000..bb3c8ef19ebc9f56e3bc0b2cc47e9664e64dd60c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/ndimage/_measurements.py @@ -0,0 +1,1681 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy +import numpy as np +from . import _ni_support +from . import _ni_label +from . import _nd_image +from . import _morphology + +__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean', + 'variance', 'standard_deviation', 'minimum', 'maximum', 'median', + 'minimum_position', 'maximum_position', 'extrema', 'center_of_mass', + 'histogram', 'watershed_ift', 'sum_labels', 'value_indices'] + + +def label(input, structure=None, output=None): + """ + Label features in an array. + + Parameters + ---------- + input : array_like + An array-like object to be labeled. Any non-zero values in `input` are + counted as features and zero values are considered the background. + structure : array_like, optional + A structuring element that defines feature connections. + `structure` must be centrosymmetric + (see Notes). + If no structuring element is provided, + one is automatically generated with a squared connectivity equal to + one. That is, for a 2-D `input` array, the default structuring element + is:: + + [[0,1,0], + [1,1,1], + [0,1,0]] + + output : (None, data-type, array_like), optional + If `output` is a data type, it specifies the type of the resulting + labeled feature array. 
+ If `output` is an array-like object, then `output` will be updated + with the labeled features from this function. This function can + operate in-place, by passing output=input. + Note that the output must be able to store the largest label, or this + function will raise an Exception. + + Returns + ------- + label : ndarray or int + An integer ndarray where each unique feature in `input` has a unique + label in the returned array. + num_features : int + How many objects were found. + + If `output` is None, this function returns a tuple of + (`labeled_array`, `num_features`). + + If `output` is a ndarray, then it will be updated with values in + `labeled_array` and only `num_features` will be returned by this + function. + + See Also + -------- + find_objects : generate a list of slices for the labeled features (or + objects); useful for finding features' position or + dimensions + + Notes + ----- + A centrosymmetric matrix is a matrix that is symmetric about the center. + See [1]_ for more information. + + The `structure` matrix must be centrosymmetric to ensure + two-way connections. + For instance, if the `structure` matrix is not centrosymmetric + and is defined as:: + + [[0,1,0], + [1,1,0], + [0,0,0]] + + and the `input` is:: + + [[1,2], + [0,3]] + + then the structure matrix would indicate the + entry 2 in the input is connected to 1, + but 1 is not connected to 2. + + References + ---------- + .. [1] James R. Weaver, "Centrosymmetric (cross-symmetric) + matrices, their basic properties, eigenvalues, and + eigenvectors." The American Mathematical Monthly 92.10 + (1985): 711-717. + + Examples + -------- + Create an image with some features, then label it using the default + (cross-shaped) structuring element: + + >>> from scipy.ndimage import label, generate_binary_structure + >>> import numpy as np + >>> a = np.array([[0,0,1,1,0,0], + ... [0,0,0,1,0,0], + ... [1,1,0,0,1,0], + ... 
[0,0,0,1,0,0]]) + >>> labeled_array, num_features = label(a) + + Each of the 4 features are labeled with a different integer: + + >>> num_features + 4 + >>> labeled_array + array([[0, 0, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + [2, 2, 0, 0, 3, 0], + [0, 0, 0, 4, 0, 0]]) + + Generate a structuring element that will consider features connected even + if they touch diagonally: + + >>> s = generate_binary_structure(2,2) + + or, + + >>> s = [[1,1,1], + ... [1,1,1], + ... [1,1,1]] + + Label the image using the new structuring element: + + >>> labeled_array, num_features = label(a, structure=s) + + Show the 2 labeled features (note that features 1, 3, and 4 from above are + now considered a single feature): + + >>> num_features + 2 + >>> labeled_array + array([[0, 0, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + [2, 2, 0, 0, 1, 0], + [0, 0, 0, 1, 0, 0]]) + + """ + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + if structure is None: + structure = _morphology.generate_binary_structure(input.ndim, 1) + structure = numpy.asarray(structure, dtype=bool) + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have equal rank') + for ii in structure.shape: + if ii != 3: + raise ValueError('structure dimensions must be equal to 3') + + # Use 32 bits if it's large enough for this image. + # _ni_label.label() needs two entries for background and + # foreground tracking + need_64bits = input.size >= (2**31 - 2) + + if isinstance(output, numpy.ndarray): + if output.shape != input.shape: + raise ValueError("output shape not correct") + caller_provided_output = True + else: + caller_provided_output = False + if output is None: + output = np.empty(input.shape, np.intp if need_64bits else np.int32) + else: + output = np.empty(input.shape, output) + + # handle scalars, 0-D arrays + if input.ndim == 0 or input.size == 0: + if input.ndim == 0: + # scalar + maxlabel = 1 if (input != 0) else 0 + output[...] 
= maxlabel + else: + # 0-D + maxlabel = 0 + if caller_provided_output: + return maxlabel + else: + return output, maxlabel + + try: + max_label = _ni_label._label(input, structure, output) + except _ni_label.NeedMoreBits as e: + # Make another attempt with enough bits, then try to cast to the + # new type. + tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32) + max_label = _ni_label._label(input, structure, tmp_output) + output[...] = tmp_output[...] + if not np.all(output == tmp_output): + # refuse to return bad results + raise RuntimeError( + "insufficient bit-depth in requested output type" + ) from e + + if caller_provided_output: + # result was written in-place + return max_label + else: + return output, max_label + + +def find_objects(input, max_label=0): + """ + Find objects in a labeled array. + + Parameters + ---------- + input : ndarray of ints + Array containing objects defined by different labels. Labels with + value 0 are ignored. + max_label : int, optional + Maximum label to be searched for in `input`. If max_label is not + given, the positions of all objects are returned. + + Returns + ------- + object_slices : list of tuples + A list of tuples, with each tuple containing N slices (with N the + dimension of the input array). Slices correspond to the minimal + parallelepiped that contains the object. If a number is missing, + None is returned instead of a slice. The label ``l`` corresponds to + the index ``l-1`` in the returned list. + + See Also + -------- + label, center_of_mass + + Notes + ----- + This function is very useful for isolating a volume of interest inside + a 3-D array, that cannot be "seen through". 
+ + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((6,6), dtype=int) + >>> a[2:4, 2:4] = 1 + >>> a[4, 4] = 1 + >>> a[:2, :3] = 2 + >>> a[0, 5] = 3 + >>> a + array([[2, 2, 2, 0, 0, 3], + [2, 2, 2, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0]]) + >>> ndimage.find_objects(a) + [(slice(2, 5, None), slice(2, 5, None)), + (slice(0, 2, None), slice(0, 3, None)), + (slice(0, 1, None), slice(5, 6, None))] + >>> ndimage.find_objects(a, max_label=2) + [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))] + >>> ndimage.find_objects(a == 1, max_label=2) + [(slice(2, 5, None), slice(2, 5, None)), None] + + >>> loc = ndimage.find_objects(a)[0] + >>> a[loc] + array([[1, 1, 0], + [1, 1, 0], + [0, 0, 1]]) + + """ + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + + if max_label < 1: + max_label = input.max() + + return _nd_image.find_objects(input, max_label) + + +def value_indices(arr, *, ignore_value=None): + """ + Find indices of each distinct value in given array. + + Parameters + ---------- + arr : ndarray of ints + Array containing integer values. + ignore_value : int, optional + This value will be ignored in searching the `arr` array. If not + given, all values found will be included in output. Default + is None. + + Returns + ------- + indices : dictionary + A Python dictionary of array indices for each distinct value. The + dictionary is keyed by the distinct values, the entries are array + index tuples covering all occurrences of the value within the + array. + + This dictionary can occupy significant memory, usually several times + the size of the input array. 
+ + See Also + -------- + label, maximum, median, minimum_position, extrema, sum, mean, variance, + standard_deviation, numpy.where, numpy.unique + + Notes + ----- + For a small array with few distinct values, one might use + `numpy.unique()` to find all possible values, and ``(arr == val)`` to + locate each value within that array. However, for large arrays, + with many distinct values, this can become extremely inefficient, + as locating each value would require a new search through the entire + array. Using this function, there is essentially one search, with + the indices saved for all distinct values. + + This is useful when matching a categorical image (e.g. a segmentation + or classification) to an associated image of other data, allowing + any per-class statistic(s) to then be calculated. Provides a + more flexible alternative to functions like ``scipy.ndimage.mean()`` + and ``scipy.ndimage.variance()``. + + Some other closely related functionality, with different strengths and + weaknesses, can also be found in ``scipy.stats.binned_statistic()`` and + the `scikit-image `_ function + ``skimage.measure.regionprops()``. + + Note for IDL users: this provides functionality equivalent to IDL's + REVERSE_INDICES option (as per the IDL documentation for the + `HISTOGRAM `_ + function). + + .. versionadded:: 1.10.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import ndimage + >>> a = np.zeros((6, 6), dtype=int) + >>> a[2:4, 2:4] = 1 + >>> a[4, 4] = 1 + >>> a[:2, :3] = 2 + >>> a[0, 5] = 3 + >>> a + array([[2, 2, 2, 0, 0, 3], + [2, 2, 2, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0]]) + >>> val_indices = ndimage.value_indices(a) + + The dictionary `val_indices` will have an entry for each distinct + value in the input array. + + >>> val_indices.keys() + dict_keys([0, 1, 2, 3]) + + The entry for each value is an index tuple, locating the elements + with that value. 
+ + >>> ndx1 = val_indices[1] + >>> ndx1 + (array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4])) + + This can be used to index into the original array, or any other + array with the same shape. + + >>> a[ndx1] + array([1, 1, 1, 1, 1]) + + If the zeros were to be ignored, then the resulting dictionary + would no longer have an entry for zero. + + >>> val_indices = ndimage.value_indices(a, ignore_value=0) + >>> val_indices.keys() + dict_keys([1, 2, 3]) + + """ + # Cope with ignore_value being None, without too much extra complexity + # in the C code. If not None, the value is passed in as a numpy array + # with the same dtype as arr. + ignore_value_arr = numpy.zeros((1,), dtype=arr.dtype) + ignoreIsNone = (ignore_value is None) + if not ignoreIsNone: + ignore_value_arr[0] = ignore_value_arr.dtype.type(ignore_value) + + val_indices = _nd_image.value_indices(arr, ignoreIsNone, ignore_value_arr) + return val_indices + + +def labeled_comprehension(input, labels, index, func, out_dtype, default, + pass_positions=False): + """ + Roughly equivalent to [func(input[labels == i]) for i in index]. + + Sequentially applies an arbitrary function (that works on array_like input) + to subsets of an N-D image array specified by `labels` and `index`. + The option exists to provide the function with positional parameters as the + second argument. + + Parameters + ---------- + input : array_like + Data from which to select `labels` to process. + labels : array_like or None + Labels to objects in `input`. + If not None, array must be same shape as `input`. + If None, `func` is applied to raveled `input`. + index : int, sequence of ints or None + Subset of `labels` to which to apply `func`. + If a scalar, a single value is returned. + If None, `func` is applied to all non-zero values of `labels`. + func : callable + Python function to apply to `labels` from `input`. + out_dtype : dtype + Dtype to use for `result`. 
+ default : int, float or None + Default return value when a element of `index` does not exist + in `labels`. + pass_positions : bool, optional + If True, pass linear indices to `func` as a second argument. + Default is False. + + Returns + ------- + result : ndarray + Result of applying `func` to each of `labels` to `input` in `index`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> lbl, nlbl = ndimage.label(a) + >>> lbls = np.arange(1, nlbl+1) + >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0) + array([ 2.75, 5.5 , 6. ]) + + Falling back to `default`: + + >>> lbls = np.arange(1, nlbl+2) + >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1) + array([ 2.75, 5.5 , 6. , -1. ]) + + Passing positions: + + >>> def fn(val, pos): + ... print("fn says: %s : %s" % (val, pos)) + ... return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum()) + ... 
+ >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True) + fn says: [1 2 5 3] : [0 1 4 5] + fn says: [4 7] : [ 7 11] + fn says: [9 3] : [12 13] + array([ 11., 11., -12., 0.]) + + """ + + as_scalar = numpy.isscalar(index) + input = numpy.asarray(input) + + if pass_positions: + positions = numpy.arange(input.size).reshape(input.shape) + + if labels is None: + if index is not None: + raise ValueError("index without defined labels") + if not pass_positions: + return func(input.ravel()) + else: + return func(input.ravel(), positions.ravel()) + + try: + input, labels = numpy.broadcast_arrays(input, labels) + except ValueError as e: + raise ValueError("input and labels must have the same shape " + "(excepting dimensions with width 1)") from e + + if index is None: + if not pass_positions: + return func(input[labels > 0]) + else: + return func(input[labels > 0], positions[labels > 0]) + + index = numpy.atleast_1d(index) + if np.any(index.astype(labels.dtype).astype(index.dtype) != index): + raise ValueError(f"Cannot convert index values from <{index.dtype}> to " + f"<{labels.dtype}> (labels' type) without loss of precision") + + index = index.astype(labels.dtype) + + # optimization: find min/max in index, + # and select those parts of labels, input, and positions + lo = index.min() + hi = index.max() + mask = (labels >= lo) & (labels <= hi) + + # this also ravels the arrays + labels = labels[mask] + input = input[mask] + if pass_positions: + positions = positions[mask] + + # sort everything by labels + label_order = labels.argsort() + labels = labels[label_order] + input = input[label_order] + if pass_positions: + positions = positions[label_order] + + index_order = index.argsort() + sorted_index = index[index_order] + + def do_map(inputs, output): + """labels must be sorted""" + nidx = sorted_index.size + + # Find boundaries for each stretch of constant labels + # This could be faster, but we already paid N log N to sort labels. 
+ lo = numpy.searchsorted(labels, sorted_index, side='left') + hi = numpy.searchsorted(labels, sorted_index, side='right') + + for i, l, h in zip(range(nidx), lo, hi): + if l == h: + continue + output[i] = func(*[inp[l:h] for inp in inputs]) + + temp = numpy.empty(index.shape, out_dtype) + temp[:] = default + if not pass_positions: + do_map([input], temp) + else: + do_map([input, positions], temp) + + output = numpy.zeros(index.shape, out_dtype) + output[index_order] = temp + if as_scalar: + output = output[0] + + return output + + +def _safely_castable_to_int(dt): + """Test whether the NumPy data type `dt` can be safely cast to an int.""" + int_size = np.dtype(int).itemsize + safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or + (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size)) + return safe + + +def _stats(input, labels=None, index=None, centered=False): + """Count, sum, and optionally compute (sum - centre)^2 of input by label + + Parameters + ---------- + input : array_like, N-D + The input data to be analyzed. + labels : array_like (N-D), optional + The labels of the data in `input`. This array must be broadcast + compatible with `input`; typically, it is the same shape as `input`. + If `labels` is None, all nonzero values in `input` are treated as + the single labeled group. + index : label or sequence of labels, optional + These are the labels of the groups for which the stats are computed. + If `index` is None, the stats are computed for the single group where + `labels` is greater than 0. + centered : bool, optional + If True, the centered sum of squares for each labeled group is + also returned. Default is False. + + Returns + ------- + counts : int or ndarray of ints + The number of elements in each labeled group. + sums : scalar or ndarray of scalars + The sums of the values in each labeled group. 
+ sums_c : scalar or ndarray of scalars, optional + The sums of mean-centered squares of the values in each labeled group. + This is only returned if `centered` is True. + + """ + def single_group(vals): + if centered: + vals_c = vals - vals.mean() + return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum() + else: + return vals.size, vals.sum() + + if labels is None: + return single_group(input) + + # ensure input and labels match sizes + input, labels = numpy.broadcast_arrays(input, labels) + + if index is None: + return single_group(input[labels > 0]) + + if numpy.isscalar(index): + return single_group(input[labels == index]) + + def _sum_centered(labels): + # `labels` is expected to be an ndarray with the same shape as `input`. + # It must contain the label indices (which are not necessarily the labels + # themselves). + means = sums / counts + centered_input = input - means[labels] + # bincount expects 1-D inputs, so we ravel the arguments. + bc = numpy.bincount(labels.ravel(), + weights=(centered_input * + centered_input.conjugate()).ravel()) + return bc + + # Remap labels to unique integers if necessary, or if the largest + # label is larger than the number of values. + + if (not _safely_castable_to_int(labels.dtype) or + labels.min() < 0 or labels.max() > labels.size): + # Use numpy.unique to generate the label indices. `new_labels` will + # be 1-D, but it should be interpreted as the flattened N-D array of + # label indices. + unique_labels, new_labels = numpy.unique(labels, return_inverse=True) + new_labels = np.reshape(new_labels, (-1,)) # flatten, since it may be >1-D + counts = numpy.bincount(new_labels) + sums = numpy.bincount(new_labels, weights=input.ravel()) + if centered: + # Compute the sum of the mean-centered squares. + # We must reshape new_labels to the N-D shape of `input` before + # passing it _sum_centered. 
+ sums_c = _sum_centered(new_labels.reshape(labels.shape)) + idxs = numpy.searchsorted(unique_labels, index) + # make all of idxs valid + idxs[idxs >= unique_labels.size] = 0 + found = (unique_labels[idxs] == index) + else: + # labels are an integer type allowed by bincount, and there aren't too + # many, so call bincount directly. + counts = numpy.bincount(labels.ravel()) + sums = numpy.bincount(labels.ravel(), weights=input.ravel()) + if centered: + sums_c = _sum_centered(labels) + # make sure all index values are valid + idxs = numpy.asanyarray(index, numpy.int_).copy() + found = (idxs >= 0) & (idxs < counts.size) + idxs[~found] = 0 + + counts = counts[idxs] + counts[~found] = 0 + sums = sums[idxs] + sums[~found] = 0 + + if not centered: + return (counts, sums) + else: + sums_c = sums_c[idxs] + sums_c[~found] = 0 + return (counts, sums, sums_c) + + +def sum(input, labels=None, index=None): + """ + Calculate the sum of the values of the array. + + Notes + ----- + This is an alias for `ndimage.sum_labels` kept for backwards compatibility + reasons, for new code please prefer `sum_labels`. See the `sum_labels` + docstring for more details. + + """ + return sum_labels(input, labels, index) + + +def sum_labels(input, labels=None, index=None): + """ + Calculate the sum of the values of the array. + + Parameters + ---------- + input : array_like + Values of `input` inside the regions defined by `labels` + are summed together. + labels : array_like of ints, optional + Assign labels to the values of the array. Has to have the same shape as + `input`. + index : array_like, optional + A single label number or a sequence of label numbers of + the objects to be measured. + + Returns + ------- + sum : ndarray or scalar + An array of the sums of values of `input` inside the regions defined + by `labels` with the same shape as `index`. If 'index' is None or scalar, + a scalar is returned. 
+ + See Also + -------- + mean, median + + Examples + -------- + >>> from scipy import ndimage + >>> input = [0,1,2,3] + >>> labels = [1,1,2,2] + >>> ndimage.sum_labels(input, labels, index=[1,2]) + [1.0, 5.0] + >>> ndimage.sum_labels(input, labels, index=1) + 1 + >>> ndimage.sum_labels(input, labels) + 6 + + + """ + count, sum = _stats(input, labels, index) + return sum + + +def mean(input, labels=None, index=None): + """ + Calculate the mean of the values of an array at labels. + + Parameters + ---------- + input : array_like + Array on which to compute the mean of elements over distinct + regions. + labels : array_like, optional + Array of labels of same shape, or broadcastable to the same shape as + `input`. All elements sharing the same label form one region over + which the mean of the elements is computed. + index : int or sequence of ints, optional + Labels of the objects over which the mean is to be computed. + Default is None, in which case the mean for all values where label is + greater than 0 is calculated. + + Returns + ------- + out : list + Sequence of same length as `index`, with the mean of the different + regions labeled by the labels in `index`. + + See Also + -------- + variance, standard_deviation, minimum, maximum, sum, label + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.arange(25).reshape((5,5)) + >>> labels = np.zeros_like(a) + >>> labels[3:5,3:5] = 1 + >>> index = np.unique(labels) + >>> labels + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 1], + [0, 0, 0, 1, 1]]) + >>> index + array([0, 1]) + >>> ndimage.mean(a, labels=labels, index=index) + [10.285714285714286, 21.0] + + """ + + count, sum = _stats(input, labels, index) + return sum / numpy.asanyarray(count).astype(numpy.float64) + + +def variance(input, labels=None, index=None): + """ + Calculate the variance of the values of an N-D image array, optionally at + specified sub-regions. 
+ + Parameters + ---------- + input : array_like + Nd-image data to process. + labels : array_like, optional + Labels defining sub-regions in `input`. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + `labels` to include in output. If None (default), all values where + `labels` is non-zero are used. + + Returns + ------- + variance : float or ndarray + Values of variance, for each sub-region if `labels` and `index` are + specified. + + See Also + -------- + label, standard_deviation, maximum, minimum, extrema + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> ndimage.variance(a) + 7.609375 + + Features to process can be specified using `labels` and `index`: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1)) + array([ 2.1875, 2.25 , 9. ]) + + If no index is given, all non-zero `labels` are processed: + + >>> ndimage.variance(a, lbl) + 6.1875 + + """ + count, sum, sum_c_sq = _stats(input, labels, index, centered=True) + return sum_c_sq / np.asanyarray(count).astype(float) + + +def standard_deviation(input, labels=None, index=None): + """ + Calculate the standard deviation of the values of an N-D image array, + optionally at specified sub-regions. + + Parameters + ---------- + input : array_like + N-D image data to process. + labels : array_like, optional + Labels to identify sub-regions in `input`. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + `labels` to include in output. If None (default), all values where + `labels` is non-zero are used. + + Returns + ------- + standard_deviation : float or ndarray + Values of standard deviation, for each sub-region if `labels` and + `index` are specified. 
+ + See Also + -------- + label, variance, maximum, minimum, extrema + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> ndimage.standard_deviation(a) + 2.7585095613392387 + + Features to process can be specified using `labels` and `index`: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1)) + array([ 1.479, 1.5 , 3. ]) + + If no index is given, non-zero `labels` are processed: + + >>> ndimage.standard_deviation(a, lbl) + 2.4874685927665499 + + """ + return numpy.sqrt(variance(input, labels, index)) + + +def _select(input, labels=None, index=None, find_min=False, find_max=False, + find_min_positions=False, find_max_positions=False, + find_median=False): + """Returns min, max, or both, plus their positions (if requested), and + median.""" + + input = numpy.asanyarray(input) + + find_positions = find_min_positions or find_max_positions + positions = None + if find_positions: + positions = numpy.arange(input.size).reshape(input.shape) + + def single_group(vals, positions): + result = [] + if find_min: + result += [vals.min()] + if find_min_positions: + result += [positions[vals == vals.min()][0]] + if find_max: + result += [vals.max()] + if find_max_positions: + result += [positions[vals == vals.max()][0]] + if find_median: + result += [numpy.median(vals)] + return result + + if labels is None: + return single_group(input, positions) + + # ensure input and labels match sizes + input, labels = numpy.broadcast_arrays(input, labels) + + if index is None: + mask = (labels > 0) + masked_positions = None + if find_positions: + masked_positions = positions[mask] + return single_group(input[mask], masked_positions) + + if numpy.isscalar(index): + mask = (labels == index) + masked_positions = None + if find_positions: + masked_positions = positions[mask] + return single_group(input[mask], 
masked_positions) + + # remap labels to unique integers if necessary, or if the largest + # label is larger than the number of values. + if (not _safely_castable_to_int(labels.dtype) or + labels.min() < 0 or labels.max() > labels.size): + # remap labels, and indexes + unique_labels, labels = numpy.unique(labels, return_inverse=True) + idxs = numpy.searchsorted(unique_labels, index) + + # make all of idxs valid + idxs[idxs >= unique_labels.size] = 0 + found = (unique_labels[idxs] == index) + else: + # labels are an integer type, and there aren't too many + idxs = numpy.asanyarray(index, numpy.int_).copy() + found = (idxs >= 0) & (idxs <= labels.max()) + + idxs[~ found] = labels.max() + 1 + + if find_median: + order = numpy.lexsort((input.ravel(), labels.ravel())) + else: + order = input.ravel().argsort() + input = input.ravel()[order] + labels = labels.ravel()[order] + if find_positions: + positions = positions.ravel()[order] + + result = [] + if find_min: + mins = numpy.zeros(labels.max() + 2, input.dtype) + mins[labels[::-1]] = input[::-1] + result += [mins[idxs]] + if find_min_positions: + minpos = numpy.zeros(labels.max() + 2, int) + minpos[labels[::-1]] = positions[::-1] + result += [minpos[idxs]] + if find_max: + maxs = numpy.zeros(labels.max() + 2, input.dtype) + maxs[labels] = input + result += [maxs[idxs]] + if find_max_positions: + maxpos = numpy.zeros(labels.max() + 2, int) + maxpos[labels] = positions + result += [maxpos[idxs]] + if find_median: + locs = numpy.arange(len(labels)) + lo = numpy.zeros(labels.max() + 2, numpy.int_) + lo[labels[::-1]] = locs[::-1] + hi = numpy.zeros(labels.max() + 2, numpy.int_) + hi[labels] = locs + lo = lo[idxs] + hi = hi[idxs] + # lo is an index to the lowest value in input for each label, + # hi is an index to the largest value. + # move them to be either the same ((hi - lo) % 2 == 0) or next + # to each other ((hi - lo) % 2 == 1), then average. 
+ step = (hi - lo) // 2 + lo += step + hi -= step + if (np.issubdtype(input.dtype, np.integer) + or np.issubdtype(input.dtype, np.bool_)): + # avoid integer overflow or boolean addition (gh-12836) + result += [(input[lo].astype('d') + input[hi].astype('d')) / 2.0] + else: + result += [(input[lo] + input[hi]) / 2.0] + + return result + + +def minimum(input, labels=None, index=None): + """ + Calculate the minimum of the values of an array over labeled regions. + + Parameters + ---------- + input : array_like + Array_like of values. For each region specified by `labels`, the + minimal values of `input` over the region is computed. + labels : array_like, optional + An array_like of integers marking different regions over which the + minimum value of `input` is to be computed. `labels` must have the + same shape as `input`. If `labels` is not specified, the minimum + over the whole array is returned. + index : array_like, optional + A list of region labels that are taken into account for computing the + minima. If index is None, the minimum over all elements where `labels` + is non-zero is returned. + + Returns + ------- + minimum : float or list of floats + List of minima of `input` over the regions determined by `labels` and + whose index is in `index`. If `index` or `labels` are not specified, a + float is returned: the minimal value of `input` if `labels` is None, + and the minimal value of elements where `labels` is greater than zero + if `index` is None. + + See Also + -------- + label, maximum, median, minimum_position, extrema, sum, mean, variance, + standard_deviation + + Notes + ----- + The function returns a Python list and not a NumPy array, use + `np.array` to convert the list to an array. + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... 
[9, 3, 0, 0]]) + >>> labels, labels_nb = ndimage.label(a) + >>> labels + array([[1, 1, 0, 0], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1)) + [1.0, 4.0, 3.0] + >>> ndimage.minimum(a) + 0.0 + >>> ndimage.minimum(a, labels=labels) + 1.0 + + """ + return _select(input, labels, index, find_min=True)[0] + + +def maximum(input, labels=None, index=None): + """ + Calculate the maximum of the values of an array over labeled regions. + + Parameters + ---------- + input : array_like + Array_like of values. For each region specified by `labels`, the + maximal values of `input` over the region is computed. + labels : array_like, optional + An array of integers marking different regions over which the + maximum value of `input` is to be computed. `labels` must have the + same shape as `input`. If `labels` is not specified, the maximum + over the whole array is returned. + index : array_like, optional + A list of region labels that are taken into account for computing the + maxima. If index is None, the maximum over all elements where `labels` + is non-zero is returned. + + Returns + ------- + output : float or list of floats + List of maxima of `input` over the regions determined by `labels` and + whose index is in `index`. If `index` or `labels` are not specified, a + float is returned: the maximal value of `input` if `labels` is None, + and the maximal value of elements where `labels` is greater than zero + if `index` is None. + + See Also + -------- + label, minimum, median, maximum_position, extrema, sum, mean, variance, + standard_deviation + + Notes + ----- + The function returns a Python list and not a NumPy array, use + `np.array` to convert the list to an array. 
+ + Examples + -------- + >>> import numpy as np + >>> a = np.arange(16).reshape((4,4)) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> labels = np.zeros_like(a) + >>> labels[:2,:2] = 1 + >>> labels[2:, 1:3] = 2 + >>> labels + array([[1, 1, 0, 0], + [1, 1, 0, 0], + [0, 2, 2, 0], + [0, 2, 2, 0]]) + >>> from scipy import ndimage + >>> ndimage.maximum(a) + 15.0 + >>> ndimage.maximum(a, labels=labels, index=[1,2]) + [5.0, 14.0] + >>> ndimage.maximum(a, labels=labels) + 14.0 + + >>> b = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> labels, labels_nb = ndimage.label(b) + >>> labels + array([[1, 1, 0, 0], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1)) + [5.0, 7.0, 9.0] + + """ + return _select(input, labels, index, find_max=True)[0] + + +def median(input, labels=None, index=None): + """ + Calculate the median of the values of an array over labeled regions. + + Parameters + ---------- + input : array_like + Array_like of values. For each region specified by `labels`, the + median value of `input` over the region is computed. + labels : array_like, optional + An array_like of integers marking different regions over which the + median value of `input` is to be computed. `labels` must have the + same shape as `input`. If `labels` is not specified, the median + over the whole array is returned. + index : array_like, optional + A list of region labels that are taken into account for computing the + medians. If index is None, the median over all elements where `labels` + is non-zero is returned. + + Returns + ------- + median : float or list of floats + List of medians of `input` over the regions determined by `labels` and + whose index is in `index`. 
If `index` or `labels` are not specified, a + float is returned: the median value of `input` if `labels` is None, + and the median value of elements where `labels` is greater than zero + if `index` is None. + + See Also + -------- + label, minimum, maximum, extrema, sum, mean, variance, standard_deviation + + Notes + ----- + The function returns a Python list and not a NumPy array, use + `np.array` to convert the list to an array. + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 1], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> labels, labels_nb = ndimage.label(a) + >>> labels + array([[1, 1, 0, 2], + [1, 1, 0, 2], + [0, 0, 0, 2], + [3, 3, 0, 0]]) + >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1)) + [2.5, 4.0, 6.0] + >>> ndimage.median(a) + 1.0 + >>> ndimage.median(a, labels=labels) + 3.0 + + """ + return _select(input, labels, index, find_median=True)[0] + + +def minimum_position(input, labels=None, index=None): + """ + Find the positions of the minimums of the values of an array at labels. + + Parameters + ---------- + input : array_like + Array_like of values. + labels : array_like, optional + An array of integers marking different regions over which the + position of the minimum value of `input` is to be computed. + `labels` must have the same shape as `input`. If `labels` is not + specified, the location of the first minimum over the whole + array is returned. + + The `labels` argument only works when `index` is specified. + index : array_like, optional + A list of region labels that are taken into account for finding the + location of the minima. If `index` is None, the ``first`` minimum + over all elements where `labels` is non-zero is returned. + + The `index` argument only works when `labels` is specified. 
+ + Returns + ------- + output : list of tuples of ints + Tuple of ints or list of tuples of ints that specify the location + of minima of `input` over the regions determined by `labels` and + whose index is in `index`. + + If `index` or `labels` are not specified, a tuple of ints is + returned specifying the location of the first minimal value of `input`. + + See Also + -------- + label, minimum, median, maximum_position, extrema, sum, mean, variance, + standard_deviation + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10, 20, 30], + ... [40, 80, 100], + ... [1, 100, 200]]) + >>> b = np.array([[1, 2, 0, 1], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + + >>> from scipy import ndimage + + >>> ndimage.minimum_position(a) + (2, 0) + >>> ndimage.minimum_position(b) + (0, 2) + + Features to process can be specified using `labels` and `index`: + + >>> label, pos = ndimage.label(a) + >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1)) + [(2, 0)] + + >>> label, pos = ndimage.label(b) + >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1)) + [(0, 0), (0, 3), (3, 1)] + + """ + dims = numpy.array(numpy.asarray(input).shape) + # see numpy.unravel_index to understand this line. + dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] + + result = _select(input, labels, index, find_min_positions=True)[0] + + if numpy.isscalar(result): + return tuple((result // dim_prod) % dims) + + return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] + + +def maximum_position(input, labels=None, index=None): + """ + Find the positions of the maximums of the values of an array at labels. + + For each region specified by `labels`, the position of the maximum + value of `input` within the region is returned. + + Parameters + ---------- + input : array_like + Array_like of values. 
+ labels : array_like, optional + An array of integers marking different regions over which the + position of the maximum value of `input` is to be computed. + `labels` must have the same shape as `input`. If `labels` is not + specified, the location of the first maximum over the whole + array is returned. + + The `labels` argument only works when `index` is specified. + index : array_like, optional + A list of region labels that are taken into account for finding the + location of the maxima. If `index` is None, the first maximum + over all elements where `labels` is non-zero is returned. + + The `index` argument only works when `labels` is specified. + + Returns + ------- + output : list of tuples of ints + List of tuples of ints that specify the location of maxima of + `input` over the regions determined by `labels` and whose index + is in `index`. + + If `index` or `labels` are not specified, a tuple of ints is + returned specifying the location of the ``first`` maximal value + of `input`. + + See Also + -------- + label, minimum, median, maximum_position, extrema, sum, mean, variance, + standard_deviation + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> ndimage.maximum_position(a) + (3, 0) + + Features to process can be specified using `labels` and `index`: + + >>> lbl = np.array([[0, 1, 2, 3], + ... [0, 1, 2, 3], + ... [0, 1, 2, 3], + ... [0, 1, 2, 3]]) + >>> ndimage.maximum_position(a, lbl, 1) + (1, 1) + + If no index is given, non-zero `labels` are processed: + + >>> ndimage.maximum_position(a, lbl) + (2, 3) + + If there are no maxima, the position of the first element is returned: + + >>> ndimage.maximum_position(a, lbl, 2) + (0, 2) + + """ + dims = numpy.array(numpy.asarray(input).shape) + # see numpy.unravel_index to understand this line. 
+ dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] + + result = _select(input, labels, index, find_max_positions=True)[0] + + if numpy.isscalar(result): + return tuple((result // dim_prod) % dims) + + return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] + + +def extrema(input, labels=None, index=None): + """ + Calculate the minimums and maximums of the values of an array + at labels, along with their positions. + + Parameters + ---------- + input : ndarray + N-D image data to process. + labels : ndarray, optional + Labels of features in input. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + Labels to include in output. If None (default), all values where + non-zero `labels` are used. + + Returns + ------- + minimums, maximums : int or ndarray + Values of minimums and maximums in each feature. + min_positions, max_positions : tuple or list of tuples + Each tuple gives the N-D coordinates of the corresponding minimum + or maximum. + + See Also + -------- + maximum, minimum, maximum_position, minimum_position, center_of_mass + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> from scipy import ndimage + >>> ndimage.extrema(a) + (0, 9, (0, 2), (3, 0)) + + Features to process can be specified using `labels` and `index`: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1)) + (array([1, 4, 3]), + array([5, 7, 9]), + [(0, 0), (1, 3), (3, 1)], + [(1, 0), (2, 3), (3, 0)]) + + If no index is given, non-zero `labels` are processed: + + >>> ndimage.extrema(a, lbl) + (1, 9, (0, 0), (3, 0)) + + """ + dims = numpy.array(numpy.asarray(input).shape) + # see numpy.unravel_index to understand this line. 
+ dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] + + minimums, min_positions, maximums, max_positions = _select(input, labels, + index, + find_min=True, + find_max=True, + find_min_positions=True, + find_max_positions=True) + + if numpy.isscalar(minimums): + return (minimums, maximums, tuple((min_positions // dim_prod) % dims), + tuple((max_positions // dim_prod) % dims)) + + min_positions = [ + tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims + ] + max_positions = [ + tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims + ] + + return minimums, maximums, min_positions, max_positions + + +def center_of_mass(input, labels=None, index=None): + """ + Calculate the center of mass of the values of an array at labels. + + Parameters + ---------- + input : ndarray + Data from which to calculate center-of-mass. The masses can either + be positive or negative. + labels : ndarray, optional + Labels for objects in `input`, as generated by `ndimage.label`. + Only used with `index`. Dimensions must be the same as `input`. + index : int or sequence of ints, optional + Labels for which to calculate centers-of-mass. If not specified, + the combined center of mass of all labels greater than zero + will be calculated. Only used with `labels`. + + Returns + ------- + center_of_mass : tuple, or list of tuples + Coordinates of centers-of-mass. + + Examples + -------- + >>> import numpy as np + >>> a = np.array(([0,0,0,0], + ... [0,1,1,0], + ... [0,1,1,0], + ... [0,1,1,0])) + >>> from scipy import ndimage + >>> ndimage.center_of_mass(a) + (2.0, 1.5) + + Calculation of multiple objects in an image + + >>> b = np.array(([0,1,1,0], + ... [0,1,0,0], + ... [0,0,0,0], + ... [0,0,1,1], + ... 
[0,0,1,1])) + >>> lbl = ndimage.label(b)[0] + >>> ndimage.center_of_mass(b, lbl, [1,2]) + [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)] + + Negative masses are also accepted, which can occur for example when + bias is removed from measured data due to random noise. + + >>> c = np.array(([-1,0,0,0], + ... [0,-1,-1,0], + ... [0,1,-1,0], + ... [0,1,1,0])) + >>> ndimage.center_of_mass(c) + (-4.0, 1.0) + + If there are division by zero issues, the function does not raise an + error but rather issues a RuntimeWarning before returning inf and/or NaN. + + >>> d = np.array([-1, 1]) + >>> ndimage.center_of_mass(d) + (inf,) + """ + normalizer = sum(input, labels, index) + grids = numpy.ogrid[[slice(0, i) for i in input.shape]] + + results = [sum(input * grids[dir].astype(float), labels, index) / normalizer + for dir in range(input.ndim)] + + if numpy.isscalar(results[0]): + return tuple(results) + + return [tuple(v) for v in numpy.array(results).T] + + +def histogram(input, min, max, bins, labels=None, index=None): + """ + Calculate the histogram of the values of an array, optionally at labels. + + Histogram calculates the frequency of values in an array within bins + determined by `min`, `max`, and `bins`. The `labels` and `index` + keywords can limit the scope of the histogram to specified sub-regions + within the array. + + Parameters + ---------- + input : array_like + Data for which to calculate histogram. + min, max : int + Minimum and maximum values of range of histogram bins. + bins : int + Number of bins. + labels : array_like, optional + Labels for objects in `input`. + If not None, must be same shape as `input`. + index : int or sequence of ints, optional + Label or labels for which to calculate histogram. If None, all values + where label is greater than zero are used + + Returns + ------- + hist : ndarray + Histogram counts. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[ 0. , 0.2146, 0.5962, 0. ], + ... [ 0. , 0.7778, 0. , 0. 
], + ... [ 0. , 0. , 0. , 0. ], + ... [ 0. , 0. , 0.7181, 0.2787], + ... [ 0. , 0. , 0.6573, 0.3094]]) + >>> from scipy import ndimage + >>> ndimage.histogram(a, 0, 1, 10) + array([13, 0, 2, 1, 0, 1, 1, 2, 0, 0]) + + With labels and no indices, non-zero elements are counted: + + >>> lbl, nlbl = ndimage.label(a) + >>> ndimage.histogram(a, 0, 1, 10, lbl) + array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0]) + + Indices can be used to count only certain objects: + + >>> ndimage.histogram(a, 0, 1, 10, lbl, 2) + array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0]) + + """ + _bins = numpy.linspace(min, max, bins + 1) + + def _hist(vals): + return numpy.histogram(vals, _bins)[0] + + return labeled_comprehension(input, labels, index, _hist, object, None, + pass_positions=False) + + +def watershed_ift(input, markers, structure=None, output=None): + """ + Apply watershed from markers using image foresting transform algorithm. + + Parameters + ---------- + input : array_like + Input. + markers : array_like + Markers are points within each watershed that form the beginning + of the process. Negative markers are considered background markers + which are processed after the other markers. + structure : structure element, optional + A structuring element defining the connectivity of the object can be + provided. If None, an element is generated with a squared + connectivity equal to one. + output : ndarray, optional + An output array can optionally be provided. The same shape as input. + + Returns + ------- + watershed_ift : ndarray + Output. Same shape as `input`. + + References + ---------- + .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image + foresting transform: theory, algorithms, and applications", + Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004. 
+ + """ + input = numpy.asarray(input) + if input.dtype.type not in [numpy.uint8, numpy.uint16]: + raise TypeError('only 8 and 16 unsigned inputs are supported') + + if structure is None: + structure = _morphology.generate_binary_structure(input.ndim, 1) + structure = numpy.asarray(structure, dtype=bool) + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have equal rank') + for ii in structure.shape: + if ii != 3: + raise RuntimeError('structure dimensions must be equal to 3') + + if not structure.flags.contiguous: + structure = structure.copy() + markers = numpy.asarray(markers) + if input.shape != markers.shape: + raise RuntimeError('input and markers must have equal shape') + + integral_types = [numpy.int8, + numpy.int16, + numpy.int32, + numpy.int64, + numpy.intc, + numpy.intp] + + if markers.dtype.type not in integral_types: + raise RuntimeError('marker should be of integer type') + + if isinstance(output, numpy.ndarray): + if output.dtype.type not in integral_types: + raise RuntimeError('output should be of integer type') + else: + output = markers.dtype + + output = _ni_support._get_output(output, input) + _nd_image.watershed_ift(input, markers, structure, output) + return output diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/_morphology.py b/venv/lib/python3.10/site-packages/scipy/ndimage/_morphology.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd7ec7fbfdead532c7a9d3cb073360907d85b6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/ndimage/_morphology.py @@ -0,0 +1,2520 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import warnings +import operator + +import numpy +from . import _ni_support +from . import _nd_image +from . import _filters + +__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion', + 'binary_dilation', 'binary_opening', 'binary_closing', + 'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes', + 'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing', + 'morphological_gradient', 'morphological_laplace', 'white_tophat', + 'black_tophat', 'distance_transform_bf', 'distance_transform_cdt', + 'distance_transform_edt'] + + +def _center_is_true(structure, origin): + structure = numpy.array(structure) + coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, + origin)]) + return bool(structure[coor]) + + +def iterate_structure(structure, iterations, origin=None): + """ + Iterate a structure by dilating it with itself. 
+ + Parameters + ---------- + structure : array_like + Structuring element (an array of bools, for example), to be dilated with + itself. + iterations : int + number of dilations performed on the structure with itself + origin : optional + If origin is None, only the iterated structure is returned. If + not, a tuple of the iterated structure and the modified origin is + returned. + + Returns + ------- + iterate_structure : ndarray of bools + A new structuring element obtained by dilating `structure` + (`iterations` - 1) times with itself. + + See Also + -------- + generate_binary_structure + + Examples + -------- + >>> from scipy import ndimage + >>> struct = ndimage.generate_binary_structure(2, 1) + >>> struct.astype(int) + array([[0, 1, 0], + [1, 1, 1], + [0, 1, 0]]) + >>> ndimage.iterate_structure(struct, 2).astype(int) + array([[0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0]]) + >>> ndimage.iterate_structure(struct, 3).astype(int) + array([[0, 0, 0, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 0, 0]]) + + """ + structure = numpy.asarray(structure) + if iterations < 2: + return structure.copy() + ni = iterations - 1 + shape = [ii + ni * (ii - 1) for ii in structure.shape] + pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))] + slc = tuple(slice(pos[ii], pos[ii] + structure.shape[ii], None) + for ii in range(len(shape))) + out = numpy.zeros(shape, bool) + out[slc] = structure != 0 + out = binary_dilation(out, structure, iterations=ni) + if origin is None: + return out + else: + origin = _ni_support._normalize_sequence(origin, structure.ndim) + origin = [iterations * o for o in origin] + return out, origin + + +def generate_binary_structure(rank, connectivity): + """ + Generate a binary structure for binary morphological operations. 
+ + Parameters + ---------- + rank : int + Number of dimensions of the array to which the structuring element + will be applied, as returned by `np.ndim`. + connectivity : int + `connectivity` determines which elements of the output array belong + to the structure, i.e., are considered as neighbors of the central + element. Elements up to a squared distance of `connectivity` from + the center are considered neighbors. `connectivity` may range from 1 + (no diagonal elements are neighbors) to `rank` (all elements are + neighbors). + + Returns + ------- + output : ndarray of bools + Structuring element which may be used for binary morphological + operations, with `rank` dimensions and all dimensions equal to 3. + + See Also + -------- + iterate_structure, binary_dilation, binary_erosion + + Notes + ----- + `generate_binary_structure` can only create structuring elements with + dimensions equal to 3, i.e., minimal dimensions. For larger structuring + elements, that are useful e.g., for eroding large objects, one may either + use `iterate_structure`, or create directly custom arrays with + numpy functions such as `numpy.ones`. 
+ + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> struct = ndimage.generate_binary_structure(2, 1) + >>> struct + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> a = np.zeros((5,5)) + >>> a[2, 2] = 1 + >>> a + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype) + >>> b + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype) + array([[ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 1., 1., 1., 1., 1.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.]]) + >>> struct = ndimage.generate_binary_structure(2, 2) + >>> struct + array([[ True, True, True], + [ True, True, True], + [ True, True, True]], dtype=bool) + >>> struct = ndimage.generate_binary_structure(3, 1) + >>> struct # no diagonal elements + array([[[False, False, False], + [False, True, False], + [False, False, False]], + [[False, True, False], + [ True, True, True], + [False, True, False]], + [[False, False, False], + [False, True, False], + [False, False, False]]], dtype=bool) + + """ + if connectivity < 1: + connectivity = 1 + if rank < 1: + return numpy.array(True, dtype=bool) + output = numpy.fabs(numpy.indices([3] * rank) - 1) + output = numpy.add.reduce(output, 0) + return output <= connectivity + + +def _binary_erosion(input, structure, iterations, mask, output, + border_value, origin, invert, brute_force): + try: + iterations = operator.index(iterations) + except TypeError as e: + raise TypeError('iterations parameter should be an integer') from e + + input = numpy.asarray(input) + if numpy.iscomplexobj(input): + raise TypeError('Complex type not supported') + if structure is None: + structure = 
generate_binary_structure(input.ndim, 1) + else: + structure = numpy.asarray(structure, dtype=bool) + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have same dimensionality') + if not structure.flags.contiguous: + structure = structure.copy() + if numpy.prod(structure.shape, axis=0) < 1: + raise RuntimeError('structure must not be empty') + if mask is not None: + mask = numpy.asarray(mask) + if mask.shape != input.shape: + raise RuntimeError('mask and input must have equal sizes') + origin = _ni_support._normalize_sequence(origin, input.ndim) + cit = _center_is_true(structure, origin) + if isinstance(output, numpy.ndarray): + if numpy.iscomplexobj(output): + raise TypeError('Complex output type not supported') + else: + output = bool + output = _ni_support._get_output(output, input) + temp_needed = numpy.may_share_memory(input, output) + if temp_needed: + # input and output arrays cannot share memory + temp = output + output = _ni_support._get_output(output.dtype, input) + if iterations == 1: + _nd_image.binary_erosion(input, structure, mask, output, + border_value, origin, invert, cit, 0) + elif cit and not brute_force: + changed, coordinate_list = _nd_image.binary_erosion( + input, structure, mask, output, + border_value, origin, invert, cit, 1) + structure = structure[tuple([slice(None, None, -1)] * + structure.ndim)] + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if not structure.shape[ii] & 1: + origin[ii] -= 1 + if mask is not None: + mask = numpy.asarray(mask, dtype=numpy.int8) + if not structure.flags.contiguous: + structure = structure.copy() + _nd_image.binary_erosion2(output, structure, mask, iterations - 1, + origin, invert, coordinate_list) + else: + tmp_in = numpy.empty_like(input, dtype=bool) + tmp_out = output + if iterations >= 1 and not iterations & 1: + tmp_in, tmp_out = tmp_out, tmp_in + changed = _nd_image.binary_erosion( + input, structure, mask, tmp_out, + border_value, origin, invert, cit, 0) + 
ii = 1 + while ii < iterations or (iterations < 1 and changed): + tmp_in, tmp_out = tmp_out, tmp_in + changed = _nd_image.binary_erosion( + tmp_in, structure, mask, tmp_out, + border_value, origin, invert, cit, 0) + ii += 1 + if temp_needed: + temp[...] = output + output = temp + return output + + +def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, + border_value=0, origin=0, brute_force=False): + """ + Multidimensional binary erosion with a given structuring element. + + Binary erosion is a mathematical morphology operation used for image + processing. + + Parameters + ---------- + input : array_like + Binary image to be eroded. Non-zero (True) elements form + the subset to be eroded. + structure : array_like, optional + Structuring element used for the erosion. Non-zero elements are + considered True. If no structuring element is provided, an element + is generated with a square connectivity equal to one. + iterations : int, optional + The erosion is repeated `iterations` times (one, by default). + If iterations is less than 1, the erosion is repeated until the + result does not change anymore. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated (eroded) in + the current iteration; if True all pixels are considered as candidates + for erosion, regardless of what happened in the previous iteration. + False by default. 
+ + Returns + ------- + binary_erosion : ndarray of bools + Erosion of the input by the structuring element. + + See Also + -------- + grey_erosion, binary_dilation, binary_closing, binary_opening, + generate_binary_structure + + Notes + ----- + Erosion [1]_ is a mathematical morphology operation [2]_ that uses a + structuring element for shrinking the shapes in an image. The binary + erosion of an image by a structuring element is the locus of the points + where a superimposition of the structuring element centered on the point + is entirely contained in the set of non-zero elements of the image. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29 + .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((7,7), dtype=int) + >>> a[1:6, 2:5] = 1 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_erosion(a).astype(a.dtype) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> #Erosion removes objects smaller than the structure + >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + return _binary_erosion(input, structure, iterations, mask, + output, border_value, origin, 0, brute_force) + + +def binary_dilation(input, structure=None, iterations=1, mask=None, + output=None, border_value=0, origin=0, + brute_force=False): + """ + Multidimensional binary dilation with the given structuring element. 
+ + Parameters + ---------- + input : array_like + Binary array_like to be dilated. Non-zero (True) elements form + the subset to be dilated. + structure : array_like, optional + Structuring element used for the dilation. Non-zero elements are + considered True. If no structuring element is provided an element + is generated with a square connectivity equal to one. + iterations : int, optional + The dilation is repeated `iterations` times (one, by default). + If iterations is less than 1, the dilation is repeated until the + result does not change anymore. Only an integer of iterations is + accepted. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated (dilated) + in the current iteration; if True all pixels are considered as + candidates for dilation, regardless of what happened in the previous + iteration. False by default. + + Returns + ------- + binary_dilation : ndarray of bools + Dilation of the input by the structuring element. + + See Also + -------- + grey_dilation, binary_erosion, binary_closing, binary_opening, + generate_binary_structure + + Notes + ----- + Dilation [1]_ is a mathematical morphology operation [2]_ that uses a + structuring element for expanding the shapes in an image. The binary + dilation of an image by a structuring element is the locus of the points + covered by the structuring element, when its center lies within the + non-zero points of the image. 
+ + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29 + .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((5, 5)) + >>> a[2, 2] = 1 + >>> a + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a) + array([[False, False, False, False, False], + [False, False, True, False, False], + [False, True, True, True, False], + [False, False, True, False, False], + [False, False, False, False, False]], dtype=bool) + >>> ndimage.binary_dilation(a).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> # 3x3 structuring element with connectivity 1, used by default + >>> struct1 = ndimage.generate_binary_structure(2, 1) + >>> struct1 + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> # 3x3 structuring element with connectivity 2 + >>> struct2 = ndimage.generate_binary_structure(2, 2) + >>> struct2 + array([[ True, True, True], + [ True, True, True], + [ True, True, True]], dtype=bool) + >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype) + array([[ 0., 0., 0., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 0., 0., 0.]]) + >>> ndimage.binary_dilation(a, structure=struct1,\\ + ... 
iterations=2).astype(a.dtype) + array([[ 0., 0., 1., 0., 0.], + [ 0., 1., 1., 1., 0.], + [ 1., 1., 1., 1., 1.], + [ 0., 1., 1., 1., 0.], + [ 0., 0., 1., 0., 0.]]) + + """ + input = numpy.asarray(input) + if structure is None: + structure = generate_binary_structure(input.ndim, 1) + origin = _ni_support._normalize_sequence(origin, input.ndim) + structure = numpy.asarray(structure) + structure = structure[tuple([slice(None, None, -1)] * + structure.ndim)] + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if not structure.shape[ii] & 1: + origin[ii] -= 1 + + return _binary_erosion(input, structure, iterations, mask, + output, border_value, origin, 1, brute_force) + + +def binary_opening(input, structure=None, iterations=1, output=None, + origin=0, mask=None, border_value=0, brute_force=False): + """ + Multidimensional binary opening with the given structuring element. + + The *opening* of an input image by a structuring element is the + *dilation* of the *erosion* of the image by the structuring element. + + Parameters + ---------- + input : array_like + Binary array_like to be opened. Non-zero (True) elements form + the subset to be opened. + structure : array_like, optional + Structuring element used for the opening. Non-zero elements are + considered True. If no structuring element is provided an element + is generated with a square connectivity equal to one (i.e., only + nearest neighbors are connected to the center, diagonally-connected + elements are not considered neighbors). + iterations : int, optional + The erosion step of the opening, then the dilation step are each + repeated `iterations` times (one, by default). If `iterations` is + less than 1, each operation is repeated until the result does + not change anymore. Only an integer of iterations is accepted. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. 
+ origin : int or tuple of ints, optional + Placement of the filter, by default 0. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + + .. versionadded:: 1.1.0 + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + + .. versionadded:: 1.1.0 + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated in the + current iteration; if true all pixels are considered as candidates for + update, regardless of what happened in the previous iteration. + False by default. + + .. versionadded:: 1.1.0 + + Returns + ------- + binary_opening : ndarray of bools + Opening of the input by the structuring element. + + See Also + -------- + grey_opening, binary_closing, binary_erosion, binary_dilation, + generate_binary_structure + + Notes + ----- + *Opening* [1]_ is a mathematical morphology operation [2]_ that + consists in the succession of an erosion and a dilation of the + input with the same structuring element. Opening, therefore, removes + objects smaller than the structuring element. + + Together with *closing* (`binary_closing`), opening can be used for + noise removal. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Opening_%28morphology%29 + .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((5,5), dtype=int) + >>> a[1:4, 1:4] = 1; a[4, 4] = 1 + >>> a + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + >>> # Opening removes small objects + >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> # Opening can also smooth corners + >>> ndimage.binary_opening(a).astype(int) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + >>> # Opening is the dilation of the erosion of the input + >>> ndimage.binary_erosion(a).astype(int) + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]]) + >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + + """ + input = numpy.asarray(input) + if structure is None: + rank = input.ndim + structure = generate_binary_structure(rank, 1) + + tmp = binary_erosion(input, structure, iterations, mask, None, + border_value, origin, brute_force) + return binary_dilation(tmp, structure, iterations, mask, output, + border_value, origin, brute_force) + + +def binary_closing(input, structure=None, iterations=1, output=None, + origin=0, mask=None, border_value=0, brute_force=False): + """ + Multidimensional binary closing with the given structuring element. + + The *closing* of an input image by a structuring element is the + *erosion* of the *dilation* of the image by the structuring element. + + Parameters + ---------- + input : array_like + Binary array_like to be closed. Non-zero (True) elements form + the subset to be closed. 
+ structure : array_like, optional + Structuring element used for the closing. Non-zero elements are + considered True. If no structuring element is provided an element + is generated with a square connectivity equal to one (i.e., only + nearest neighbors are connected to the center, diagonally-connected + elements are not considered neighbors). + iterations : int, optional + The dilation step of the closing, then the erosion step are each + repeated `iterations` times (one, by default). If iterations is + less than 1, each operations is repeated until the result does + not change anymore. Only an integer of iterations is accepted. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. + mask : array_like, optional + If a mask is given, only those elements with a True value at + the corresponding mask element are modified at each iteration. + + .. versionadded:: 1.1.0 + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + + .. versionadded:: 1.1.0 + brute_force : boolean, optional + Memory condition: if False, only the pixels whose value was changed in + the last iteration are tracked as candidates to be updated in the + current iteration; if true al pixels are considered as candidates for + update, regardless of what happened in the previous iteration. + False by default. + + .. versionadded:: 1.1.0 + + Returns + ------- + binary_closing : ndarray of bools + Closing of the input by the structuring element. + + See Also + -------- + grey_closing, binary_opening, binary_dilation, binary_erosion, + generate_binary_structure + + Notes + ----- + *Closing* [1]_ is a mathematical morphology operation [2]_ that + consists in the succession of a dilation and an erosion of the + input with the same structuring element. 
Closing therefore fills + holes smaller than the structuring element. + + Together with *opening* (`binary_opening`), closing can be used for + noise removal. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Closing_%28morphology%29 + .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((5,5), dtype=int) + >>> a[1:-1, 1:-1] = 1; a[2,2] = 0 + >>> a + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 0, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> # Closing removes small holes + >>> ndimage.binary_closing(a).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> # Closing is the erosion of the dilation of the input + >>> ndimage.binary_dilation(a).astype(int) + array([[0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0]]) + >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + + + >>> a = np.zeros((7,7), dtype=int) + >>> a[1:6, 2:5] = 1; a[1:3,3] = 0 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> # In addition to removing holes, closing can also + >>> # coarsen boundaries with fine hollows. 
+ >>> ndimage.binary_closing(a).astype(int) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + input = numpy.asarray(input) + if structure is None: + rank = input.ndim + structure = generate_binary_structure(rank, 1) + + tmp = binary_dilation(input, structure, iterations, mask, None, + border_value, origin, brute_force) + return binary_erosion(tmp, structure, iterations, mask, output, + border_value, origin, brute_force) + + +def binary_hit_or_miss(input, structure1=None, structure2=None, + output=None, origin1=0, origin2=None): + """ + Multidimensional binary hit-or-miss transform. + + The hit-or-miss transform finds the locations of a given pattern + inside the input image. + + Parameters + ---------- + input : array_like (cast to booleans) + Binary image where a pattern is to be detected. + structure1 : array_like (cast to booleans), optional + Part of the structuring element to be fitted to the foreground + (non-zero elements) of `input`. If no value is provided, a + structure of square connectivity 1 is chosen. + structure2 : array_like (cast to booleans), optional + Second part of the structuring element that has to miss completely + the foreground. If no value is provided, the complementary of + `structure1` is taken. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + origin1 : int or tuple of ints, optional + Placement of the first part of the structuring element `structure1`, + by default 0 for a centered structure. 
+ origin2 : int or tuple of ints, optional + Placement of the second part of the structuring element `structure2`, + by default 0 for a centered structure. If a value is provided for + `origin1` and not for `origin2`, then `origin2` is set to `origin1`. + + Returns + ------- + binary_hit_or_miss : ndarray + Hit-or-miss transform of `input` with the given structuring + element (`structure1`, `structure2`). + + See Also + -------- + binary_erosion + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Hit-or-miss_transform + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((7,7), dtype=int) + >>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]]) + >>> structure1 + array([[1, 0, 0], + [0, 1, 1], + [0, 1, 1]]) + >>> # Find the matches of structure1 in the array a + >>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> # Change the origin of the filter + >>> # origin1=1 is equivalent to origin1=(1,1) here + >>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\ + ... 
origin1=1).astype(int) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + input = numpy.asarray(input) + if structure1 is None: + structure1 = generate_binary_structure(input.ndim, 1) + if structure2 is None: + structure2 = numpy.logical_not(structure1) + origin1 = _ni_support._normalize_sequence(origin1, input.ndim) + if origin2 is None: + origin2 = origin1 + else: + origin2 = _ni_support._normalize_sequence(origin2, input.ndim) + + tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1, + 0, False) + inplace = isinstance(output, numpy.ndarray) + result = _binary_erosion(input, structure2, 1, None, output, 0, + origin2, 1, False) + if inplace: + numpy.logical_not(output, output) + numpy.logical_and(tmp1, output, output) + else: + numpy.logical_not(result, result) + return numpy.logical_and(tmp1, result) + + +def binary_propagation(input, structure=None, mask=None, + output=None, border_value=0, origin=0): + """ + Multidimensional binary propagation with the given structuring element. + + Parameters + ---------- + input : array_like + Binary image to be propagated inside `mask`. + structure : array_like, optional + Structuring element used in the successive dilations. The output + may depend on the structuring element, especially if `mask` has + several connex components. If no structuring element is + provided, an element is generated with a squared connectivity equal + to one. + mask : array_like, optional + Binary mask defining the region into which `input` is allowed to + propagate. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + border_value : int (cast to 0 or 1), optional + Value at the border in the output array. + origin : int or tuple of ints, optional + Placement of the filter, by default 0. 
+ + Returns + ------- + binary_propagation : ndarray + Binary propagation of `input` inside `mask`. + + Notes + ----- + This function is functionally equivalent to calling binary_dilation + with the number of iterations less than one: iterative dilation until + the result does not change anymore. + + The succession of an erosion and propagation inside the original image + can be used instead of an *opening* for deleting small objects while + keeping the contours of larger objects untouched. + + References + ---------- + .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15. + .. [2] I.T. Young, J.J. Gerbrands, and L.J. van Vliet, "Fundamentals of + image processing", 1998 + ftp://qiftp.tudelft.nl/DIPimage/docs/FIP2.3.pdf + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> input = np.zeros((8, 8), dtype=int) + >>> input[2, 2] = 1 + >>> mask = np.zeros((8, 8), dtype=int) + >>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1 + >>> input + array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]]) + >>> mask + array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 1]]) + >>> ndimage.binary_propagation(input, mask=mask).astype(int) + array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_propagation(input, mask=mask,\\ + ... 
structure=np.ones((3,3))).astype(int) + array([[0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]]) + + >>> # Comparison between opening and erosion+propagation + >>> a = np.zeros((6,6), dtype=int) + >>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1 + >>> a + array([[1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 1]]) + >>> ndimage.binary_opening(a).astype(int) + array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0]]) + >>> b = ndimage.binary_erosion(a) + >>> b.astype(int) + array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + >>> ndimage.binary_propagation(b, mask=a).astype(int) + array([[0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0]]) + + """ + return binary_dilation(input, structure, -1, mask, output, + border_value, origin) + + +def binary_fill_holes(input, structure=None, output=None, origin=0): + """ + Fill the holes in binary objects. + + + Parameters + ---------- + input : array_like + N-D binary array with holes to be filled + structure : array_like, optional + Structuring element used in the computation; large-size elements + make computations faster but may miss holes separated from the + background by thin regions. The default element (with a square + connectivity equal to one) yields the intuitive result where all + holes in the input have been filled. + output : ndarray, optional + Array of the same shape as input, into which the output is placed. + By default, a new array is created. + origin : int, tuple of ints, optional + Position of the structuring element. 
+ + Returns + ------- + out : ndarray + Transformation of the initial image `input` where holes have been + filled. + + See Also + -------- + binary_dilation, binary_propagation, label + + Notes + ----- + The algorithm used in this function consists in invading the complementary + of the shapes in `input` from the outer boundary of the image, + using binary dilations. Holes are not connected to the boundary and are + therefore not invaded. The result is the complementary subset of the + invaded region. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology + + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((5, 5), dtype=int) + >>> a[1:4, 1:4] = 1 + >>> a[2,2] = 0 + >>> a + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 0, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> ndimage.binary_fill_holes(a).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + >>> # Too big structuring element + >>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int) + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 0, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]]) + + """ + mask = numpy.logical_not(input) + tmp = numpy.zeros(mask.shape, bool) + inplace = isinstance(output, numpy.ndarray) + if inplace: + binary_dilation(tmp, structure, -1, mask, output, 1, origin) + numpy.logical_not(output, output) + else: + output = binary_dilation(tmp, structure, -1, mask, None, 1, + origin) + numpy.logical_not(output, output) + return output + + +def grey_erosion(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Calculate a greyscale erosion, using either a structuring element, + or a footprint corresponding to a flat structuring element. + + Grayscale erosion is a mathematical morphology operation. 
For the + simple case of a full and flat structuring element, it can be viewed + as a minimum filter over a sliding window. + + Parameters + ---------- + input : array_like + Array over which the grayscale erosion is to be computed. + size : tuple of ints + Shape of a flat and full structuring element used for the grayscale + erosion. Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the grayscale erosion. Non-zero values give the set of + neighbors of the center over which the minimum is chosen. + structure : array of ints, optional + Structuring element used for the grayscale erosion. `structure` + may be a non-flat structuring element. + output : array, optional + An array used for storing the output of the erosion may be provided. + mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + output : ndarray + Grayscale erosion of `input`. + + See Also + -------- + binary_erosion, grey_dilation, grey_opening, grey_closing + generate_binary_structure, minimum_filter + + Notes + ----- + The grayscale erosion of an image input by a structuring element s defined + over a domain E is given by: + + (input+s)(x) = min {input(y) - s(x-y), for y in E} + + In particular, for structuring elements defined as + s(y) = 0 for y in E, the grayscale erosion computes the minimum of the + input image inside a sliding window defined by E. + + Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_. + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29 + .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((7,7), dtype=int) + >>> a[1:6, 1:6] = 3 + >>> a[4,4] = 2; a[2,3] = 1 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 3, 3, 3, 3, 3, 0], + [0, 3, 3, 1, 3, 3, 0], + [0, 3, 3, 3, 3, 3, 0], + [0, 3, 3, 3, 2, 3, 0], + [0, 3, 3, 3, 3, 3, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.grey_erosion(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 3, 2, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> footprint = ndimage.generate_binary_structure(2, 1) + >>> footprint + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> # Diagonally-connected elements are not considered neighbors + >>> ndimage.grey_erosion(a, footprint=footprint) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 3, 1, 2, 0, 0], + [0, 0, 3, 2, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + if size is None and footprint is None and structure is None: + raise ValueError("size, footprint, or structure must be specified") + + return _filters._min_or_max_filter(input, size, footprint, structure, + output, mode, cval, origin, 1) + + +def grey_dilation(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Calculate a greyscale dilation, using either a structuring element, + or a footprint corresponding to a flat structuring element. + + Grayscale dilation is a mathematical morphology operation. For the + simple case of a full and flat structuring element, it can be viewed + as a maximum filter over a sliding window. + + Parameters + ---------- + input : array_like + Array over which the grayscale dilation is to be computed. 
+ size : tuple of ints + Shape of a flat and full structuring element used for the grayscale + dilation. Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the grayscale dilation. Non-zero values give the set of + neighbors of the center over which the maximum is chosen. + structure : array of ints, optional + Structuring element used for the grayscale dilation. `structure` + may be a non-flat structuring element. + output : array, optional + An array used for storing the output of the dilation may be provided. + mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + grey_dilation : ndarray + Grayscale dilation of `input`. + + See Also + -------- + binary_dilation, grey_erosion, grey_closing, grey_opening + generate_binary_structure, maximum_filter + + Notes + ----- + The grayscale dilation of an image input by a structuring element s defined + over a domain E is given by: + + (input+s)(x) = max {input(y) + s(x-y), for y in E} + + In particular, for structuring elements defined as + s(y) = 0 for y in E, the grayscale dilation computes the maximum of the + input image inside a sliding window defined by E. + + Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29 + .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((7,7), dtype=int) + >>> a[2:5, 2:5] = 1 + >>> a[4,4] = 2; a[2,3] = 3 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 3, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.grey_dilation(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.grey_dilation(a, footprint=np.ones((3,3))) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> s = ndimage.generate_binary_structure(2,1) + >>> s + array([[False, True, False], + [ True, True, True], + [False, True, False]], dtype=bool) + >>> ndimage.grey_dilation(a, footprint=s) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 3, 1, 0, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 1, 3, 2, 1, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 0, 1, 1, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3))) + array([[1, 1, 1, 1, 1, 1, 1], + [1, 2, 4, 4, 4, 2, 1], + [1, 2, 4, 4, 4, 2, 1], + [1, 2, 4, 4, 4, 3, 1], + [1, 2, 2, 3, 3, 3, 1], + [1, 2, 2, 3, 3, 3, 1], + [1, 1, 1, 1, 1, 1, 1]]) + + """ + if size is None and footprint is None and structure is None: + raise ValueError("size, footprint, or structure must be specified") + if structure is not None: + structure = numpy.asarray(structure) + structure = structure[tuple([slice(None, None, -1)] * + structure.ndim)] + if footprint is not None: + footprint = numpy.asarray(footprint) + footprint = footprint[tuple([slice(None, None, -1)] * + footprint.ndim)] + + input = numpy.asarray(input) + origin = 
_ni_support._normalize_sequence(origin, input.ndim) + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if footprint is not None: + sz = footprint.shape[ii] + elif structure is not None: + sz = structure.shape[ii] + elif numpy.isscalar(size): + sz = size + else: + sz = size[ii] + if not sz & 1: + origin[ii] -= 1 + + return _filters._min_or_max_filter(input, size, footprint, structure, + output, mode, cval, origin, 0) + + +def grey_opening(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Multidimensional grayscale opening. + + A grayscale opening consists in the succession of a grayscale erosion, + and a grayscale dilation. + + Parameters + ---------- + input : array_like + Array over which the grayscale opening is to be computed. + size : tuple of ints + Shape of a flat and full structuring element used for the grayscale + opening. Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the grayscale opening. + structure : array of ints, optional + Structuring element used for the grayscale opening. `structure` + may be a non-flat structuring element. + output : array, optional + An array used for storing the output of the opening may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + grey_opening : ndarray + Result of the grayscale opening of `input` with `structure`. 
+ + See Also + -------- + binary_opening, grey_dilation, grey_erosion, grey_closing + generate_binary_structure + + Notes + ----- + The action of a grayscale opening with a flat structuring element amounts + to smoothen high local maxima, whereas binary opening erases small objects. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.arange(36).reshape((6,6)) + >>> a[3, 3] = 50 + >>> a + array([[ 0, 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10, 11], + [12, 13, 14, 15, 16, 17], + [18, 19, 20, 50, 22, 23], + [24, 25, 26, 27, 28, 29], + [30, 31, 32, 33, 34, 35]]) + >>> ndimage.grey_opening(a, size=(3,3)) + array([[ 0, 1, 2, 3, 4, 4], + [ 6, 7, 8, 9, 10, 10], + [12, 13, 14, 15, 16, 16], + [18, 19, 20, 22, 22, 22], + [24, 25, 26, 27, 28, 28], + [24, 25, 26, 27, 28, 28]]) + >>> # Note that the local maximum a[3,3] has disappeared + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + tmp = grey_erosion(input, size, footprint, structure, None, mode, + cval, origin) + return grey_dilation(tmp, size, footprint, structure, output, mode, + cval, origin) + + +def grey_closing(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Multidimensional grayscale closing. + + A grayscale closing consists in the succession of a grayscale dilation, + and a grayscale erosion. + + Parameters + ---------- + input : array_like + Array over which the grayscale closing is to be computed. + size : tuple of ints + Shape of a flat and full structuring element used for the grayscale + closing. Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the grayscale closing. 
+ structure : array of ints, optional + Structuring element used for the grayscale closing. `structure` + may be a non-flat structuring element. + output : array, optional + An array used for storing the output of the closing may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + grey_closing : ndarray + Result of the grayscale closing of `input` with `structure`. + + See Also + -------- + binary_closing, grey_dilation, grey_erosion, grey_opening, + generate_binary_structure + + Notes + ----- + The action of a grayscale closing with a flat structuring element amounts + to smoothen deep local minima, whereas binary closing fills small holes. + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.arange(36).reshape((6,6)) + >>> a[3,3] = 0 + >>> a + array([[ 0, 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10, 11], + [12, 13, 14, 15, 16, 17], + [18, 19, 20, 0, 22, 23], + [24, 25, 26, 27, 28, 29], + [30, 31, 32, 33, 34, 35]]) + >>> ndimage.grey_closing(a, size=(3,3)) + array([[ 7, 7, 8, 9, 10, 11], + [ 7, 7, 8, 9, 10, 11], + [13, 13, 14, 15, 16, 17], + [19, 19, 20, 20, 22, 23], + [25, 25, 26, 27, 28, 29], + [31, 31, 32, 33, 34, 35]]) + >>> # Note that the local minimum a[3,3] has disappeared + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + tmp = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + return grey_erosion(tmp, size, footprint, structure, output, mode, + cval, origin) + + +def morphological_gradient(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Multidimensional morphological gradient. + + The morphological gradient is calculated as the difference between a + dilation and an erosion of the input with a given structuring element. + + Parameters + ---------- + input : array_like + Array over which to compute the morphlogical gradient. + size : tuple of ints + Shape of a flat and full structuring element used for the mathematical + morphology operations. Optional if `footprint` or `structure` is + provided. A larger `size` yields a more blurred gradient. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the morphology operations. Larger footprints + give a more blurred morphological gradient. + structure : array of ints, optional + Structuring element used for the morphology operations. + `structure` may be a non-flat structuring element. 
+ output : array, optional + An array used for storing the output of the morphological gradient + may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + morphological_gradient : ndarray + Morphological gradient of `input`. + + See Also + -------- + grey_dilation, grey_erosion, gaussian_gradient_magnitude + + Notes + ----- + For a flat structuring element, the morphological gradient + computed at a given point corresponds to the maximal difference + between elements of the input among the elements covered by the + structuring element centered on the point. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.zeros((7,7), dtype=int) + >>> a[2:5, 2:5] = 1 + >>> ndimage.morphological_gradient(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> # The morphological gradient is computed as the difference + >>> # between a dilation and an erosion + >>> ndimage.grey_dilation(a, size=(3,3)) -\\ + ... 
ndimage.grey_erosion(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 0, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> a = np.zeros((7,7), dtype=int) + >>> a[2:5, 2:5] = 1 + >>> a[4,4] = 2; a[2,3] = 3 + >>> a + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 3, 1, 0, 0], + [0, 0, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 2, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]]) + >>> ndimage.morphological_gradient(a, size=(3,3)) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 3, 3, 1, 0], + [0, 1, 3, 2, 3, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 1, 1, 2, 2, 2, 0], + [0, 0, 0, 0, 0, 0, 0]]) + + """ + tmp = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + if isinstance(output, numpy.ndarray): + grey_erosion(input, size, footprint, structure, output, mode, + cval, origin) + return numpy.subtract(tmp, output, output) + else: + return (tmp - grey_erosion(input, size, footprint, structure, + None, mode, cval, origin)) + + +def morphological_laplace(input, size=None, footprint=None, + structure=None, output=None, + mode="reflect", cval=0.0, origin=0): + """ + Multidimensional morphological laplace. + + Parameters + ---------- + input : array_like + Input. + size : int or sequence of ints, optional + See `structure`. + footprint : bool or ndarray, optional + See `structure`. + structure : structure, optional + Either `size`, `footprint`, or the `structure` must be provided. + output : ndarray, optional + An output array can optionally be provided. + mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional + The mode parameter determines how the array borders are handled. + For 'constant' mode, values beyond borders are set to be `cval`. + Default is 'reflect'. + cval : scalar, optional + Value to fill past edges of input if mode is 'constant'. 
+ Default is 0.0 + origin : origin, optional + The origin parameter controls the placement of the filter. + + Returns + ------- + morphological_laplace : ndarray + Output + + """ + tmp1 = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + if isinstance(output, numpy.ndarray): + grey_erosion(input, size, footprint, structure, output, mode, + cval, origin) + numpy.add(tmp1, output, output) + numpy.subtract(output, input, output) + return numpy.subtract(output, input, output) + else: + tmp2 = grey_erosion(input, size, footprint, structure, None, mode, + cval, origin) + numpy.add(tmp1, tmp2, tmp2) + numpy.subtract(tmp2, input, tmp2) + numpy.subtract(tmp2, input, tmp2) + return tmp2 + + +def white_tophat(input, size=None, footprint=None, structure=None, + output=None, mode="reflect", cval=0.0, origin=0): + """ + Multidimensional white tophat filter. + + Parameters + ---------- + input : array_like + Input. + size : tuple of ints + Shape of a flat and full structuring element used for the filter. + Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of elements of a flat structuring element + used for the white tophat filter. + structure : array of ints, optional + Structuring element used for the filter. `structure` + may be a non-flat structuring element. + output : array, optional + An array used for storing the output of the filter may be provided. + mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. + Default is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default is 0. + + Returns + ------- + output : ndarray + Result of the filter of `input` with `structure`. 
+ + See Also + -------- + black_tophat + + Examples + -------- + Subtract gray background from a bright peak. + + >>> from scipy.ndimage import generate_binary_structure, white_tophat + >>> import numpy as np + >>> square = generate_binary_structure(rank=2, connectivity=3) + >>> bright_on_gray = np.array([[2, 3, 3, 3, 2], + ... [3, 4, 5, 4, 3], + ... [3, 5, 9, 5, 3], + ... [3, 4, 5, 4, 3], + ... [2, 3, 3, 3, 2]]) + >>> white_tophat(input=bright_on_gray, structure=square) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 5, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + tmp = grey_erosion(input, size, footprint, structure, None, mode, + cval, origin) + tmp = grey_dilation(tmp, size, footprint, structure, output, mode, + cval, origin) + if tmp is None: + tmp = output + + if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_: + numpy.bitwise_xor(input, tmp, out=tmp) + else: + numpy.subtract(input, tmp, out=tmp) + return tmp + + +def black_tophat(input, size=None, footprint=None, + structure=None, output=None, mode="reflect", + cval=0.0, origin=0): + """ + Multidimensional black tophat filter. + + Parameters + ---------- + input : array_like + Input. + size : tuple of ints, optional + Shape of a flat and full structuring element used for the filter. + Optional if `footprint` or `structure` is provided. + footprint : array of ints, optional + Positions of non-infinite elements of a flat structuring element + used for the black tophat filter. + structure : array of ints, optional + Structuring element used for the filter. `structure` + may be a non-flat structuring element. + output : array, optional + An array used for storing the output of the filter may be provided. 
+ mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional + The `mode` parameter determines how the array borders are + handled, where `cval` is the value when mode is equal to + 'constant'. Default is 'reflect' + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. Default + is 0.0. + origin : scalar, optional + The `origin` parameter controls the placement of the filter. + Default 0 + + Returns + ------- + black_tophat : ndarray + Result of the filter of `input` with `structure`. + + See Also + -------- + white_tophat, grey_opening, grey_closing + + Examples + -------- + Change dark peak to bright peak and subtract background. + + >>> from scipy.ndimage import generate_binary_structure, black_tophat + >>> import numpy as np + >>> square = generate_binary_structure(rank=2, connectivity=3) + >>> dark_on_gray = np.array([[7, 6, 6, 6, 7], + ... [6, 5, 4, 5, 6], + ... [6, 4, 0, 4, 6], + ... [6, 5, 4, 5, 6], + ... [7, 6, 6, 6, 7]]) + >>> black_tophat(input=dark_on_gray, structure=square) + array([[0, 0, 0, 0, 0], + [0, 0, 1, 0, 0], + [0, 1, 5, 1, 0], + [0, 0, 1, 0, 0], + [0, 0, 0, 0, 0]]) + + """ + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=2) + tmp = grey_dilation(input, size, footprint, structure, None, mode, + cval, origin) + tmp = grey_erosion(tmp, size, footprint, structure, output, mode, + cval, origin) + if tmp is None: + tmp = output + + if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_: + numpy.bitwise_xor(tmp, input, out=tmp) + else: + numpy.subtract(tmp, input, out=tmp) + return tmp + + +def distance_transform_bf(input, metric="euclidean", sampling=None, + return_distances=True, return_indices=False, + distances=None, indices=None): + """ + Distance transform function by a brute force algorithm. 
+ + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its + shortest distance to the background (any zero-valued element). + + In addition to the distance transform, the feature transform can + be calculated. In this case the index of the closest background + element to each foreground element is returned in a separate array. + + Parameters + ---------- + input : array_like + Input + metric : {'euclidean', 'taxicab', 'chessboard'}, optional + 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'. + The default is 'euclidean'. + sampling : float, or sequence of float, optional + This parameter is only used when `metric` is 'euclidean'. + Spacing of elements along each dimension. If a sequence, must be of + length equal to the input rank; if a single number, this is used for + all axes. If not specified, a grid spacing of unity is implied. + return_distances : bool, optional + Whether to calculate the distance transform. + Default is True. + return_indices : bool, optional + Whether to calculate the feature transform. + Default is False. + distances : ndarray, optional + An output array to store the calculated distance transform, instead of + returning it. + `return_distances` must be True. + It must be the same shape as `input`, and of type float64 if `metric` + is 'euclidean', uint32 otherwise. + indices : int32 ndarray, optional + An output array to store the calculated feature transform, instead of + returning it. + `return_indicies` must be True. + Its shape must be `(input.ndim,) + input.shape`. + + Returns + ------- + distances : ndarray, optional + The calculated distance transform. Returned only when + `return_distances` is True and `distances` is not supplied. + It will have the same shape as the input array. + indices : int32 ndarray, optional + The calculated feature transform. It has an input-shaped array for each + dimension of the input. 
See distance_transform_edt documentation for an + example. + Returned only when `return_indices` is True and `indices` is not + supplied. + + See Also + -------- + distance_transform_cdt : Faster distance transform for taxicab and + chessboard metrics + distance_transform_edt : Faster distance transform for euclidean metric + + Notes + ----- + This function employs a slow brute force algorithm. See also the + function `distance_transform_cdt` for more efficient taxicab [1]_ and + chessboard algorithms [2]_. + + References + ---------- + .. [1] Taxicab distance. Wikipedia, 2023. + https://en.wikipedia.org/wiki/Taxicab_geometry + .. [2] Chessboard distance. Wikipedia, 2023. + https://en.wikipedia.org/wiki/Chebyshev_distance + + Examples + -------- + Import the necessary modules. + + >>> import numpy as np + >>> from scipy.ndimage import distance_transform_bf + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1 import ImageGrid + + First, we create a toy binary image. + + >>> def add_circle(center_x, center_y, radius, image, fillvalue=1): + ... # fill circular area with 1 + ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]] + ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2 + ... circle_shape = np.sqrt(circle) < radius + ... image[circle_shape] = fillvalue + ... return image + >>> image = np.zeros((100, 100), dtype=np.uint8) + >>> image[35:65, 20:80] = 1 + >>> image = add_circle(28, 65, 10, image) + >>> image = add_circle(37, 30, 10, image) + >>> image = add_circle(70, 45, 20, image) + >>> image = add_circle(45, 80, 10, image) + + Next, we set up the figure. + + >>> fig = plt.figure(figsize=(8, 8)) # set up the figure structure + >>> grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=(0.4, 0.3), + ... label_mode="1", share_all=True, + ... cbar_location="right", cbar_mode="each", + ... cbar_size="7%", cbar_pad="2%") + >>> for ax in grid: + ... 
ax.axis('off') # remove axes from images + + The top left image is the original binary image. + + >>> binary_image = grid[0].imshow(image, cmap='gray') + >>> cbar_binary_image = grid.cbar_axes[0].colorbar(binary_image) + >>> cbar_binary_image.set_ticks([0, 1]) + >>> grid[0].set_title("Binary image: foreground in white") + + The distance transform calculates the distance between foreground pixels + and the image background according to a distance metric. Available metrics + in `distance_transform_bf` are: ``euclidean`` (default), ``taxicab`` + and ``chessboard``. The top right image contains the distance transform + based on the ``euclidean`` metric. + + >>> distance_transform_euclidean = distance_transform_bf(image) + >>> euclidean_transform = grid[1].imshow(distance_transform_euclidean, + ... cmap='gray') + >>> cbar_euclidean = grid.cbar_axes[1].colorbar(euclidean_transform) + >>> colorbar_ticks = [0, 10, 20] + >>> cbar_euclidean.set_ticks(colorbar_ticks) + >>> grid[1].set_title("Euclidean distance") + + The lower left image contains the distance transform using the ``taxicab`` + metric. + + >>> distance_transform_taxicab = distance_transform_bf(image, + ... metric='taxicab') + >>> taxicab_transformation = grid[2].imshow(distance_transform_taxicab, + ... cmap='gray') + >>> cbar_taxicab = grid.cbar_axes[2].colorbar(taxicab_transformation) + >>> cbar_taxicab.set_ticks(colorbar_ticks) + >>> grid[2].set_title("Taxicab distance") + + Finally, the lower right image contains the distance transform using the + ``chessboard`` metric. + + >>> distance_transform_cb = distance_transform_bf(image, + ... metric='chessboard') + >>> chessboard_transformation = grid[3].imshow(distance_transform_cb, + ... 
cmap='gray') + >>> cbar_taxicab = grid.cbar_axes[3].colorbar(chessboard_transformation) + >>> cbar_taxicab.set_ticks(colorbar_ticks) + >>> grid[3].set_title("Chessboard distance") + >>> plt.show() + + """ + ft_inplace = isinstance(indices, numpy.ndarray) + dt_inplace = isinstance(distances, numpy.ndarray) + _distance_tranform_arg_check( + dt_inplace, ft_inplace, return_distances, return_indices + ) + + tmp1 = numpy.asarray(input) != 0 + struct = generate_binary_structure(tmp1.ndim, tmp1.ndim) + tmp2 = binary_dilation(tmp1, struct) + tmp2 = numpy.logical_xor(tmp1, tmp2) + tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8) + metric = metric.lower() + if metric == 'euclidean': + metric = 1 + elif metric in ['taxicab', 'cityblock', 'manhattan']: + metric = 2 + elif metric == 'chessboard': + metric = 3 + else: + raise RuntimeError('distance metric not supported') + if sampling is not None: + sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim) + sampling = numpy.asarray(sampling, dtype=numpy.float64) + if not sampling.flags.contiguous: + sampling = sampling.copy() + if return_indices: + ft = numpy.zeros(tmp1.shape, dtype=numpy.int32) + else: + ft = None + if return_distances: + if distances is None: + if metric == 1: + dt = numpy.zeros(tmp1.shape, dtype=numpy.float64) + else: + dt = numpy.zeros(tmp1.shape, dtype=numpy.uint32) + else: + if distances.shape != tmp1.shape: + raise RuntimeError('distances array has wrong shape') + if metric == 1: + if distances.dtype.type != numpy.float64: + raise RuntimeError('distances array must be float64') + else: + if distances.dtype.type != numpy.uint32: + raise RuntimeError('distances array must be uint32') + dt = distances + else: + dt = None + + _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft) + if return_indices: + if isinstance(indices, numpy.ndarray): + if indices.dtype.type != numpy.int32: + raise RuntimeError('indices array must be int32') + if indices.shape != (tmp1.ndim,) + tmp1.shape: + raise 
RuntimeError('indices array has wrong shape') + tmp2 = indices + else: + tmp2 = numpy.indices(tmp1.shape, dtype=numpy.int32) + ft = numpy.ravel(ft) + for ii in range(tmp2.shape[0]): + rtmp = numpy.ravel(tmp2[ii, ...])[ft] + rtmp.shape = tmp1.shape + tmp2[ii, ...] = rtmp + ft = tmp2 + + # construct and return the result + result = [] + if return_distances and not dt_inplace: + result.append(dt) + if return_indices and not ft_inplace: + result.append(ft) + + if len(result) == 2: + return tuple(result) + elif len(result) == 1: + return result[0] + else: + return None + + +def distance_transform_cdt(input, metric='chessboard', return_distances=True, + return_indices=False, distances=None, indices=None): + """ + Distance transform for chamfer type of transforms. + + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its + shortest distance to the background (any zero-valued element). + + In addition to the distance transform, the feature transform can + be calculated. In this case the index of the closest background + element to each foreground element is returned in a separate array. + + Parameters + ---------- + input : array_like + Input. Values of 0 are treated as background. + metric : {'chessboard', 'taxicab'} or array_like, optional + The `metric` determines the type of chamfering that is done. If the + `metric` is equal to 'taxicab' a structure is generated using + `generate_binary_structure` with a squared distance equal to 1. If + the `metric` is equal to 'chessboard', a `metric` is generated + using `generate_binary_structure` with a squared distance equal to + the dimensionality of the array. These choices correspond to the + common interpretations of the 'taxicab' and the 'chessboard' + distance metrics in two dimensions. + A custom metric may be provided, in the form of a matrix where + each dimension has a length of three. 
+ 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'. + The default is 'chessboard'. + return_distances : bool, optional + Whether to calculate the distance transform. + Default is True. + return_indices : bool, optional + Whether to calculate the feature transform. + Default is False. + distances : int32 ndarray, optional + An output array to store the calculated distance transform, instead of + returning it. + `return_distances` must be True. + It must be the same shape as `input`. + indices : int32 ndarray, optional + An output array to store the calculated feature transform, instead of + returning it. + `return_indicies` must be True. + Its shape must be `(input.ndim,) + input.shape`. + + Returns + ------- + distances : int32 ndarray, optional + The calculated distance transform. Returned only when + `return_distances` is True, and `distances` is not supplied. + It will have the same shape as the input array. + indices : int32 ndarray, optional + The calculated feature transform. It has an input-shaped array for each + dimension of the input. See distance_transform_edt documentation for an + example. + Returned only when `return_indices` is True, and `indices` is not + supplied. + + See Also + -------- + distance_transform_edt : Fast distance transform for euclidean metric + distance_transform_bf : Distance transform for different metrics using + a slower brute force algorithm + + Examples + -------- + Import the necessary modules. + + >>> import numpy as np + >>> from scipy.ndimage import distance_transform_cdt + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1 import ImageGrid + + First, we create a toy binary image. + + >>> def add_circle(center_x, center_y, radius, image, fillvalue=1): + ... # fill circular area with 1 + ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]] + ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2 + ... circle_shape = np.sqrt(circle) < radius + ... image[circle_shape] = fillvalue + ... 
return image + >>> image = np.zeros((100, 100), dtype=np.uint8) + >>> image[35:65, 20:80] = 1 + >>> image = add_circle(28, 65, 10, image) + >>> image = add_circle(37, 30, 10, image) + >>> image = add_circle(70, 45, 20, image) + >>> image = add_circle(45, 80, 10, image) + + Next, we set up the figure. + + >>> fig = plt.figure(figsize=(5, 15)) + >>> grid = ImageGrid(fig, 111, nrows_ncols=(3, 1), axes_pad=(0.5, 0.3), + ... label_mode="1", share_all=True, + ... cbar_location="right", cbar_mode="each", + ... cbar_size="7%", cbar_pad="2%") + >>> for ax in grid: + ... ax.axis('off') + >>> top, middle, bottom = grid + >>> colorbar_ticks = [0, 10, 20] + + The top image contains the original binary image. + + >>> binary_image = top.imshow(image, cmap='gray') + >>> cbar_binary_image = top.cax.colorbar(binary_image) + >>> cbar_binary_image.set_ticks([0, 1]) + >>> top.set_title("Binary image: foreground in white") + + The middle image contains the distance transform using the ``taxicab`` + metric. + + >>> distance_taxicab = distance_transform_cdt(image, metric="taxicab") + >>> taxicab_transform = middle.imshow(distance_taxicab, cmap='gray') + >>> cbar_taxicab = middle.cax.colorbar(taxicab_transform) + >>> cbar_taxicab.set_ticks(colorbar_ticks) + >>> middle.set_title("Taxicab metric") + + The bottom image contains the distance transform using the ``chessboard`` + metric. + + >>> distance_chessboard = distance_transform_cdt(image, + ... 
metric="chessboard") + >>> chessboard_transform = bottom.imshow(distance_chessboard, cmap='gray') + >>> cbar_chessboard = bottom.cax.colorbar(chessboard_transform) + >>> cbar_chessboard.set_ticks(colorbar_ticks) + >>> bottom.set_title("Chessboard metric") + >>> plt.tight_layout() + >>> plt.show() + + """ + ft_inplace = isinstance(indices, numpy.ndarray) + dt_inplace = isinstance(distances, numpy.ndarray) + _distance_tranform_arg_check( + dt_inplace, ft_inplace, return_distances, return_indices + ) + input = numpy.asarray(input) + if isinstance(metric, str): + if metric in ['taxicab', 'cityblock', 'manhattan']: + rank = input.ndim + metric = generate_binary_structure(rank, 1) + elif metric == 'chessboard': + rank = input.ndim + metric = generate_binary_structure(rank, rank) + else: + raise ValueError('invalid metric provided') + else: + try: + metric = numpy.asarray(metric) + except Exception as e: + raise ValueError('invalid metric provided') from e + for s in metric.shape: + if s != 3: + raise ValueError('metric sizes must be equal to 3') + + if not metric.flags.contiguous: + metric = metric.copy() + if dt_inplace: + if distances.dtype.type != numpy.int32: + raise ValueError('distances must be of int32 type') + if distances.shape != input.shape: + raise ValueError('distances has wrong shape') + dt = distances + dt[...] 
= numpy.where(input, -1, 0).astype(numpy.int32) + else: + dt = numpy.where(input, -1, 0).astype(numpy.int32) + + rank = dt.ndim + if return_indices: + sz = numpy.prod(dt.shape, axis=0) + ft = numpy.arange(sz, dtype=numpy.int32) + ft.shape = dt.shape + else: + ft = None + + _nd_image.distance_transform_op(metric, dt, ft) + dt = dt[tuple([slice(None, None, -1)] * rank)] + if return_indices: + ft = ft[tuple([slice(None, None, -1)] * rank)] + _nd_image.distance_transform_op(metric, dt, ft) + dt = dt[tuple([slice(None, None, -1)] * rank)] + if return_indices: + ft = ft[tuple([slice(None, None, -1)] * rank)] + ft = numpy.ravel(ft) + if ft_inplace: + if indices.dtype.type != numpy.int32: + raise ValueError('indices array must be int32') + if indices.shape != (dt.ndim,) + dt.shape: + raise ValueError('indices array has wrong shape') + tmp = indices + else: + tmp = numpy.indices(dt.shape, dtype=numpy.int32) + for ii in range(tmp.shape[0]): + rtmp = numpy.ravel(tmp[ii, ...])[ft] + rtmp.shape = dt.shape + tmp[ii, ...] = rtmp + ft = tmp + + # construct and return the result + result = [] + if return_distances and not dt_inplace: + result.append(dt) + if return_indices and not ft_inplace: + result.append(ft) + + if len(result) == 2: + return tuple(result) + elif len(result) == 1: + return result[0] + else: + return None + + +def distance_transform_edt(input, sampling=None, return_distances=True, + return_indices=False, distances=None, indices=None): + """ + Exact Euclidean distance transform. + + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its + shortest distance to the background (any zero-valued element). + + In addition to the distance transform, the feature transform can + be calculated. In this case the index of the closest background + element to each foreground element is returned in a separate array. + + Parameters + ---------- + input : array_like + Input data to transform. 
Can be any type but will be converted + into binary: 1 wherever input equates to True, 0 elsewhere. + sampling : float, or sequence of float, optional + Spacing of elements along each dimension. If a sequence, must be of + length equal to the input rank; if a single number, this is used for + all axes. If not specified, a grid spacing of unity is implied. + return_distances : bool, optional + Whether to calculate the distance transform. + Default is True. + return_indices : bool, optional + Whether to calculate the feature transform. + Default is False. + distances : float64 ndarray, optional + An output array to store the calculated distance transform, instead of + returning it. + `return_distances` must be True. + It must be the same shape as `input`. + indices : int32 ndarray, optional + An output array to store the calculated feature transform, instead of + returning it. + `return_indicies` must be True. + Its shape must be `(input.ndim,) + input.shape`. + + Returns + ------- + distances : float64 ndarray, optional + The calculated distance transform. Returned only when + `return_distances` is True and `distances` is not supplied. + It will have the same shape as the input array. + indices : int32 ndarray, optional + The calculated feature transform. It has an input-shaped array for each + dimension of the input. See example below. + Returned only when `return_indices` is True and `indices` is not + supplied. + + Notes + ----- + The Euclidean distance transform gives values of the Euclidean + distance:: + + n + y_i = sqrt(sum (x[i]-b[i])**2) + i + + where b[i] is the background point (value 0) with the smallest + Euclidean distance to input points x[i], and n is the + number of dimensions. + + Examples + -------- + >>> from scipy import ndimage + >>> import numpy as np + >>> a = np.array(([0,1,1,1,1], + ... [0,0,1,1,1], + ... [0,1,1,1,1], + ... [0,1,1,1,0], + ... [0,1,1,0,0])) + >>> ndimage.distance_transform_edt(a) + array([[ 0. , 1. , 1.4142, 2.2361, 3. 
], + [ 0. , 0. , 1. , 2. , 2. ], + [ 0. , 1. , 1.4142, 1.4142, 1. ], + [ 0. , 1. , 1.4142, 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. ]]) + + With a sampling of 2 units along x, 1 along y: + + >>> ndimage.distance_transform_edt(a, sampling=[2,1]) + array([[ 0. , 1. , 2. , 2.8284, 3.6056], + [ 0. , 0. , 1. , 2. , 3. ], + [ 0. , 1. , 2. , 2.2361, 2. ], + [ 0. , 1. , 2. , 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. ]]) + + Asking for indices as well: + + >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True) + >>> inds + array([[[0, 0, 1, 1, 3], + [1, 1, 1, 1, 3], + [2, 2, 1, 3, 3], + [3, 3, 4, 4, 3], + [4, 4, 4, 4, 4]], + [[0, 0, 1, 1, 4], + [0, 1, 1, 1, 4], + [0, 0, 1, 4, 4], + [0, 0, 3, 3, 4], + [0, 0, 3, 3, 4]]]) + + With arrays provided for inplace outputs: + + >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32) + >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices) + array([[ 0. , 1. , 1.4142, 2.2361, 3. ], + [ 0. , 0. , 1. , 2. , 2. ], + [ 0. , 1. , 1.4142, 1.4142, 1. ], + [ 0. , 1. , 1.4142, 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. 
]]) + >>> indices + array([[[0, 0, 1, 1, 3], + [1, 1, 1, 1, 3], + [2, 2, 1, 3, 3], + [3, 3, 4, 4, 3], + [4, 4, 4, 4, 4]], + [[0, 0, 1, 1, 4], + [0, 1, 1, 1, 4], + [0, 0, 1, 4, 4], + [0, 0, 3, 3, 4], + [0, 0, 3, 3, 4]]]) + + """ + ft_inplace = isinstance(indices, numpy.ndarray) + dt_inplace = isinstance(distances, numpy.ndarray) + _distance_tranform_arg_check( + dt_inplace, ft_inplace, return_distances, return_indices + ) + + # calculate the feature transform + input = numpy.atleast_1d(numpy.where(input, 1, 0).astype(numpy.int8)) + if sampling is not None: + sampling = _ni_support._normalize_sequence(sampling, input.ndim) + sampling = numpy.asarray(sampling, dtype=numpy.float64) + if not sampling.flags.contiguous: + sampling = sampling.copy() + + if ft_inplace: + ft = indices + if ft.shape != (input.ndim,) + input.shape: + raise RuntimeError('indices array has wrong shape') + if ft.dtype.type != numpy.int32: + raise RuntimeError('indices array must be int32') + else: + ft = numpy.zeros((input.ndim,) + input.shape, dtype=numpy.int32) + + _nd_image.euclidean_feature_transform(input, sampling, ft) + # if requested, calculate the distance transform + if return_distances: + dt = ft - numpy.indices(input.shape, dtype=ft.dtype) + dt = dt.astype(numpy.float64) + if sampling is not None: + for ii in range(len(sampling)): + dt[ii, ...] 
*= sampling[ii] + numpy.multiply(dt, dt, dt) + if dt_inplace: + dt = numpy.add.reduce(dt, axis=0) + if distances.shape != dt.shape: + raise RuntimeError('distances array has wrong shape') + if distances.dtype.type != numpy.float64: + raise RuntimeError('distances array must be float64') + numpy.sqrt(dt, distances) + else: + dt = numpy.add.reduce(dt, axis=0) + dt = numpy.sqrt(dt) + + # construct and return the result + result = [] + if return_distances and not dt_inplace: + result.append(dt) + if return_indices and not ft_inplace: + result.append(ft) + + if len(result) == 2: + return tuple(result) + elif len(result) == 1: + return result[0] + else: + return None + + +def _distance_tranform_arg_check(distances_out, indices_out, + return_distances, return_indices): + """Raise a RuntimeError if the arguments are invalid""" + error_msgs = [] + if (not return_distances) and (not return_indices): + error_msgs.append( + 'at least one of return_distances/return_indices must be True') + if distances_out and not return_distances: + error_msgs.append( + 'return_distances must be True if distances is supplied' + ) + if indices_out and not return_indices: + error_msgs.append('return_indices must be True if indices is supplied') + if error_msgs: + raise RuntimeError(', '.join(error_msgs)) diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b2e0eab625d2da3b1bab2f221530be543eb41628 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py b/venv/lib/python3.10/site-packages/scipy/ndimage/_ni_docstrings.py new file mode 100644 index 0000000000000000000000000000000000000000..e6469f2c75fcee1f74dfbbe049df8ca05b074505 --- /dev/null 
"""Docstring components common to several ndimage functions."""
from scipy._lib import doccer

__all__ = ['docfiller']


# Each fragment below is substituted into function docstrings via
# ``%(name)s`` placeholders by the `docfiller` decorator at the bottom.
_input_doc = (
"""input : array_like
    The input array.""")
_axis_doc = (
"""axis : int, optional
    The axis of `input` along which to calculate. Default is -1.""")
_output_doc = (
"""output : array or dtype, optional
    The array in which to place the output, or the dtype of the
    returned array. By default an array of the same dtype as input
    will be created.""")
_size_foot_doc = (
"""size : scalar or tuple, optional
    See footprint, below. Ignored if footprint is given.
footprint : array, optional
    Either `size` or `footprint` must be defined. `size` gives
    the shape that is taken from the input array, at every element
    position, to define the input to the filter function.
    `footprint` is a boolean array that specifies (implicitly) a
    shape, but also which of the elements within this shape will get
    passed to the filter function. Thus ``size=(n,m)`` is equivalent
    to ``footprint=np.ones((n,m))``. We adjust `size` to the number
    of dimensions of the input array, so that, if the input array is
    shape (10,10,10), and `size` is 2, then the actual size used is
    (2,2,2). When `footprint` is given, `size` is ignored.""")
_mode_reflect_doc = (
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
    The `mode` parameter determines how the input array is extended
    beyond its boundaries. Default is 'reflect'. Behavior for each valid
    value is as follows:

    'reflect' (`d c b a | a b c d | d c b a`)
        The input is extended by reflecting about the edge of the last
        pixel. This mode is also sometimes referred to as half-sample
        symmetric.

    'constant' (`k k k k | a b c d | k k k k`)
        The input is extended by filling all values beyond the edge with
        the same constant value, defined by the `cval` parameter.

    'nearest' (`a a a a | a b c d | d d d d`)
        The input is extended by replicating the last pixel.

    'mirror' (`d c b | a b c d | c b a`)
        The input is extended by reflecting about the center of the last
        pixel. This mode is also sometimes referred to as whole-sample
        symmetric.

    'wrap' (`a b c d | a b c d | a b c d`)
        The input is extended by wrapping around to the opposite edge.

    For consistency with the interpolation functions, the following mode
    names can also be used:

    'grid-mirror'
        This is a synonym for 'reflect'.

    'grid-constant'
        This is a synonym for 'constant'.

    'grid-wrap'
        This is a synonym for 'wrap'.""")

_mode_interp_constant_doc = (
"""mode : {'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', \
'mirror', 'grid-wrap', 'wrap'}, optional
    The `mode` parameter determines how the input array is extended
    beyond its boundaries. Default is 'constant'. Behavior for each valid
    value is as follows (see additional plots and details on
    :ref:`boundary modes <ndimage-interpolation-modes>`):

    'reflect' (`d c b a | a b c d | d c b a`)
        The input is extended by reflecting about the edge of the last
        pixel. This mode is also sometimes referred to as half-sample
        symmetric.

    'grid-mirror'
        This is a synonym for 'reflect'.

    'constant' (`k k k k | a b c d | k k k k`)
        The input is extended by filling all values beyond the edge with
        the same constant value, defined by the `cval` parameter. No
        interpolation is performed beyond the edges of the input.

    'grid-constant' (`k k k k | a b c d | k k k k`)
        The input is extended by filling all values beyond the edge with
        the same constant value, defined by the `cval` parameter. Interpolation
        occurs for samples outside the input's extent as well.

    'nearest' (`a a a a | a b c d | d d d d`)
        The input is extended by replicating the last pixel.

    'mirror' (`d c b | a b c d | c b a`)
        The input is extended by reflecting about the center of the last
        pixel. This mode is also sometimes referred to as whole-sample
        symmetric.

    'grid-wrap' (`a b c d | a b c d | a b c d`)
        The input is extended by wrapping around to the opposite edge.

    'wrap' (`d b c d | a b c d | b c a b`)
        The input is extended by wrapping around to the opposite edge, but in a
        way such that the last point and initial point exactly overlap. In this
        case it is not well defined which sample will be chosen at the point of
        overlap.""")
# The mirror variant differs from the constant one only in its default; the
# assert guards against a silent no-op if the source text is ever reworded.
_mode_interp_mirror_doc = (
    _mode_interp_constant_doc.replace("Default is 'constant'",
                                      "Default is 'mirror'")
)
assert _mode_interp_mirror_doc != _mode_interp_constant_doc, \
    'Default not replaced'

_mode_multiple_doc = (
"""mode : str or sequence, optional
    The `mode` parameter determines how the input array is extended
    when the filter overlaps a border. By passing a sequence of modes
    with length equal to the number of dimensions of the input array,
    different modes can be specified along each axis. Default value is
    'reflect'. The valid values and their behavior is as follows:

    'reflect' (`d c b a | a b c d | d c b a`)
        The input is extended by reflecting about the edge of the last
        pixel. This mode is also sometimes referred to as half-sample
        symmetric.

    'constant' (`k k k k | a b c d | k k k k`)
        The input is extended by filling all values beyond the edge with
        the same constant value, defined by the `cval` parameter.

    'nearest' (`a a a a | a b c d | d d d d`)
        The input is extended by replicating the last pixel.

    'mirror' (`d c b | a b c d | c b a`)
        The input is extended by reflecting about the center of the last
        pixel. This mode is also sometimes referred to as whole-sample
        symmetric.

    'wrap' (`a b c d | a b c d | a b c d`)
        The input is extended by wrapping around to the opposite edge.

    For consistency with the interpolation functions, the following mode
    names can also be used:

    'grid-constant'
        This is a synonym for 'constant'.

    'grid-mirror'
        This is a synonym for 'reflect'.

    'grid-wrap'
        This is a synonym for 'wrap'.""")
_cval_doc = (
"""cval : scalar, optional
    Value to fill past edges of input if `mode` is 'constant'. Default
    is 0.0.""")
_origin_doc = (
"""origin : int, optional
    Controls the placement of the filter on the input array's pixels.
    A value of 0 (the default) centers the filter over the pixel, with
    positive values shifting the filter to the left, and negative ones
    to the right.""")
_origin_multiple_doc = (
"""origin : int or sequence, optional
    Controls the placement of the filter on the input array's pixels.
    A value of 0 (the default) centers the filter over the pixel, with
    positive values shifting the filter to the left, and negative ones
    to the right. By passing a sequence of origins with length equal to
    the number of dimensions of the input array, different shifts can
    be specified along each axis.""")
_extra_arguments_doc = (
"""extra_arguments : sequence, optional
    Sequence of extra positional arguments to pass to passed function.""")
_extra_keywords_doc = (
"""extra_keywords : dict, optional
    dict of extra keyword arguments to pass to passed function.""")
_prefilter_doc = (
"""prefilter : bool, optional
    Determines if the input array is prefiltered with `spline_filter`
    before interpolation. The default is True, which will create a
    temporary `float64` array of filtered values if `order > 1`. If
    setting this to False, the output will be slightly blurred if
    `order > 1`, unless the input is prefiltered, i.e. it is the result
    of calling `spline_filter` on the original input.""")

# Placeholder name -> text fragment; keys are the names used in the
# ``%(key)s`` markers inside ndimage docstrings.
docdict = {
    'input': _input_doc,
    'axis': _axis_doc,
    'output': _output_doc,
    'size_foot': _size_foot_doc,
    'mode_interp_constant': _mode_interp_constant_doc,
    'mode_interp_mirror': _mode_interp_mirror_doc,
    'mode_reflect': _mode_reflect_doc,
    'mode_multiple': _mode_multiple_doc,
    'cval': _cval_doc,
    'origin': _origin_doc,
    'origin_multiple': _origin_multiple_doc,
    'extra_arguments': _extra_arguments_doc,
    'extra_keywords': _extra_keywords_doc,
    'prefilter': _prefilter_doc
    }

# Decorator that fills ``%(key)s`` placeholders in a function's docstring.
docfiller = doccer.filldoc(docdict)
# Shared argument-normalization helpers for scipy.ndimage (private module;
# the BSD 3-clause license header above is retained unchanged).

from collections.abc import Iterable
import operator
import warnings

import numpy

# Listed explicitly so ``from ... import *`` exposes these underscore-prefixed
# helpers; they are consumed only inside scipy.ndimage.
__all__ = ['_extend_mode_to_code', '_normalize_sequence', '_get_output',
           '_check_axes']


def _extend_mode_to_code(mode):
    """Convert an extension-mode name to the integer code used by the C core.

    Parameters
    ----------
    mode : str
        One of 'nearest', 'wrap', 'reflect', 'grid-mirror', 'mirror',
        'constant', 'grid-wrap' or 'grid-constant'.

    Returns
    -------
    int
        The boundary-mode code expected by the compiled ndimage routines.

    Raises
    ------
    RuntimeError
        If `mode` is not a recognized boundary mode.
    """
    # Dispatch table instead of an if/elif chain; 'reflect' and
    # 'grid-mirror' are synonyms and share code 2.
    codes = {'nearest': 0, 'wrap': 1, 'reflect': 2, 'grid-mirror': 2,
             'mirror': 3, 'constant': 4, 'grid-wrap': 5, 'grid-constant': 6}
    try:
        return codes[mode]
    except KeyError:
        # Same exception type and message as before; context suppressed so
        # callers see a clean error.
        raise RuntimeError('boundary mode not supported') from None


def _normalize_sequence(input, rank):
    """Broadcast a scalar to a ``rank``-long list, or validate a sequence.

    If `input` is a scalar (including a string), return a list of length
    `rank` repeating it; if it is a non-string iterable, return it as a
    list after checking that its length equals `rank`.

    Raises
    ------
    RuntimeError
        If a sequence `input` does not have exactly `rank` elements.
    """
    # Strings are iterable but must be treated as scalars here.
    if isinstance(input, Iterable) and not isinstance(input, str):
        normalized = list(input)
        if len(normalized) != rank:
            err = "sequence argument must have length equal to input rank"
            raise RuntimeError(err)
    else:
        normalized = [input] * rank
    return normalized


def _get_output(output, input, shape=None, complex_output=False):
    """Allocate or validate the output array for an ndimage operation.

    Parameters
    ----------
    output : ndarray, dtype, type, str or None
        A pre-allocated output array, or a dtype specification (class,
        ``numpy.dtype`` or dtype name) for the array to allocate. ``None``
        allocates a zero-filled array with `input`'s dtype.
    input : ndarray
        Input array; supplies the default shape and dtype.
    shape : tuple of int, optional
        Shape for the output. Defaults to ``input.shape``.
    complex_output : bool, optional
        If True, the output must have (or is promoted to) a complex dtype.

    Returns
    -------
    ndarray
        A freshly allocated zero array, or the validated `output`.

    Raises
    ------
    RuntimeError
        If a given `output` array has the wrong shape, or a given dtype is
        non-numeric / not complex when `complex_output` is True.
    """
    if shape is None:
        shape = input.shape
    if output is None:
        if not complex_output:
            output = numpy.zeros(shape, dtype=input.dtype.name)
        else:
            # Promote against complex64 so real float32 input yields
            # complex64, float64 yields complex128, etc.
            complex_type = numpy.promote_types(input.dtype, numpy.complex64)
            output = numpy.zeros(shape, dtype=complex_type)
    elif isinstance(output, (type, numpy.dtype)):
        # Classes (like `np.float32`) and dtypes are interpreted as dtype
        if complex_output and numpy.dtype(output).kind != 'c':
            warnings.warn("promoting specified output dtype to complex",
                          stacklevel=3)
            output = numpy.promote_types(output, numpy.complex64)
        output = numpy.zeros(shape, dtype=output)
    elif isinstance(output, str):
        output = numpy.dtype(output)
        if complex_output and output.kind != 'c':
            raise RuntimeError("output must have complex dtype")
        elif not issubclass(output.type, numpy.number):
            raise RuntimeError("output must have numeric dtype")
        output = numpy.zeros(shape, dtype=output)
    elif output.shape != shape:
        raise RuntimeError("output shape not correct")
    elif complex_output and output.dtype.kind != 'c':
        raise RuntimeError("output must have complex dtype")
    return output


def _check_axes(axes, ndim):
    """Canonicalize an ``axes`` argument into a tuple of unique in-range ints.

    Parameters
    ----------
    axes : int, iterable of int, or None
        Axis or axes to operate on. ``None`` means all axes.
    ndim : int
        Number of dimensions of the array the axes refer to.

    Returns
    -------
    tuple of int
        Unique, non-negative axis indices.

    Raises
    ------
    ValueError
        If an axis is out of range, axes are duplicated, or `axes` is of
        an unsupported type.
    """
    if axes is None:
        return tuple(range(ndim))
    if numpy.isscalar(axes):
        axes = (operator.index(axes),)
    elif isinstance(axes, Iterable):
        # Convert once, up front. The previous revision rebuilt this tuple
        # inside the loop over the same iterable, which exhausted one-shot
        # iterators (generators) after their first element and re-converted
        # the whole sequence on every iteration.
        axes = tuple(operator.index(ax) for ax in axes)
    else:
        message = "axes must be an integer, iterable of integers, or None"
        raise ValueError(message)
    # Bounds-check and normalize negatives uniformly; the scalar path
    # previously skipped both steps, inconsistently with the iterable path.
    for ax in axes:
        if ax < -ndim or ax > ndim - 1:
            raise ValueError(f"specified axis: {ax} is out of range")
    axes = tuple(ax % ndim if ax < 0 else ax for ax in axes)
    if len(set(axes)) != len(axes):
        raise ValueError("axes must be unique")
    return axes
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'fourier_gaussian', 'fourier_uniform', + 'fourier_ellipsoid', 'fourier_shift' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package='ndimage', module='fourier', + private_modules=['_fourier'], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/interpolation.py b/venv/lib/python3.10/site-packages/scipy/ndimage/interpolation.py new file mode 100644 index 0000000000000000000000000000000000000000..8a28816cbfad89faced7acb8a54cd6ecc4fa8ad2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/ndimage/interpolation.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.ndimage` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'spline_filter1d', 'spline_filter', + 'geometric_transform', 'map_coordinates', + 'affine_transform', 'shift', 'zoom', 'rotate', + 'docfiller' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package='ndimage', module='interpolation', + private_modules=['_interpolation'], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/measurements.py b/venv/lib/python3.10/site-packages/scipy/ndimage/measurements.py new file mode 100644 index 0000000000000000000000000000000000000000..22f76b01840ffb829205bd1d28a7ad1f9ac5db61 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/ndimage/measurements.py @@ -0,0 +1,24 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.ndimage` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'label', 'find_objects', 'labeled_comprehension', + 'sum', 'mean', 'variance', 'standard_deviation', + 'minimum', 'maximum', 'median', 'minimum_position', + 'maximum_position', 'extrema', 'center_of_mass', + 'histogram', 'watershed_ift', 'sum_labels' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package='ndimage', module='measurements', + private_modules=['_measurements'], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/morphology.py b/venv/lib/python3.10/site-packages/scipy/ndimage/morphology.py new file mode 100644 index 0000000000000000000000000000000000000000..e522e7df3a4b06b7e04ed8c2d0ecaff2a98b951d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/ndimage/morphology.py @@ -0,0 +1,27 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.ndimage` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'iterate_structure', 'generate_binary_structure', + 'binary_erosion', 'binary_dilation', 'binary_opening', + 'binary_closing', 'binary_hit_or_miss', 'binary_propagation', + 'binary_fill_holes', 'grey_erosion', 'grey_dilation', + 'grey_opening', 'grey_closing', 'morphological_gradient', + 'morphological_laplace', 'white_tophat', 'black_tophat', + 'distance_transform_bf', 'distance_transform_cdt', + 'distance_transform_edt' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package='ndimage', module='morphology', + private_modules=['_morphology'], all=__all__, + attribute=name)