diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_bunch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_bunch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a33df3872a8d4a6b39e9a33de6b2622c82ef2ac4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_bunch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebd0647b3515d64e8ea08daa73c4753584afcc7d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_ccallback.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1db0678f60f5a85f427a6327502bdcfd0916eaa1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbfffb44d88d8e9e75e8ee52426e012263caef2c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58197b38ddde88cce5916869a198165a67a69bb2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80bedd164e1b7191cc0b6e581303cee934bf9e72 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/__pycache__/doccer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5f2b90a026aaecbdc090b3d3234954ab29fce8ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2018, Quansight-Labs +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..91afdcedb180599a41758cdd8c03416cf6c20d76 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py @@ -0,0 +1,116 @@ +""" +.. note: + If you are looking for overrides for NumPy-specific methods, see the + documentation for :obj:`unumpy`. This page explains how to write + back-ends and multimethods. + +``uarray`` is built around a back-end protocol, and overridable multimethods. +It is necessary to define multimethods for back-ends to be able to override them. +See the documentation of :obj:`generate_multimethod` on how to write multimethods. + + + +Let's start with the simplest: + +``__ua_domain__`` defines the back-end *domain*. The domain consists of period- +separated string consisting of the modules you extend plus the submodule. For +example, if a submodule ``module2.submodule`` extends ``module1`` +(i.e., it exposes dispatchables marked as types available in ``module1``), +then the domain string should be ``"module1.module2.submodule"``. + + +For the purpose of this demonstration, we'll be creating an object and setting +its attributes directly. However, note that you can use a module or your own type +as a backend as well. + +>>> class Backend: pass +>>> be = Backend() +>>> be.__ua_domain__ = "ua_examples" + +It might be useful at this point to sidetrack to the documentation of +:obj:`generate_multimethod` to find out how to generate a multimethod +overridable by :obj:`uarray`. Needless to say, writing a backend and +creating multimethods are mostly orthogonal activities, and knowing +one doesn't necessarily require knowledge of the other, although it +is certainly helpful. We expect core API designers/specifiers to write the +multimethods, and implementors to override them. But, as is often the case, +similar people write both. + +Without further ado, here's an example multimethod: + +>>> import uarray as ua +>>> from uarray import Dispatchable +>>> def override_me(a, b): +... return Dispatchable(a, int), +>>> def override_replacer(args, kwargs, dispatchables): +... return (dispatchables[0], args[1]), {} +>>> overridden_me = ua.generate_multimethod( +... 
override_me, override_replacer, "ua_examples" +... ) + +Next comes the part about overriding the multimethod. This requires +the ``__ua_function__`` protocol, and the ``__ua_convert__`` +protocol. The ``__ua_function__`` protocol has the signature +``(method, args, kwargs)`` where ``method`` is the passed +multimethod, ``args``/``kwargs`` specify the arguments and ``dispatchables`` +is the list of converted dispatchables passed in. + +>>> def __ua_function__(method, args, kwargs): +... return method.__name__, args, kwargs +>>> be.__ua_function__ = __ua_function__ + +The other protocol of interest is the ``__ua_convert__`` protocol. It has the +signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion +between the formats should ideally be an ``O(1)`` operation, but it means that +no memory copying should be involved, only views of the existing data. + +>>> def __ua_convert__(dispatchables, coerce): +... for d in dispatchables: +... if d.type is int: +... if coerce and d.coercible: +... yield str(d.value) +... else: +... yield d.value +>>> be.__ua_convert__ = __ua_convert__ + +Now that we have defined the backend, the next thing to do is to call the multimethod. + +>>> with ua.set_backend(be): +... overridden_me(1, "2") +('override_me', (1, '2'), {}) + +Note that the marked type has no effect on the actual type of the passed object. +We can also coerce the type of the input. + +>>> with ua.set_backend(be, coerce=True): +... overridden_me(1, "2") +... overridden_me(1.0, "2") +('override_me', ('1', '2'), {}) +('override_me', ('1.0', '2'), {}) + +Another feature is that if you remove ``__ua_convert__``, the arguments are not +converted at all and it's up to the backend to handle that. + +>>> del be.__ua_convert__ +>>> with ua.set_backend(be): +... overridden_me(1, "2") +('override_me', (1, '2'), {}) + +You also have the option to return ``NotImplemented``, in which case processing moves on +to the next back-end, which in this case, doesn't exist. The same applies to +``__ua_convert__``. + +>>> be.__ua_function__ = lambda *a, **kw: NotImplemented +>>> with ua.set_backend(be): +... overridden_me(1, "2") +Traceback (most recent call last): + ... +uarray.BackendNotImplementedError: ... + +The last possibility is if we don't have ``__ua_convert__``, in which case the job is +left up to ``__ua_function__``, but putting things back into arrays after conversion +will not be possible. 
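+
+To recap: a complete back-end can be as small as a type (or module) exposing
+``__ua_domain__`` and ``__ua_function__``; ``__ua_convert__`` is optional. The
+following is a minimal illustrative sketch reusing the ``ua_examples`` domain
+from above (``MinimalBackend`` is not part of the library):
+
+>>> class MinimalBackend:
+...     __ua_domain__ = "ua_examples"
+...     @staticmethod
+...     def __ua_function__(method, args, kwargs):
+...         return method.__name__, args, kwargs
+>>> with ua.set_backend(MinimalBackend):
+...     overridden_me(1, "2")
+('override_me', (1, '2'), {})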
+""" + +from ._backend import * +__version__ = '0.8.8.dev0+aa94c5a4.scipy' diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..234d5a679529961d226e669f3222157b83b10856 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b387cee8248d20897592e0b4ab3735409b3d243a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..67da7d35ccea8ad26bd471b16e9400071a821cc0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py @@ -0,0 +1,704 @@ +import typing +import types +import inspect +import functools +from . import _uarray +import copyreg +import pickle +import contextlib + +from ._uarray import ( # type: ignore + BackendNotImplementedError, + _Function, + _SkipBackendContext, + _SetBackendContext, + _BackendState, +) + +__all__ = [ + "set_backend", + "set_global_backend", + "skip_backend", + "register_backend", + "determine_backend", + "determine_backend_multi", + "clear_backends", + "create_multimethod", + "generate_multimethod", + "_Function", + "BackendNotImplementedError", + "Dispatchable", + "wrap_single_convertor", + "wrap_single_convertor_instance", + "all_of_type", + "mark_as", + "set_state", + "get_state", + "reset_state", + "_BackendState", + "_SkipBackendContext", + "_SetBackendContext", +] + +ArgumentExtractorType = typing.Callable[..., tuple["Dispatchable", ...]] +ArgumentReplacerType = typing.Callable[ + [tuple, dict, tuple], tuple[tuple, dict] +] + +def unpickle_function(mod_name, qname, self_): + import importlib + + try: + module = importlib.import_module(mod_name) + qname = qname.split(".") + func = module + for q in qname: + func = getattr(func, q) + + if self_ is not None: + func = types.MethodType(func, self_) + + return func + except (ImportError, AttributeError) as e: + from pickle import UnpicklingError + + raise UnpicklingError from e + + +def pickle_function(func): + mod_name = getattr(func, "__module__", None) + qname = getattr(func, "__qualname__", None) + self_ = getattr(func, "__self__", None) + + try: + test = unpickle_function(mod_name, qname, self_) + except pickle.UnpicklingError: + test = None + + if test is not func: + raise pickle.PicklingError( + f"Can't pickle {func}: it's not the same object as {test}" + ) + + return unpickle_function, (mod_name, qname, self_) + + +def pickle_state(state): + return _uarray._BackendState._unpickle, state._pickle() + + +def pickle_set_backend_context(ctx): + return _SetBackendContext, ctx._pickle() + + +def pickle_skip_backend_context(ctx): + return _SkipBackendContext, ctx._pickle() + + +copyreg.pickle(_Function, pickle_function) +copyreg.pickle(_uarray._BackendState, pickle_state) +copyreg.pickle(_SetBackendContext, 
pickle_set_backend_context) +copyreg.pickle(_SkipBackendContext, pickle_skip_backend_context) + + +def get_state(): + """ + Returns an opaque object containing the current state of all the backends. + + Can be used for synchronization between threads/processes. + + See Also + -------- + set_state + Sets the state returned by this function. + """ + return _uarray.get_state() + + +@contextlib.contextmanager +def reset_state(): + """ + Returns a context manager that resets all state once exited. + + See Also + -------- + set_state + Context manager that sets the backend state. + get_state + Gets a state to be set by this context manager. + """ + with set_state(get_state()): + yield + + +@contextlib.contextmanager +def set_state(state): + """ + A context manager that sets the state of the backends to one returned by :obj:`get_state`. + + See Also + -------- + get_state + Gets a state to be set by this context manager. + """ # noqa: E501 + old_state = get_state() + _uarray.set_state(state) + try: + yield + finally: + _uarray.set_state(old_state, True) + + +def create_multimethod(*args, **kwargs): + """ + Creates a decorator for generating multimethods. + + This function creates a decorator that can be used with an argument + extractor in order to generate a multimethod. Other than for the + argument extractor, all arguments are passed on to + :obj:`generate_multimethod`. + + See Also + -------- + generate_multimethod + Generates a multimethod. + """ + + def wrapper(a): + return generate_multimethod(a, *args, **kwargs) + + return wrapper + + +def generate_multimethod( + argument_extractor: ArgumentExtractorType, + argument_replacer: ArgumentReplacerType, + domain: str, + default: typing.Optional[typing.Callable] = None, +): + """ + Generates a multimethod. + + Parameters + ---------- + argument_extractor : ArgumentExtractorType + A callable which extracts the dispatchable arguments. Extracted arguments + should be marked by the :obj:`Dispatchable` class. It has the same signature + as the desired multimethod. + argument_replacer : ArgumentReplacerType + A callable with the signature (args, kwargs, dispatchables), which should also + return an (args, kwargs) pair with the dispatchables replaced inside the + args/kwargs. + domain : str + A string value indicating the domain of this multimethod. + default: Optional[Callable], optional + The default implementation of this multimethod, where ``None`` (the default) + specifies there is no default implementation. + + Examples + -------- + In this example, ``a`` is to be dispatched over, so we return it, while marking it + as an ``int``. + The trailing comma is needed because the args have to be returned as an iterable. + + >>> def override_me(a, b): + ... return Dispatchable(a, int), + + Next, we define the argument replacer that replaces the dispatchables inside + args/kwargs with the supplied ones. + + >>> def override_replacer(args, kwargs, dispatchables): + ... return (dispatchables[0], args[1]), {} + + Next, we define the multimethod. + + >>> overridden_me = generate_multimethod( + ... override_me, override_replacer, "ua_examples" + ... ) + + Notice that there's no default implementation, unless you supply one. + + >>> overridden_me(1, "a") + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + >>> overridden_me2 = generate_multimethod( + ... override_me, override_replacer, "ua_examples", default=lambda x, y: (x, y) + ... 
) + >>> overridden_me2(1, "a") + (1, 'a') + + See Also + -------- + uarray + See the module documentation for how to override the method by creating + backends. + """ + kw_defaults, arg_defaults, opts = get_defaults(argument_extractor) + ua_func = _Function( + argument_extractor, + argument_replacer, + domain, + arg_defaults, + kw_defaults, + default, + ) + + return functools.update_wrapper(ua_func, argument_extractor) + + +def set_backend(backend, coerce=False, only=False): + """ + A context manager that sets the preferred backend. + + Parameters + ---------- + backend + The backend to set. + coerce + Whether or not to coerce to a specific backend's types. Implies ``only``. + only + Whether or not this should be the last backend to try. + + See Also + -------- + skip_backend: A context manager that allows skipping of backends. + set_global_backend: Set a single, global backend for a domain. + """ + try: + return backend.__ua_cache__["set", coerce, only] + except AttributeError: + backend.__ua_cache__ = {} + except KeyError: + pass + + ctx = _SetBackendContext(backend, coerce, only) + backend.__ua_cache__["set", coerce, only] = ctx + return ctx + + +def skip_backend(backend): + """ + A context manager that allows one to skip a given backend from processing + entirely. This allows one to use another backend's code in a library that + is also a consumer of the same backend. + + Parameters + ---------- + backend + The backend to skip. + + See Also + -------- + set_backend: A context manager that allows setting of backends. + set_global_backend: Set a single, global backend for a domain. + """ + try: + return backend.__ua_cache__["skip"] + except AttributeError: + backend.__ua_cache__ = {} + except KeyError: + pass + + ctx = _SkipBackendContext(backend) + backend.__ua_cache__["skip"] = ctx + return ctx + + +def get_defaults(f): + sig = inspect.signature(f) + kw_defaults = {} + arg_defaults = [] + opts = set() + for k, v in sig.parameters.items(): + if v.default is not inspect.Parameter.empty: + kw_defaults[k] = v.default + if v.kind in ( + inspect.Parameter.POSITIONAL_ONLY, + inspect.Parameter.POSITIONAL_OR_KEYWORD, + ): + arg_defaults.append(v.default) + opts.add(k) + + return kw_defaults, tuple(arg_defaults), opts + + +def set_global_backend(backend, coerce=False, only=False, *, try_last=False): + """ + This utility method replaces the default backend for permanent use. It + will be tried in the list of backends automatically, unless the + ``only`` flag is set on a backend. This will be the first tried + backend outside the :obj:`set_backend` context manager. + + Note that this method is not thread-safe. + + .. warning:: + We caution library authors against using this function in + their code. We do *not* support this use-case. This function + is meant to be used only by users themselves, or by a reference + implementation, if one exists. + + Parameters + ---------- + backend + The backend to register. + coerce : bool + Whether to coerce input types when trying this backend. + only : bool + If ``True``, no more backends will be tried if this fails. + Implied by ``coerce=True``. + try_last : bool + If ``True``, the global backend is tried after registered backends. + + See Also + -------- + set_backend: A context manager that allows setting of backends. + skip_backend: A context manager that allows skipping of backends. + """ + _uarray.set_global_backend(backend, coerce, only, try_last) + + +def register_backend(backend): + """ + This utility method sets registers backend for permanent use. 
It + will be tried in the list of backends automatically, unless the + ``only`` flag is set on a backend. + + Note that this method is not thread-safe. + + Parameters + ---------- + backend + The backend to register. + """ + _uarray.register_backend(backend) + + +def clear_backends(domain, registered=True, globals=False): + """ + This utility method clears registered backends. + + .. warning:: + We caution library authors against using this function in + their code. We do *not* support this use-case. This function + is meant to be used only by users themselves. + + .. warning:: + Do NOT use this method inside a multimethod call, or the + program is likely to crash. + + Parameters + ---------- + domain : Optional[str] + The domain for which to de-register backends. ``None`` means + de-register for all domains. + registered : bool + Whether or not to clear registered backends. See :obj:`register_backend`. + globals : bool + Whether or not to clear global backends. See :obj:`set_global_backend`. + + See Also + -------- + register_backend : Register a backend globally. + set_global_backend : Set a global backend. + """ + _uarray.clear_backends(domain, registered, globals) + + +class Dispatchable: + """ + A utility class which marks an argument with a specific dispatch type. + + + Attributes + ---------- + value + The value of the Dispatchable. + + type + The type of the Dispatchable. + + Examples + -------- + >>> x = Dispatchable(1, str) + >>> x + , value=1> + + See Also + -------- + all_of_type + Marks all unmarked parameters of a function. + + mark_as + Allows one to create a utility function to mark as a given type. + """ + + def __init__(self, value, dispatch_type, coercible=True): + self.value = value + self.type = dispatch_type + self.coercible = coercible + + def __getitem__(self, index): + return (self.type, self.value)[index] + + def __str__(self): + return f"<{type(self).__name__}: type={self.type!r}, value={self.value!r}>" + + __repr__ = __str__ + + +def mark_as(dispatch_type): + """ + Creates a utility function to mark something as a specific type. + + Examples + -------- + >>> mark_int = mark_as(int) + >>> mark_int(1) + , value=1> + """ + return functools.partial(Dispatchable, dispatch_type=dispatch_type) + + +def all_of_type(arg_type): + """ + Marks all unmarked arguments as a given type. + + Examples + -------- + >>> @all_of_type(str) + ... def f(a, b): + ... return a, Dispatchable(b, int) + >>> f('a', 1) + (, value='a'>, + , value=1>) + """ + + def outer(func): + @functools.wraps(func) + def inner(*args, **kwargs): + extracted_args = func(*args, **kwargs) + return tuple( + Dispatchable(arg, arg_type) + if not isinstance(arg, Dispatchable) + else arg + for arg in extracted_args + ) + + return inner + + return outer + + +def wrap_single_convertor(convert_single): + """ + Wraps a ``__ua_convert__`` defined for a single element to all elements. + If any of them return ``NotImplemented``, the operation is assumed to be + undefined. + + Accepts a signature of (value, type, coerce). + """ + + @functools.wraps(convert_single) + def __ua_convert__(dispatchables, coerce): + converted = [] + for d in dispatchables: + c = convert_single(d.value, d.type, coerce and d.coercible) + + if c is NotImplemented: + return NotImplemented + + converted.append(c) + + return converted + + return __ua_convert__ + + +def wrap_single_convertor_instance(convert_single): + """ + Wraps a ``__ua_convert__`` defined for a single element to all elements. 
+ If any of them return ``NotImplemented``, the operation is assumed to be + undefined. + + Accepts a signature of (value, type, coerce). + """ + + @functools.wraps(convert_single) + def __ua_convert__(self, dispatchables, coerce): + converted = [] + for d in dispatchables: + c = convert_single(self, d.value, d.type, coerce and d.coercible) + + if c is NotImplemented: + return NotImplemented + + converted.append(c) + + return converted + + return __ua_convert__ + + +def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False): + """Set the backend to the first active backend that supports ``value`` + + This is useful for functions that call multimethods without any dispatchable + arguments. You can use :func:`determine_backend` to ensure the same backend + is used everywhere in a block of multimethod calls. + + Parameters + ---------- + value + The value being tested + dispatch_type + The dispatch type associated with ``value``, aka + ":ref:`marking `". + domain: string + The domain to query for backends and set. + coerce: bool + Whether or not to allow coercion to the backend's types. Implies ``only``. + only: bool + Whether or not this should be the last backend to try. + + See Also + -------- + set_backend: For when you know which backend to set + + Notes + ----- + + Support is determined by the ``__ua_convert__`` protocol. Backends not + supporting the type must return ``NotImplemented`` from their + ``__ua_convert__`` if they don't support input of that type. + + Examples + -------- + + Suppose we have two backends ``BackendA`` and ``BackendB`` each supporting + different types, ``TypeA`` and ``TypeB``. Neither supporting the other type: + + >>> with ua.set_backend(ex.BackendA): + ... ex.call_multimethod(ex.TypeB(), ex.TypeB()) + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + Now consider a multimethod that creates a new object of ``TypeA``, or + ``TypeB`` depending on the active backend. + + >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB): + ... res = ex.creation_multimethod() + ... ex.call_multimethod(res, ex.TypeA()) + Traceback (most recent call last): + ... + uarray.BackendNotImplementedError: ... + + ``res`` is an object of ``TypeB`` because ``BackendB`` is set in the + innermost with statement. So, ``call_multimethod`` fails since the types + don't match. + + Instead, we need to first find a backend suitable for all of our objects. + + >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB): + ... x = ex.TypeA() + ... with ua.determine_backend(x, "mark", domain="ua_examples"): + ... res = ex.creation_multimethod() + ... ex.call_multimethod(res, x) + TypeA + + """ + dispatchables = (Dispatchable(value, dispatch_type, coerce),) + backend = _uarray.determine_backend(domain, dispatchables, coerce) + + return set_backend(backend, coerce=coerce, only=only) + + +def determine_backend_multi( + dispatchables, *, domain, only=True, coerce=False, **kwargs +): + """Set a backend supporting all ``dispatchables`` + + This is useful for functions that call multimethods without any dispatchable + arguments. You can use :func:`determine_backend_multi` to ensure the same + backend is used everywhere in a block of multimethod calls involving + multiple arrays. + + Parameters + ---------- + dispatchables: Sequence[Union[uarray.Dispatchable, Any]] + The dispatchables that must be supported + domain: string + The domain to query for backends and set. 
+ coerce: bool + Whether or not to allow coercion to the backend's types. Implies ``only``. + only: bool + Whether or not this should be the last backend to try. + dispatch_type: Optional[Any] + The default dispatch type associated with ``dispatchables``, aka + ":ref:`marking `". + + See Also + -------- + determine_backend: For a single dispatch value + set_backend: For when you know which backend to set + + Notes + ----- + + Support is determined by the ``__ua_convert__`` protocol. Backends not + supporting the type must return ``NotImplemented`` from their + ``__ua_convert__`` if they don't support input of that type. + + Examples + -------- + + :func:`determine_backend` allows the backend to be set from a single + object. :func:`determine_backend_multi` allows multiple objects to be + checked simultaneously for support in the backend. Suppose we have a + ``BackendAB`` which supports ``TypeA`` and ``TypeB`` in the same call, + and a ``BackendBC`` that doesn't support ``TypeA``. + + >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC): + ... a, b = ex.TypeA(), ex.TypeB() + ... with ua.determine_backend_multi( + ... [ua.Dispatchable(a, "mark"), ua.Dispatchable(b, "mark")], + ... domain="ua_examples" + ... ): + ... res = ex.creation_multimethod() + ... ex.call_multimethod(res, a, b) + TypeA + + This won't call ``BackendBC`` because it doesn't support ``TypeA``. + + We can also use leave out the ``ua.Dispatchable`` if we specify the + default ``dispatch_type`` for the ``dispatchables`` argument. + + >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC): + ... a, b = ex.TypeA(), ex.TypeB() + ... with ua.determine_backend_multi( + ... [a, b], dispatch_type="mark", domain="ua_examples" + ... ): + ... res = ex.creation_multimethod() + ... ex.call_multimethod(res, a, b) + TypeA + + """ + if "dispatch_type" in kwargs: + disp_type = kwargs.pop("dispatch_type") + dispatchables = tuple( + d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type) + for d in dispatchables + ) + else: + dispatchables = tuple(dispatchables) + if not all(isinstance(d, Dispatchable) for d in dispatchables): + raise TypeError("dispatchables must be instances of uarray.Dispatchable") + + if len(kwargs) != 0: + raise TypeError(f"Received unexpected keyword arguments: {kwargs}") + + backend = _uarray.determine_backend(domain, dispatchables, coerce) + + return set_backend(backend, coerce=coerce, only=only) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d442a8273d1a633e01a54badb1668b46f0fc19d9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..28ffc7e79d2cc197d2b8bb95743caabb9691d0aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py @@ -0,0 +1,22 @@ +""" +NumPy Array API compatibility library + +This is a small wrapper around NumPy and CuPy that is compatible with the +Array API standard https://data-apis.org/array-api/latest/. 
See also NEP 47 +https://numpy.org/neps/nep-0047-array-api-standard.html. + +Unlike numpy.array_api, this is not a strict minimal implementation of the +Array API, but rather just an extension of the main NumPy namespace with +changes needed to be compliant with the Array API. See +https://numpy.org/doc/stable/reference/array_api.html for a full list of +changes. In particular, unlike numpy.array_api, this package does not use a +separate Array object, but rather just uses numpy.ndarray directly. + +Library authors using the Array API may wish to test against numpy.array_api +to ensure they are not using functionality outside of the standard, but prefer +this implementation for the default when working with NumPy arrays. + +""" +__version__ = '1.4.1' + +from .common import * diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2327641d64c1e90f959baa81d79b86563f724e8a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..513463aad31bc08c2d21a7dd0f617cb70a472586 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py new file mode 100644 index 0000000000000000000000000000000000000000..553c03561b45e7791548b78b17cec6f86b86f9c4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py @@ -0,0 +1,43 @@ +""" +Internal helpers +""" + +from functools import wraps +from inspect import signature + +def get_xp(xp): + """ + Decorator to automatically replace xp with the corresponding array module. + + Use like + + import numpy as np + + @get_xp(np) + def func(x, /, xp, kwarg=None): + return xp.func(x, kwarg=kwarg) + + Note that xp must be a keyword argument and come after all non-keyword + arguments. + + """ + def inner(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + return f(*args, xp=xp, **kwargs) + + sig = signature(f) + new_sig = sig.replace(parameters=[sig.parameters[i] for i in sig.parameters if i != 'xp']) + + if wrapped_f.__doc__ is None: + wrapped_f.__doc__ = f"""\ +Array API compatibility wrapper for {f.__name__}. + +See the corresponding documentation in NumPy/CuPy and/or the array API +specification for more details. 
+ +""" + wrapped_f.__signature__ = new_sig + return wrapped_f + + return inner diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce3f44dd486cb373ba9cca450dc486d6ecb66352 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py @@ -0,0 +1 @@ +from ._helpers import * diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13562a6749e93c545df721b07493212f6a8d8bf5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..c057e71d3c85df1fd2abbe0251083cf7bee45213 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py @@ -0,0 +1,536 @@ +""" +These are functions that are just aliases of existing functions in NumPy. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Optional, Sequence, Tuple, Union, List + from ._typing import ndarray, Device, Dtype, NestedSequence, SupportsBufferProtocol + +from typing import NamedTuple +from types import ModuleType +import inspect + +from ._helpers import _check_device, _is_numpy_array, array_namespace + +# These functions are modified from the NumPy versions. 
+ +def arange( + start: Union[int, float], + /, + stop: Optional[Union[int, float]] = None, + step: Union[int, float] = 1, + *, + xp, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs +) -> ndarray: + _check_device(xp, device) + return xp.arange(start, stop=stop, step=step, dtype=dtype, **kwargs) + +def empty( + shape: Union[int, Tuple[int, ...]], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs +) -> ndarray: + _check_device(xp, device) + return xp.empty(shape, dtype=dtype, **kwargs) + +def empty_like( + x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, + **kwargs +) -> ndarray: + _check_device(xp, device) + return xp.empty_like(x, dtype=dtype, **kwargs) + +def eye( + n_rows: int, + n_cols: Optional[int] = None, + /, + *, + xp, + k: int = 0, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.eye(n_rows, M=n_cols, k=k, dtype=dtype, **kwargs) + +def full( + shape: Union[int, Tuple[int, ...]], + fill_value: Union[int, float], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.full(shape, fill_value, dtype=dtype, **kwargs) + +def full_like( + x: ndarray, + /, + fill_value: Union[int, float], + *, + xp, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.full_like(x, fill_value, dtype=dtype, **kwargs) + +def linspace( + start: Union[int, float], + stop: Union[int, float], + /, + num: int, + *, + xp, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + endpoint: bool = True, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.linspace(start, stop, num, dtype=dtype, endpoint=endpoint, **kwargs) + +def ones( + shape: Union[int, Tuple[int, ...]], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.ones(shape, dtype=dtype, **kwargs) + +def ones_like( + x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.ones_like(x, dtype=dtype, **kwargs) + +def zeros( + shape: Union[int, Tuple[int, ...]], + xp, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.zeros(shape, dtype=dtype, **kwargs) + +def zeros_like( + x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, + **kwargs, +) -> ndarray: + _check_device(xp, device) + return xp.zeros_like(x, dtype=dtype, **kwargs) + +# np.unique() is split into four functions in the array API: +# unique_all, unique_counts, unique_inverse, and unique_values (this is done +# to remove polymorphic return types). + +# The functions here return namedtuples (np.unique() returns a normal +# tuple). +class UniqueAllResult(NamedTuple): + values: ndarray + indices: ndarray + inverse_indices: ndarray + counts: ndarray + + +class UniqueCountsResult(NamedTuple): + values: ndarray + counts: ndarray + + +class UniqueInverseResult(NamedTuple): + values: ndarray + inverse_indices: ndarray + + +def _unique_kwargs(xp): + # Older versions of NumPy and CuPy do not have equal_nan. Rather than + # trying to parse version numbers, just check if equal_nan is in the + # signature. 
+ s = inspect.signature(xp.unique) + if 'equal_nan' in s.parameters: + return {'equal_nan': False} + return {} + +def unique_all(x: ndarray, /, xp) -> UniqueAllResult: + kwargs = _unique_kwargs(xp) + values, indices, inverse_indices, counts = xp.unique( + x, + return_counts=True, + return_index=True, + return_inverse=True, + **kwargs, + ) + # np.unique() flattens inverse indices, but they need to share x's shape + # See https://github.com/numpy/numpy/issues/20638 + inverse_indices = inverse_indices.reshape(x.shape) + return UniqueAllResult( + values, + indices, + inverse_indices, + counts, + ) + + +def unique_counts(x: ndarray, /, xp) -> UniqueCountsResult: + kwargs = _unique_kwargs(xp) + res = xp.unique( + x, + return_counts=True, + return_index=False, + return_inverse=False, + **kwargs + ) + + return UniqueCountsResult(*res) + + +def unique_inverse(x: ndarray, /, xp) -> UniqueInverseResult: + kwargs = _unique_kwargs(xp) + values, inverse_indices = xp.unique( + x, + return_counts=False, + return_index=False, + return_inverse=True, + **kwargs, + ) + # xp.unique() flattens inverse indices, but they need to share x's shape + # See https://github.com/numpy/numpy/issues/20638 + inverse_indices = inverse_indices.reshape(x.shape) + return UniqueInverseResult(values, inverse_indices) + + +def unique_values(x: ndarray, /, xp) -> ndarray: + kwargs = _unique_kwargs(xp) + return xp.unique( + x, + return_counts=False, + return_index=False, + return_inverse=False, + **kwargs, + ) + +def astype(x: ndarray, dtype: Dtype, /, *, copy: bool = True) -> ndarray: + if not copy and dtype == x.dtype: + return x + return x.astype(dtype=dtype, copy=copy) + +# These functions have different keyword argument names + +def std( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, # correction instead of ddof + keepdims: bool = False, + **kwargs, +) -> ndarray: + return xp.std(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs) + +def var( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, # correction instead of ddof + keepdims: bool = False, + **kwargs, +) -> ndarray: + return xp.var(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs) + +# Unlike transpose(), the axes argument to permute_dims() is required. +def permute_dims(x: ndarray, /, axes: Tuple[int, ...], xp) -> ndarray: + return xp.transpose(x, axes) + +# Creation functions add the device keyword (which does nothing for NumPy) + +# asarray also adds the copy keyword +def _asarray( + obj: Union[ + ndarray, + bool, + int, + float, + NestedSequence[bool | int | float], + SupportsBufferProtocol, + ], + /, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + copy: "Optional[Union[bool, np._CopyMode]]" = None, + namespace = None, + **kwargs, +) -> ndarray: + """ + Array API compatibility wrapper for asarray(). + + See the corresponding documentation in NumPy/CuPy and/or the array API + specification for more details. + + """ + if namespace is None: + try: + xp = array_namespace(obj, _use_compat=False) + except ValueError: + # TODO: What about lists of arrays? 
+ raise ValueError("A namespace must be specified for asarray() with non-array input") + elif isinstance(namespace, ModuleType): + xp = namespace + elif namespace == 'numpy': + import numpy as xp + elif namespace == 'cupy': + import cupy as xp + else: + raise ValueError("Unrecognized namespace argument to asarray()") + + _check_device(xp, device) + if _is_numpy_array(obj): + import numpy as np + if hasattr(np, '_CopyMode'): + # Not present in older NumPys + COPY_FALSE = (False, np._CopyMode.IF_NEEDED) + COPY_TRUE = (True, np._CopyMode.ALWAYS) + else: + COPY_FALSE = (False,) + COPY_TRUE = (True,) + else: + COPY_FALSE = (False,) + COPY_TRUE = (True,) + if copy in COPY_FALSE: + # copy=False is not yet implemented in xp.asarray + raise NotImplementedError("copy=False is not yet implemented") + if isinstance(obj, xp.ndarray): + if dtype is not None and obj.dtype != dtype: + copy = True + if copy in COPY_TRUE: + return xp.array(obj, copy=True, dtype=dtype) + return obj + + return xp.asarray(obj, dtype=dtype, **kwargs) + +# np.reshape calls the keyword argument 'newshape' instead of 'shape' +def reshape(x: ndarray, + /, + shape: Tuple[int, ...], + xp, copy: Optional[bool] = None, + **kwargs) -> ndarray: + if copy is True: + x = x.copy() + elif copy is False: + y = x.view() + y.shape = shape + return y + return xp.reshape(x, shape, **kwargs) + +# The descending keyword is new in sort and argsort, and 'kind' replaced with +# 'stable' +def argsort( + x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True, + **kwargs, +) -> ndarray: + # Note: this keyword argument is different, and the default is different. + # We set it in kwargs like this because numpy.sort uses kind='quicksort' + # as the default whereas cupy.sort uses kind=None. + if stable: + kwargs['kind'] = "stable" + if not descending: + res = xp.argsort(x, axis=axis, **kwargs) + else: + # As NumPy has no native descending sort, we imitate it here. Note that + # simply flipping the results of xp.argsort(x, ...) would not + # respect the relative order like it would in native descending sorts. + res = xp.flip( + xp.argsort(xp.flip(x, axis=axis), axis=axis, **kwargs), + axis=axis, + ) + # Rely on flip()/argsort() to validate axis + normalised_axis = axis if axis >= 0 else x.ndim + axis + max_i = x.shape[normalised_axis] - 1 + res = max_i - res + return res + +def sort( + x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True, + **kwargs, +) -> ndarray: + # Note: this keyword argument is different, and the default is different. + # We set it in kwargs like this because numpy.sort uses kind='quicksort' + # as the default whereas cupy.sort uses kind=None. 
+ if stable: + kwargs['kind'] = "stable" + res = xp.sort(x, axis=axis, **kwargs) + if descending: + res = xp.flip(res, axis=axis) + return res + +# nonzero should error for zero-dimensional arrays +def nonzero(x: ndarray, /, xp, **kwargs) -> Tuple[ndarray, ...]: + if x.ndim == 0: + raise ValueError("nonzero() does not support zero-dimensional arrays") + return xp.nonzero(x, **kwargs) + +# sum() and prod() should always upcast when dtype=None +def sum( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs, +) -> ndarray: + # `xp.sum` already upcasts integers, but not floats or complexes + if dtype is None: + if x.dtype == xp.float32: + dtype = xp.float64 + elif x.dtype == xp.complex64: + dtype = xp.complex128 + return xp.sum(x, axis=axis, dtype=dtype, keepdims=keepdims, **kwargs) + +def prod( + x: ndarray, + /, + xp, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs, +) -> ndarray: + if dtype is None: + if x.dtype == xp.float32: + dtype = xp.float64 + elif x.dtype == xp.complex64: + dtype = xp.complex128 + return xp.prod(x, dtype=dtype, axis=axis, keepdims=keepdims, **kwargs) + +# ceil, floor, and trunc return integers for integer inputs + +def ceil(x: ndarray, /, xp, **kwargs) -> ndarray: + if xp.issubdtype(x.dtype, xp.integer): + return x + return xp.ceil(x, **kwargs) + +def floor(x: ndarray, /, xp, **kwargs) -> ndarray: + if xp.issubdtype(x.dtype, xp.integer): + return x + return xp.floor(x, **kwargs) + +def trunc(x: ndarray, /, xp, **kwargs) -> ndarray: + if xp.issubdtype(x.dtype, xp.integer): + return x + return xp.trunc(x, **kwargs) + +# linear algebra functions + +def matmul(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray: + return xp.matmul(x1, x2, **kwargs) + +# Unlike transpose, matrix_transpose only transposes the last two axes. +def matrix_transpose(x: ndarray, /, xp) -> ndarray: + if x.ndim < 2: + raise ValueError("x must be at least 2-dimensional for matrix_transpose") + return xp.swapaxes(x, -1, -2) + +def tensordot(x1: ndarray, + x2: ndarray, + /, + xp, + *, + axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2, + **kwargs, +) -> ndarray: + return xp.tensordot(x1, x2, axes=axes, **kwargs) + +def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray: + ndim = max(x1.ndim, x2.ndim) + x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape) + x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape) + if x1_shape[axis] != x2_shape[axis]: + raise ValueError("x1 and x2 must have the same size along the given axis") + + if hasattr(xp, 'broadcast_tensors'): + _broadcast = xp.broadcast_tensors + else: + _broadcast = xp.broadcast_arrays + + x1_, x2_ = _broadcast(x1, x2) + x1_ = xp.moveaxis(x1_, axis, -1) + x2_ = xp.moveaxis(x2_, axis, -1) + + res = x1_[..., None, :] @ x2_[..., None] + return res[..., 0, 0] + +# isdtype is a new function in the 2022.12 array API specification. + +def isdtype( + dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], xp, + *, _tuple=True, # Disallow nested tuples +) -> bool: + """ + Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``. + + Note that outside of this function, this compat library does not yet fully + support complex numbers. 
+ + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html + for more details + """ + if isinstance(kind, tuple) and _tuple: + return any(isdtype(dtype, k, xp, _tuple=False) for k in kind) + elif isinstance(kind, str): + if kind == 'bool': + return dtype == xp.bool_ + elif kind == 'signed integer': + return xp.issubdtype(dtype, xp.signedinteger) + elif kind == 'unsigned integer': + return xp.issubdtype(dtype, xp.unsignedinteger) + elif kind == 'integral': + return xp.issubdtype(dtype, xp.integer) + elif kind == 'real floating': + return xp.issubdtype(dtype, xp.floating) + elif kind == 'complex floating': + return xp.issubdtype(dtype, xp.complexfloating) + elif kind == 'numeric': + return xp.issubdtype(dtype, xp.number) + else: + raise ValueError(f"Unrecognized data type kind: {kind!r}") + else: + # This will allow things that aren't required by the spec, like + # isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be + # more strict here to match the type annotation? Note that the + # numpy.array_api implementation will be very strict. + return dtype == kind + +__all__ = ['arange', 'empty', 'empty_like', 'eye', 'full', 'full_like', + 'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like', + 'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult', + 'unique_all', 'unique_counts', 'unique_inverse', 'unique_values', + 'astype', 'std', 'var', 'permute_dims', 'reshape', 'argsort', + 'sort', 'nonzero', 'sum', 'prod', 'ceil', 'floor', 'trunc', + 'matmul', 'matrix_transpose', 'tensordot', 'vecdot', 'isdtype'] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..c1b0aef3f7328eac55b059fb2729b163c327e669 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py @@ -0,0 +1,232 @@ +""" +Various helper functions which are not part of the spec. + +Functions which start with an underscore are for internal use only but helpers +that are in __all__ are intended as additional helper functions for use by end +users of the compat library. +""" +from __future__ import annotations + +import sys +import math + +def _is_numpy_array(x): + # Avoid importing NumPy if it isn't already + if 'numpy' not in sys.modules: + return False + + import numpy as np + + # TODO: Should we reject ndarray subclasses? + return isinstance(x, (np.ndarray, np.generic)) + +def _is_cupy_array(x): + # Avoid importing NumPy if it isn't already + if 'cupy' not in sys.modules: + return False + + import cupy as cp + + # TODO: Should we reject ndarray subclasses? + return isinstance(x, (cp.ndarray, cp.generic)) + +def _is_torch_array(x): + # Avoid importing torch if it isn't already + if 'torch' not in sys.modules: + return False + + import torch + + # TODO: Should we reject ndarray subclasses? + return isinstance(x, torch.Tensor) + +def is_array_api_obj(x): + """ + Check if x is an array API compatible array object. 
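+
+    Returns ``True`` for NumPy, CuPy and PyTorch arrays as well as for any
+    object implementing ``__array_namespace__``. A small sketch, assuming
+    NumPy is installed:
+
+    >>> import numpy as np
+    >>> is_array_api_obj(np.asarray([1, 2]))
+    True
+    >>> is_array_api_obj([1, 2])
+    False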
+ """ + return _is_numpy_array(x) \ + or _is_cupy_array(x) \ + or _is_torch_array(x) \ + or hasattr(x, '__array_namespace__') + +def _check_api_version(api_version): + if api_version is not None and api_version != '2021.12': + raise ValueError("Only the 2021.12 version of the array API specification is currently supported") + +def array_namespace(*xs, api_version=None, _use_compat=True): + """ + Get the array API compatible namespace for the arrays `xs`. + + `xs` should contain one or more arrays. + + Typical usage is + + def your_function(x, y): + xp = array_api_compat.array_namespace(x, y) + # Now use xp as the array library namespace + return xp.mean(x, axis=0) + 2*xp.std(y, axis=0) + + api_version should be the newest version of the spec that you need support + for (currently the compat library wrapped APIs only support v2021.12). + """ + namespaces = set() + for x in xs: + if _is_numpy_array(x): + _check_api_version(api_version) + if _use_compat: + from .. import numpy as numpy_namespace + namespaces.add(numpy_namespace) + else: + import numpy as np + namespaces.add(np) + elif _is_cupy_array(x): + _check_api_version(api_version) + if _use_compat: + from .. import cupy as cupy_namespace + namespaces.add(cupy_namespace) + else: + import cupy as cp + namespaces.add(cp) + elif _is_torch_array(x): + _check_api_version(api_version) + if _use_compat: + from .. import torch as torch_namespace + namespaces.add(torch_namespace) + else: + import torch + namespaces.add(torch) + elif hasattr(x, '__array_namespace__'): + namespaces.add(x.__array_namespace__(api_version=api_version)) + else: + # TODO: Support Python scalars? + raise TypeError(f"{type(x).__name__} is not a supported array type") + + if not namespaces: + raise TypeError("Unrecognized array input") + + if len(namespaces) != 1: + raise TypeError(f"Multiple namespaces for array inputs: {namespaces}") + + xp, = namespaces + + return xp + +# backwards compatibility alias +get_namespace = array_namespace + +def _check_device(xp, device): + if xp == sys.modules.get('numpy'): + if device not in ["cpu", None]: + raise ValueError(f"Unsupported device for NumPy: {device!r}") + +# device() is not on numpy.ndarray and and to_device() is not on numpy.ndarray +# or cupy.ndarray. They are not included in array objects of this library +# because this library just reuses the respective ndarray classes without +# wrapping or subclassing them. These helper functions can be used instead of +# the wrapper functions for libraries that need to support both NumPy/CuPy and +# other libraries that use devices. +def device(x: "Array", /) -> "Device": + """ + Hardware device the array data resides on. + + Parameters + ---------- + x: array + array instance from NumPy or an array API compatible library. + + Returns + ------- + out: device + a ``device`` object (see the "Device Support" section of the array API specification). 
+ """ + if _is_numpy_array(x): + return "cpu" + return x.device + +# Based on cupy.array_api.Array.to_device +def _cupy_to_device(x, device, /, stream=None): + import cupy as cp + from cupy.cuda import Device as _Device + from cupy.cuda import stream as stream_module + from cupy_backends.cuda.api import runtime + + if device == x.device: + return x + elif device == "cpu": + # allowing us to use `to_device(x, "cpu")` + # is useful for portable test swapping between + # host and device backends + return x.get() + elif not isinstance(device, _Device): + raise ValueError(f"Unsupported device {device!r}") + else: + # see cupy/cupy#5985 for the reason how we handle device/stream here + prev_device = runtime.getDevice() + prev_stream: stream_module.Stream = None + if stream is not None: + prev_stream = stream_module.get_current_stream() + # stream can be an int as specified in __dlpack__, or a CuPy stream + if isinstance(stream, int): + stream = cp.cuda.ExternalStream(stream) + elif isinstance(stream, cp.cuda.Stream): + pass + else: + raise ValueError('the input stream is not recognized') + stream.use() + try: + runtime.setDevice(device.id) + arr = x.copy() + finally: + runtime.setDevice(prev_device) + if stream is not None: + prev_stream.use() + return arr + +def _torch_to_device(x, device, /, stream=None): + if stream is not None: + raise NotImplementedError + return x.to(device) + +def to_device(x: "Array", device: "Device", /, *, stream: "Optional[Union[int, Any]]" = None) -> "Array": + """ + Copy the array from the device on which it currently resides to the specified ``device``. + + Parameters + ---------- + x: array + array instance from NumPy or an array API compatible library. + device: device + a ``device`` object (see the "Device Support" section of the array API specification). + stream: Optional[Union[int, Any]] + stream object to use during copy. In addition to the types supported in ``array.__dlpack__``, implementations may choose to support any library-specific stream object with the caveat that any code using such an object would not be portable. + + Returns + ------- + out: array + an array with the same data and data type as ``x`` and located on the specified ``device``. + + .. note:: + If ``stream`` is given, the copy operation should be enqueued on the provided ``stream``; otherwise, the copy operation should be enqueued on the default stream/queue. Whether the copy is performed synchronously or asynchronously is implementation-dependent. Accordingly, if synchronization is required to guarantee data safety, this must be clearly explained in a conforming library's documentation. 
+ """ + if _is_numpy_array(x): + if stream is not None: + raise ValueError("The stream argument to to_device() is not supported") + if device == 'cpu': + return x + raise ValueError(f"Unsupported device {device!r}") + elif _is_cupy_array(x): + # cupy does not yet have to_device + return _cupy_to_device(x, device, stream=stream) + elif _is_torch_array(x): + return _torch_to_device(x, device, stream=stream) + return x.to_device(device, stream=stream) + +def size(x): + """ + Return the total number of elements of x + """ + if None in x.shape: + return None + return math.prod(x.shape) + +__all__ = ['is_array_api_obj', 'array_namespace', 'get_namespace', 'device', 'to_device', 'size'] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..ce5b55d1a7202870a3de71d797f0e62a86af3dc3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, NamedTuple +if TYPE_CHECKING: + from typing import Literal, Optional, Sequence, Tuple, Union + from ._typing import ndarray + +import numpy as np +if np.__version__[0] == "2": + from numpy.lib.array_utils import normalize_axis_tuple +else: + from numpy.core.numeric import normalize_axis_tuple + +from ._aliases import matmul, matrix_transpose, tensordot, vecdot, isdtype +from .._internal import get_xp + +# These are in the main NumPy namespace but not in numpy.linalg +def cross(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1, **kwargs) -> ndarray: + return xp.cross(x1, x2, axis=axis, **kwargs) + +def outer(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray: + return xp.outer(x1, x2, **kwargs) + +class EighResult(NamedTuple): + eigenvalues: ndarray + eigenvectors: ndarray + +class QRResult(NamedTuple): + Q: ndarray + R: ndarray + +class SlogdetResult(NamedTuple): + sign: ndarray + logabsdet: ndarray + +class SVDResult(NamedTuple): + U: ndarray + S: ndarray + Vh: ndarray + +# These functions are the same as their NumPy counterparts except they return +# a namedtuple. +def eigh(x: ndarray, /, xp, **kwargs) -> EighResult: + return EighResult(*xp.linalg.eigh(x, **kwargs)) + +def qr(x: ndarray, /, xp, *, mode: Literal['reduced', 'complete'] = 'reduced', + **kwargs) -> QRResult: + return QRResult(*xp.linalg.qr(x, mode=mode, **kwargs)) + +def slogdet(x: ndarray, /, xp, **kwargs) -> SlogdetResult: + return SlogdetResult(*xp.linalg.slogdet(x, **kwargs)) + +def svd(x: ndarray, /, xp, *, full_matrices: bool = True, **kwargs) -> SVDResult: + return SVDResult(*xp.linalg.svd(x, full_matrices=full_matrices, **kwargs)) + +# These functions have additional keyword arguments + +# The upper keyword argument is new from NumPy +def cholesky(x: ndarray, /, xp, *, upper: bool = False, **kwargs) -> ndarray: + L = xp.linalg.cholesky(x, **kwargs) + if upper: + U = get_xp(xp)(matrix_transpose)(L) + if get_xp(xp)(isdtype)(U.dtype, 'complex floating'): + U = xp.conj(U) + return U + return L + +# The rtol keyword argument of matrix_rank() and pinv() is new from NumPy. +# Note that it has a different semantic meaning from tol and rcond. +def matrix_rank(x: ndarray, + /, + xp, + *, + rtol: Optional[Union[float, ndarray]] = None, + **kwargs) -> ndarray: + # this is different from xp.linalg.matrix_rank, which supports 1 + # dimensional arrays. 
+ if x.ndim < 2: + raise xp.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional") + S = xp.linalg.svd(x, compute_uv=False, **kwargs) + if rtol is None: + tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * xp.finfo(S.dtype).eps + else: + # this is different from xp.linalg.matrix_rank, which does not + # multiply the tolerance by the largest singular value. + tol = S.max(axis=-1, keepdims=True)*xp.asarray(rtol)[..., xp.newaxis] + return xp.count_nonzero(S > tol, axis=-1) + +def pinv(x: ndarray, /, xp, *, rtol: Optional[Union[float, ndarray]] = None, **kwargs) -> ndarray: + # this is different from xp.linalg.pinv, which does not multiply the + # default tolerance by max(M, N). + if rtol is None: + rtol = max(x.shape[-2:]) * xp.finfo(x.dtype).eps + return xp.linalg.pinv(x, rcond=rtol, **kwargs) + +# These functions are new in the array API spec + +def matrix_norm(x: ndarray, /, xp, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> ndarray: + return xp.linalg.norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) + +# svdvals is not in NumPy (but it is in SciPy). It is equivalent to +# xp.linalg.svd(compute_uv=False). +def svdvals(x: ndarray, /, xp) -> Union[ndarray, Tuple[ndarray, ...]]: + return xp.linalg.svd(x, compute_uv=False) + +def vector_norm(x: ndarray, /, xp, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> ndarray: + # xp.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or + # when axis=None and the input is 2-D, so to force a vector norm, we make + # it so the input is 1-D (for axis=None), or reshape so that norm is done + # on a single dimension. + if axis is None: + # Note: xp.linalg.norm() doesn't handle 0-D arrays + x = x.ravel() + _axis = 0 + elif isinstance(axis, tuple): + # Note: The axis argument supports any number of axes, whereas + # xp.linalg.norm() only supports a single axis for vector norm. + normalized_axis = normalize_axis_tuple(axis, x.ndim) + rest = tuple(i for i in range(x.ndim) if i not in normalized_axis) + newshape = axis + rest + x = xp.transpose(x, newshape).reshape( + (xp.prod([x.shape[i] for i in axis], dtype=int), *[x.shape[i] for i in rest])) + _axis = 0 + else: + _axis = axis + + res = xp.linalg.norm(x, axis=_axis, ord=ord) + + if keepdims: + # We can't reuse xp.linalg.norm(keepdims) because of the reshape hacks + # above to avoid matrix norm logic. 
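+        # Instead, emulate keepdims by hand: set each reduced axis to length
+        # 1 in the shape and reshape the computed norm back to that shape.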
+ shape = list(x.shape) + _axis = normalize_axis_tuple(range(x.ndim) if axis is None else axis, x.ndim) + for i in _axis: + shape[i] = 1 + res = xp.reshape(res, tuple(shape)) + + return res + +# xp.diagonal and xp.trace operate on the first two axes whereas these +# operates on the last two + +def diagonal(x: ndarray, /, xp, *, offset: int = 0, **kwargs) -> ndarray: + return xp.diagonal(x, offset=offset, axis1=-2, axis2=-1, **kwargs) + +def trace(x: ndarray, /, xp, *, offset: int = 0, dtype=None, **kwargs) -> ndarray: + if dtype is None: + if x.dtype == xp.float32: + dtype = xp.float64 + elif x.dtype == xp.complex64: + dtype = xp.complex128 + return xp.asarray(xp.trace(x, offset=offset, dtype=dtype, axis1=-2, axis2=-1, **kwargs)) + +__all__ = ['cross', 'matmul', 'outer', 'tensordot', 'EighResult', + 'QRResult', 'SlogdetResult', 'SVDResult', 'eigh', 'qr', 'slogdet', + 'svd', 'cholesky', 'matrix_rank', 'pinv', 'matrix_norm', + 'matrix_transpose', 'svdvals', 'vecdot', 'vector_norm', 'diagonal', + 'trace'] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..3f17806094baa04355abe360bb0fc7792ea6e1bf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +__all__ = [ + "NestedSequence", + "SupportsBufferProtocol", +] + +from typing import ( + Any, + TypeVar, + Protocol, +) + +_T_co = TypeVar("_T_co", covariant=True) + +class NestedSequence(Protocol[_T_co]): + def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ... + def __len__(self, /) -> int: ... + +SupportsBufferProtocol = Any diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6613483d89bebd6fb1c0148131786904d4011048 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e1f66a5641983be247a34a269f5d7c8301f9d5e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4a49f2f14bc26c3d6e9fdd115dee6499263d5cba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py @@ -0,0 +1,22 @@ +from numpy import * + +# from numpy import * doesn't overwrite these builtin names +from numpy import abs, max, min, round + +# These imports may overwrite names from the import * above. +from ._aliases import * + +# Don't know why, but we have to do an absolute import to import linalg. 
If we +# instead do +# +# from . import linalg +# +# It doesn't overwrite np.linalg from above. The import is generated +# dynamically so that the library can be vendored. +__import__(__package__ + '.linalg') + +from .linalg import matrix_transpose, vecdot + +from ..common._helpers import * + +__array_api_version__ = '2022.12' diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d9ac7e4bc870ac5ff0625aa42a9ec9bfe7e09d4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0e1fd7025b77f65995435f26bd87a8dd8a9ad0d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58b20a9efc7e41f3e27597a323a6f592ca5af460 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42a3059361d64658fa49d9f63eab8cb591883af1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..e7d4a1be2f8001f804d338043d638c32c3958d3a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +from functools import partial + +from ..common import _aliases + +from .._internal import get_xp + +asarray = asarray_numpy = partial(_aliases._asarray, namespace='numpy') +asarray.__doc__ = _aliases._asarray.__doc__ +del partial + +import numpy as np +bool = np.bool_ + +# Basic renames +acos = np.arccos +acosh = np.arccosh +asin = np.arcsin +asinh = np.arcsinh +atan = np.arctan +atan2 = np.arctan2 +atanh = np.arctanh +bitwise_left_shift = np.left_shift +bitwise_invert = np.invert +bitwise_right_shift = np.right_shift +concat = np.concatenate +pow = np.power + +arange = get_xp(np)(_aliases.arange) +empty = get_xp(np)(_aliases.empty) +empty_like = get_xp(np)(_aliases.empty_like) +eye = get_xp(np)(_aliases.eye) +full = get_xp(np)(_aliases.full) +full_like = 
get_xp(np)(_aliases.full_like) +linspace = get_xp(np)(_aliases.linspace) +ones = get_xp(np)(_aliases.ones) +ones_like = get_xp(np)(_aliases.ones_like) +zeros = get_xp(np)(_aliases.zeros) +zeros_like = get_xp(np)(_aliases.zeros_like) +UniqueAllResult = get_xp(np)(_aliases.UniqueAllResult) +UniqueCountsResult = get_xp(np)(_aliases.UniqueCountsResult) +UniqueInverseResult = get_xp(np)(_aliases.UniqueInverseResult) +unique_all = get_xp(np)(_aliases.unique_all) +unique_counts = get_xp(np)(_aliases.unique_counts) +unique_inverse = get_xp(np)(_aliases.unique_inverse) +unique_values = get_xp(np)(_aliases.unique_values) +astype = _aliases.astype +std = get_xp(np)(_aliases.std) +var = get_xp(np)(_aliases.var) +permute_dims = get_xp(np)(_aliases.permute_dims) +reshape = get_xp(np)(_aliases.reshape) +argsort = get_xp(np)(_aliases.argsort) +sort = get_xp(np)(_aliases.sort) +nonzero = get_xp(np)(_aliases.nonzero) +sum = get_xp(np)(_aliases.sum) +prod = get_xp(np)(_aliases.prod) +ceil = get_xp(np)(_aliases.ceil) +floor = get_xp(np)(_aliases.floor) +trunc = get_xp(np)(_aliases.trunc) +matmul = get_xp(np)(_aliases.matmul) +matrix_transpose = get_xp(np)(_aliases.matrix_transpose) +tensordot = get_xp(np)(_aliases.tensordot) + +# These functions are completely new here. If the library already has them +# (i.e., numpy 2.0), use the library version instead of our wrapper. +if hasattr(np, 'vecdot'): + vecdot = np.vecdot +else: + vecdot = get_xp(np)(_aliases.vecdot) +if hasattr(np, 'isdtype'): + isdtype = np.isdtype +else: + isdtype = get_xp(np)(_aliases.isdtype) + +__all__ = _aliases.__all__ + ['asarray', 'asarray_numpy', 'bool', 'acos', + 'acosh', 'asin', 'asinh', 'atan', 'atan2', + 'atanh', 'bitwise_left_shift', 'bitwise_invert', + 'bitwise_right_shift', 'concat', 'pow'] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..c5ebb5abb987572be625ee864a37e61126d36d8b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +__all__ = [ + "ndarray", + "Device", + "Dtype", +] + +import sys +from typing import ( + Literal, + Union, + TYPE_CHECKING, +) + +from numpy import ( + ndarray, + dtype, + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, +) + +Device = Literal["cpu"] +if TYPE_CHECKING or sys.version_info >= (3, 9): + Dtype = dtype[Union[ + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + ]] +else: + Dtype = dtype diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..39997df81e171ae44ae5a149e53c9ccacb118ae9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py @@ -0,0 +1,40 @@ +from numpy.linalg import * +from numpy.linalg import __all__ as linalg_all + +from ..common import _linalg +from .._internal import get_xp +from ._aliases import (matmul, matrix_transpose, tensordot, vecdot) + +import numpy as np + +cross = get_xp(np)(_linalg.cross) +outer = get_xp(np)(_linalg.outer) +EighResult = _linalg.EighResult +QRResult = _linalg.QRResult +SlogdetResult = _linalg.SlogdetResult 
+SVDResult = _linalg.SVDResult +eigh = get_xp(np)(_linalg.eigh) +qr = get_xp(np)(_linalg.qr) +slogdet = get_xp(np)(_linalg.slogdet) +svd = get_xp(np)(_linalg.svd) +cholesky = get_xp(np)(_linalg.cholesky) +matrix_rank = get_xp(np)(_linalg.matrix_rank) +pinv = get_xp(np)(_linalg.pinv) +matrix_norm = get_xp(np)(_linalg.matrix_norm) +svdvals = get_xp(np)(_linalg.svdvals) +diagonal = get_xp(np)(_linalg.diagonal) +trace = get_xp(np)(_linalg.trace) + +# These functions are completely new here. If the library already has them +# (i.e., numpy 2.0), use the library version instead of our wrapper. +if hasattr(np.linalg, 'vector_norm'): + vector_norm = np.linalg.vector_norm +else: + vector_norm = get_xp(np)(_linalg.vector_norm) + +__all__ = linalg_all + _linalg.__all__ + +del get_xp +del np +del linalg_all +del _linalg diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae53ec52dee920bf7a7f2b4758f8ad787f7e6784 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py @@ -0,0 +1,22 @@ +from torch import * + +# Several names are not included in the above import * +import torch +for n in dir(torch): + if (n.startswith('_') + or n.endswith('_') + or 'cuda' in n + or 'cpu' in n + or 'backward' in n): + continue + exec(n + ' = torch.' + n) + +# These imports may overwrite names from the import * above. +from ._aliases import * + +# See the comment in the numpy __init__.py +__import__(__package__ + '.linalg') + +from ..common._helpers import * + +__array_api_version__ = '2022.12' diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..499b415f52e93648dfa01b636183d0874cf038c3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e99170088c591fb3daf81ceab9d712bfba5330b5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a2cae320891db7b8c92aa96df8e7babf27fb99c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..929d31aa81ea9f019d0601c8c69213264efa75c7 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py @@ -0,0 +1,707 @@ +from __future__ import annotations + +from functools import wraps +from builtins import all as builtin_all, any as builtin_any + +from ..common._aliases import (UniqueAllResult, UniqueCountsResult, + UniqueInverseResult, + matrix_transpose as _aliases_matrix_transpose, + vecdot as _aliases_vecdot) +from .._internal import get_xp + +import torch + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import List, Optional, Sequence, Tuple, Union + from ..common._typing import Device + from torch import dtype as Dtype + + array = torch.Tensor + +_int_dtypes = { + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.int64, +} + +_array_api_dtypes = { + torch.bool, + *_int_dtypes, + torch.float32, + torch.float64, + torch.complex64, + torch.complex128, +} + +_promotion_table = { + # bool + (torch.bool, torch.bool): torch.bool, + # ints + (torch.int8, torch.int8): torch.int8, + (torch.int8, torch.int16): torch.int16, + (torch.int8, torch.int32): torch.int32, + (torch.int8, torch.int64): torch.int64, + (torch.int16, torch.int8): torch.int16, + (torch.int16, torch.int16): torch.int16, + (torch.int16, torch.int32): torch.int32, + (torch.int16, torch.int64): torch.int64, + (torch.int32, torch.int8): torch.int32, + (torch.int32, torch.int16): torch.int32, + (torch.int32, torch.int32): torch.int32, + (torch.int32, torch.int64): torch.int64, + (torch.int64, torch.int8): torch.int64, + (torch.int64, torch.int16): torch.int64, + (torch.int64, torch.int32): torch.int64, + (torch.int64, torch.int64): torch.int64, + # uints + (torch.uint8, torch.uint8): torch.uint8, + # ints and uints (mixed sign) + (torch.int8, torch.uint8): torch.int16, + (torch.int16, torch.uint8): torch.int16, + (torch.int32, torch.uint8): torch.int32, + (torch.int64, torch.uint8): torch.int64, + (torch.uint8, torch.int8): torch.int16, + (torch.uint8, torch.int16): torch.int16, + (torch.uint8, torch.int32): torch.int32, + (torch.uint8, torch.int64): torch.int64, + # floats + (torch.float32, torch.float32): torch.float32, + (torch.float32, torch.float64): torch.float64, + (torch.float64, torch.float32): torch.float64, + (torch.float64, torch.float64): torch.float64, + # complexes + (torch.complex64, torch.complex64): torch.complex64, + (torch.complex64, torch.complex128): torch.complex128, + (torch.complex128, torch.complex64): torch.complex128, + (torch.complex128, torch.complex128): torch.complex128, + # Mixed float and complex + (torch.float32, torch.complex64): torch.complex64, + (torch.float32, torch.complex128): torch.complex128, + (torch.float64, torch.complex64): torch.complex128, + (torch.float64, torch.complex128): torch.complex128, +} + + +def _two_arg(f): + @wraps(f) + def _f(x1, x2, /, **kwargs): + x1, x2 = _fix_promotion(x1, x2) + return f(x1, x2, **kwargs) + if _f.__doc__ is None: + _f.__doc__ = f"""\ +Array API compatibility wrapper for torch.{f.__name__}. + +See the corresponding PyTorch documentation and/or the array API specification +for more details. 
+ +""" + return _f + +def _fix_promotion(x1, x2, only_scalar=True): + if x1.dtype not in _array_api_dtypes or x2.dtype not in _array_api_dtypes: + return x1, x2 + # If an argument is 0-D pytorch downcasts the other argument + if not only_scalar or x1.shape == (): + dtype = result_type(x1, x2) + x2 = x2.to(dtype) + if not only_scalar or x2.shape == (): + dtype = result_type(x1, x2) + x1 = x1.to(dtype) + return x1, x2 + +def result_type(*arrays_and_dtypes: Union[array, Dtype]) -> Dtype: + if len(arrays_and_dtypes) == 0: + raise TypeError("At least one array or dtype must be provided") + if len(arrays_and_dtypes) == 1: + x = arrays_and_dtypes[0] + if isinstance(x, torch.dtype): + return x + return x.dtype + if len(arrays_and_dtypes) > 2: + return result_type(arrays_and_dtypes[0], result_type(*arrays_and_dtypes[1:])) + + x, y = arrays_and_dtypes + xdt = x.dtype if not isinstance(x, torch.dtype) else x + ydt = y.dtype if not isinstance(y, torch.dtype) else y + + if (xdt, ydt) in _promotion_table: + return _promotion_table[xdt, ydt] + + # This doesn't result_type(dtype, dtype) for non-array API dtypes + # because torch.result_type only accepts tensors. This does however, allow + # cross-kind promotion. + x = torch.tensor([], dtype=x) if isinstance(x, torch.dtype) else x + y = torch.tensor([], dtype=y) if isinstance(y, torch.dtype) else y + return torch.result_type(x, y) + +def can_cast(from_: Union[Dtype, array], to: Dtype, /) -> bool: + if not isinstance(from_, torch.dtype): + from_ = from_.dtype + return torch.can_cast(from_, to) + +# Basic renames +bitwise_invert = torch.bitwise_not +newaxis = None + +# Two-arg elementwise functions +# These require a wrapper to do the correct type promotion on 0-D tensors +add = _two_arg(torch.add) +atan2 = _two_arg(torch.atan2) +bitwise_and = _two_arg(torch.bitwise_and) +bitwise_left_shift = _two_arg(torch.bitwise_left_shift) +bitwise_or = _two_arg(torch.bitwise_or) +bitwise_right_shift = _two_arg(torch.bitwise_right_shift) +bitwise_xor = _two_arg(torch.bitwise_xor) +divide = _two_arg(torch.divide) +# Also a rename. torch.equal does not broadcast +equal = _two_arg(torch.eq) +floor_divide = _two_arg(torch.floor_divide) +greater = _two_arg(torch.greater) +greater_equal = _two_arg(torch.greater_equal) +less = _two_arg(torch.less) +less_equal = _two_arg(torch.less_equal) +logaddexp = _two_arg(torch.logaddexp) +# logical functions are not included here because they only accept bool in the +# spec, so type promotion is irrelevant. +multiply = _two_arg(torch.multiply) +not_equal = _two_arg(torch.not_equal) +pow = _two_arg(torch.pow) +remainder = _two_arg(torch.remainder) +subtract = _two_arg(torch.subtract) + +# These wrappers are mostly based on the fact that pytorch uses 'dim' instead +# of 'axis'. 
+ +# torch.min and torch.max return a tuple and don't support multiple axes https://github.com/pytorch/pytorch/issues/58745 +def max(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.clone(x) + return torch.amax(x, axis, keepdims=keepdims) + +def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.clone(x) + return torch.amin(x, axis, keepdims=keepdims) + +# torch.sort also returns a tuple +# https://github.com/pytorch/pytorch/issues/70921 +def sort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bool = True, **kwargs) -> array: + return torch.sort(x, dim=axis, descending=descending, stable=stable, **kwargs).values + +def _normalize_axes(axis, ndim): + axes = [] + if ndim == 0 and axis: + # Better error message in this case + raise IndexError(f"Dimension out of range: {axis[0]}") + lower, upper = -ndim, ndim - 1 + for a in axis: + if a < lower or a > upper: + # Match torch error message (e.g., from sum()) + raise IndexError(f"Dimension out of range (expected to be in range of [{lower}, {upper}], but got {a}") + if a < 0: + a = a + ndim + if a in axes: + # Use IndexError instead of RuntimeError, and "axis" instead of "dim" + raise IndexError(f"Axis {a} appears multiple times in the list of axes") + axes.append(a) + return sorted(axes) + +def _axis_none_keepdims(x, ndim, keepdims): + # Apply keepdims when axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + # Note that this is only valid for the axis=None case. + if keepdims: + for i in range(ndim): + x = torch.unsqueeze(x, 0) + return x + +def _reduce_multiple_axes(f, x, axis, keepdims=False, **kwargs): + # Some reductions don't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). + axes = _normalize_axes(axis, x.ndim) + for a in reversed(axes): + x = torch.movedim(x, a, -1) + x = torch.flatten(x, -len(axes)) + + out = f(x, -1, **kwargs) + + if keepdims: + for a in axes: + out = torch.unsqueeze(out, a) + return out + +def prod(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + + # https://github.com/pytorch/pytorch/issues/29137. Separate from the logic + # below because it still needs to upcast. + if axis == (): + if dtype is None: + # We can't upcast uint8 according to the spec because there is no + # torch.uint64, so at least upcast to int64 which is what sum does + # when axis=None. + if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]: + return x.to(torch.int64) + return x.clone() + return x.to(dtype) + + # torch.prod doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). 
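+    # _reduce_multiple_axes (defined above) works around this by moving the
+    # requested axes to the end, flattening them into a single trailing
+    # dimension, and reducing over that dimension.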
+ if isinstance(axis, tuple): + return _reduce_multiple_axes(torch.prod, x, axis, keepdims=keepdims, dtype=dtype, **kwargs) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.prod(x, dtype=dtype, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res + + return torch.prod(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) + + +def sum(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: Optional[Dtype] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + + # https://github.com/pytorch/pytorch/issues/29137. + # Make sure it upcasts. + if axis == (): + if dtype is None: + # We can't upcast uint8 according to the spec because there is no + # torch.uint64, so at least upcast to int64 which is what sum does + # when axis=None. + if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]: + return x.to(torch.int64) + return x.clone() + return x.to(dtype) + + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.sum(x, dtype=dtype, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res + + return torch.sum(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) + +def any(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + if axis == (): + return x.to(torch.bool) + # torch.any doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). + if isinstance(axis, tuple): + res = _reduce_multiple_axes(torch.any, x, axis, keepdims=keepdims, **kwargs) + return res.to(torch.bool) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.any(x, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res.to(torch.bool) + + # torch.any doesn't return bool for uint8 + return torch.any(x, axis, keepdims=keepdims).to(torch.bool) + +def all(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: + x = torch.asarray(x) + ndim = x.ndim + if axis == (): + return x.to(torch.bool) + # torch.all doesn't support multiple axes + # (https://github.com/pytorch/pytorch/issues/56586). 
+ if isinstance(axis, tuple): + res = _reduce_multiple_axes(torch.all, x, axis, keepdims=keepdims, **kwargs) + return res.to(torch.bool) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.all(x, **kwargs) + res = _axis_none_keepdims(res, ndim, keepdims) + return res.to(torch.bool) + + # torch.all doesn't return bool for uint8 + return torch.all(x, axis, keepdims=keepdims).to(torch.bool) + +def mean(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs) -> array: + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.clone(x) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.mean(x, **kwargs) + res = _axis_none_keepdims(res, x.ndim, keepdims) + return res + return torch.mean(x, axis, keepdims=keepdims, **kwargs) + +def std(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, + keepdims: bool = False, + **kwargs) -> array: + # Note, float correction is not supported + # https://github.com/pytorch/pytorch/issues/61492. We don't try to + # implement it here for now. + + if isinstance(correction, float): + _correction = int(correction) + if correction != _correction: + raise NotImplementedError("float correction in torch std() is not yet supported") + + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.zeros_like(x) + if isinstance(axis, int): + axis = (axis,) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.std(x, tuple(range(x.ndim)), correction=_correction, **kwargs) + res = _axis_none_keepdims(res, x.ndim, keepdims) + return res + return torch.std(x, axis, correction=_correction, keepdims=keepdims, **kwargs) + +def var(x: array, + /, + *, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + correction: Union[int, float] = 0.0, + keepdims: bool = False, + **kwargs) -> array: + # Note, float correction is not supported + # https://github.com/pytorch/pytorch/issues/61492. We don't try to + # implement it here for now. + + # if isinstance(correction, float): + # correction = int(correction) + + # https://github.com/pytorch/pytorch/issues/29137 + if axis == (): + return torch.zeros_like(x) + if isinstance(axis, int): + axis = (axis,) + if axis is None: + # torch doesn't support keepdims with axis=None + # (https://github.com/pytorch/pytorch/issues/71209) + res = torch.var(x, tuple(range(x.ndim)), correction=correction, **kwargs) + res = _axis_none_keepdims(res, x.ndim, keepdims) + return res + return torch.var(x, axis, correction=correction, keepdims=keepdims, **kwargs) + +# torch.concat doesn't support dim=None +# https://github.com/pytorch/pytorch/issues/70925 +def concat(arrays: Union[Tuple[array, ...], List[array]], + /, + *, + axis: Optional[int] = 0, + **kwargs) -> array: + if axis is None: + arrays = tuple(ar.flatten() for ar in arrays) + axis = 0 + return torch.concat(arrays, axis, **kwargs) + +# torch.squeeze only accepts int dim and doesn't require it +# https://github.com/pytorch/pytorch/issues/70924. Support for tuple dim was +# added at https://github.com/pytorch/pytorch/pull/89017. 
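+# Until such a torch version can be required here, the wrapper below removes
+# the requested size-1 dimensions one at a time, shifting the remaining axis
+# indices down as earlier axes are removed.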
+def squeeze(x: array, /, axis: Union[int, Tuple[int, ...]]) -> array: + if isinstance(axis, int): + axis = (axis,) + for a in axis: + if x.shape[a] != 1: + raise ValueError("squeezed dimensions must be equal to 1") + axes = _normalize_axes(axis, x.ndim) + # Remove this once pytorch 1.14 is released with the above PR #89017. + sequence = [a - i for i, a in enumerate(axes)] + for a in sequence: + x = torch.squeeze(x, a) + return x + +# torch.broadcast_to uses size instead of shape +def broadcast_to(x: array, /, shape: Tuple[int, ...], **kwargs) -> array: + return torch.broadcast_to(x, shape, **kwargs) + +# torch.permute uses dims instead of axes +def permute_dims(x: array, /, axes: Tuple[int, ...]) -> array: + return torch.permute(x, axes) + +# The axis parameter doesn't work for flip() and roll() +# https://github.com/pytorch/pytorch/issues/71210. Also torch.flip() doesn't +# accept axis=None +def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> array: + if axis is None: + axis = tuple(range(x.ndim)) + # torch.flip doesn't accept dim as an int but the method does + # https://github.com/pytorch/pytorch/issues/18095 + return x.flip(axis, **kwargs) + +def roll(x: array, /, shift: Union[int, Tuple[int, ...]], *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> array: + return torch.roll(x, shift, axis, **kwargs) + +def nonzero(x: array, /, **kwargs) -> Tuple[array, ...]: + if x.ndim == 0: + raise ValueError("nonzero() does not support zero-dimensional arrays") + return torch.nonzero(x, as_tuple=True, **kwargs) + +def where(condition: array, x1: array, x2: array, /) -> array: + x1, x2 = _fix_promotion(x1, x2) + return torch.where(condition, x1, x2) + +# torch.reshape doesn't have the copy keyword +def reshape(x: array, + /, + shape: Tuple[int, ...], + copy: Optional[bool] = None, + **kwargs) -> array: + if copy is not None: + raise NotImplementedError("torch.reshape doesn't yet support the copy keyword") + return torch.reshape(x, shape, **kwargs) + +# torch.arange doesn't support returning empty arrays +# (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some +# keyword argument combinations +# (https://github.com/pytorch/pytorch/issues/70914) +def arange(start: Union[int, float], + /, + stop: Optional[Union[int, float]] = None, + step: Union[int, float] = 1, + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if stop is None: + start, stop = 0, start + if step > 0 and stop <= start or step < 0 and stop >= start: + if dtype is None: + if builtin_all(isinstance(i, int) for i in [start, stop, step]): + dtype = torch.int64 + else: + dtype = torch.float32 + return torch.empty(0, dtype=dtype, device=device, **kwargs) + return torch.arange(start, stop, step, dtype=dtype, device=device, **kwargs) + +# torch.eye does not accept None as a default for the second argument and +# doesn't support off-diagonals (https://github.com/pytorch/pytorch/issues/70910) +def eye(n_rows: int, + n_cols: Optional[int] = None, + /, + *, + k: int = 0, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if n_cols is None: + n_cols = n_rows + z = torch.zeros(n_rows, n_cols, dtype=dtype, device=device, **kwargs) + if abs(k) <= n_rows + n_cols: + z.diagonal(k).fill_(1) + return z + +# torch.linspace doesn't have the endpoint parameter +def linspace(start: Union[int, float], + stop: Union[int, float], + /, + num: int, + *, + dtype: Optional[Dtype] = None, + device: 
Optional[Device] = None, + endpoint: bool = True, + **kwargs) -> array: + if not endpoint: + return torch.linspace(start, stop, num+1, dtype=dtype, device=device, **kwargs)[:-1] + return torch.linspace(start, stop, num, dtype=dtype, device=device, **kwargs) + +# torch.full does not accept an int size +# https://github.com/pytorch/pytorch/issues/70906 +def full(shape: Union[int, Tuple[int, ...]], + fill_value: Union[bool, int, float, complex], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + if isinstance(shape, int): + shape = (shape,) + + return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs) + +# ones, zeros, and empty do not accept shape as a keyword argument +def ones(shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + return torch.ones(shape, dtype=dtype, device=device, **kwargs) + +def zeros(shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + return torch.zeros(shape, dtype=dtype, device=device, **kwargs) + +def empty(shape: Union[int, Tuple[int, ...]], + *, + dtype: Optional[Dtype] = None, + device: Optional[Device] = None, + **kwargs) -> array: + return torch.empty(shape, dtype=dtype, device=device, **kwargs) + +# tril and triu do not call the keyword argument k + +def tril(x: array, /, *, k: int = 0) -> array: + return torch.tril(x, k) + +def triu(x: array, /, *, k: int = 0) -> array: + return torch.triu(x, k) + +# Functions that aren't in torch https://github.com/pytorch/pytorch/issues/58742 +def expand_dims(x: array, /, *, axis: int = 0) -> array: + return torch.unsqueeze(x, axis) + +def astype(x: array, dtype: Dtype, /, *, copy: bool = True) -> array: + return x.to(dtype, copy=copy) + +def broadcast_arrays(*arrays: array) -> List[array]: + shape = torch.broadcast_shapes(*[a.shape for a in arrays]) + return [torch.broadcast_to(a, shape) for a in arrays] + +# https://github.com/pytorch/pytorch/issues/70920 +def unique_all(x: array) -> UniqueAllResult: + # torch.unique doesn't support returning indices. + # https://github.com/pytorch/pytorch/issues/36748. The workaround + # suggested in that issue doesn't actually function correctly (it relies + # on non-deterministic behavior of scatter()). + raise NotImplementedError("unique_all() not yet implemented for pytorch (see https://github.com/pytorch/pytorch/issues/36748)") + + # values, inverse_indices, counts = torch.unique(x, return_counts=True, return_inverse=True) + # # torch.unique incorrectly gives a 0 count for nan values. + # # https://github.com/pytorch/pytorch/issues/94106 + # counts[torch.isnan(values)] = 1 + # return UniqueAllResult(values, indices, inverse_indices, counts) + +def unique_counts(x: array) -> UniqueCountsResult: + values, counts = torch.unique(x, return_counts=True) + + # torch.unique incorrectly gives a 0 count for nan values. 
+ # https://github.com/pytorch/pytorch/issues/94106 + counts[torch.isnan(values)] = 1 + return UniqueCountsResult(values, counts) + +def unique_inverse(x: array) -> UniqueInverseResult: + values, inverse = torch.unique(x, return_inverse=True) + return UniqueInverseResult(values, inverse) + +def unique_values(x: array) -> array: + return torch.unique(x) + +def matmul(x1: array, x2: array, /, **kwargs) -> array: + # torch.matmul doesn't type promote (but differently from _fix_promotion) + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch.matmul(x1, x2, **kwargs) + +matrix_transpose = get_xp(torch)(_aliases_matrix_transpose) +_vecdot = get_xp(torch)(_aliases_vecdot) + +def vecdot(x1: array, x2: array, /, *, axis: int = -1) -> array: + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return _vecdot(x1, x2, axis=axis) + +# torch.tensordot uses dims instead of axes +def tensordot(x1: array, x2: array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2, **kwargs) -> array: + # Note: torch.tensordot fails with integer dtypes when there is only 1 + # element in the axis (https://github.com/pytorch/pytorch/issues/84530). + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch.tensordot(x1, x2, dims=axes, **kwargs) + + +def isdtype( + dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], + *, _tuple=True, # Disallow nested tuples +) -> bool: + """ + Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``. + + Note that outside of this function, this compat library does not yet fully + support complex numbers. + + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html + for more details + """ + if isinstance(kind, tuple) and _tuple: + return builtin_any(isdtype(dtype, k, _tuple=False) for k in kind) + elif isinstance(kind, str): + if kind == 'bool': + return dtype == torch.bool + elif kind == 'signed integer': + return dtype in _int_dtypes and dtype.is_signed + elif kind == 'unsigned integer': + return dtype in _int_dtypes and not dtype.is_signed + elif kind == 'integral': + return dtype in _int_dtypes + elif kind == 'real floating': + return dtype.is_floating_point + elif kind == 'complex floating': + return dtype.is_complex + elif kind == 'numeric': + return isdtype(dtype, ('integral', 'real floating', 'complex floating')) + else: + raise ValueError(f"Unrecognized data type kind: {kind!r}") + else: + return dtype == kind + +def take(x: array, indices: array, /, *, axis: Optional[int] = None, **kwargs) -> array: + if axis is None: + if x.ndim != 1: + raise ValueError("axis must be specified when ndim > 1") + axis = 0 + return torch.index_select(x, axis, indices, **kwargs) + +__all__ = ['result_type', 'can_cast', 'permute_dims', 'bitwise_invert', 'newaxis', + 'add', 'atan2', 'bitwise_and', 'bitwise_left_shift', 'bitwise_or', + 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', + 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', + 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', + 'subtract', 'max', 'min', 'sort', 'prod', 'sum', 'any', 'all', + 'mean', 'std', 'var', 'concat', 'squeeze', 'broadcast_to', 'flip', 'roll', + 'nonzero', 'where', 'reshape', 'arange', 'eye', 'linspace', 'full', + 'ones', 'zeros', 'empty', 'tril', 'triu', 'expand_dims', 'astype', + 'broadcast_arrays', 'unique_all', 'unique_counts', + 'unique_inverse', 'unique_values', 'matmul', 'matrix_transpose', + 'vecdot', 'tensordot', 'isdtype', 'take'] diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..5266739106bd419515eacf17e5a9dc0ad519589d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + import torch + array = torch.Tensor + from torch import dtype as Dtype + from typing import Optional + +from torch.linalg import * + +# torch.linalg doesn't define __all__ +# from torch.linalg import __all__ as linalg_all +from torch import linalg as torch_linalg +linalg_all = [i for i in dir(torch_linalg) if not i.startswith('_')] + +# outer is implemented in torch but aren't in the linalg namespace +from torch import outer +from ._aliases import _fix_promotion, matrix_transpose, tensordot, sum + +# Note: torch.linalg.cross does not default to axis=-1 (it defaults to the +# first axis with size 3), see https://github.com/pytorch/pytorch/issues/58743 +def cross(x1: array, x2: array, /, *, axis: int = -1) -> array: + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch_linalg.cross(x1, x2, dim=axis) + +def vecdot(x1: array, x2: array, /, *, axis: int = -1, **kwargs) -> array: + from ._aliases import isdtype + + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + + # torch.linalg.vecdot doesn't support integer dtypes + if isdtype(x1.dtype, 'integral') or isdtype(x2.dtype, 'integral'): + if kwargs: + raise RuntimeError("vecdot kwargs not supported for integral dtypes") + ndim = max(x1.ndim, x2.ndim) + x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape) + x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape) + if x1_shape[axis] != x2_shape[axis]: + raise ValueError("x1 and x2 must have the same size along the given axis") + + x1_, x2_ = torch.broadcast_tensors(x1, x2) + x1_ = torch.moveaxis(x1_, axis, -1) + x2_ = torch.moveaxis(x2_, axis, -1) + + res = x1_[..., None, :] @ x2_[..., None] + return res[..., 0, 0] + return torch.linalg.vecdot(x1, x2, dim=axis, **kwargs) + +def solve(x1: array, x2: array, /, **kwargs) -> array: + x1, x2 = _fix_promotion(x1, x2, only_scalar=False) + return torch.linalg.solve(x1, x2, **kwargs) + +# torch.trace doesn't support the offset argument and doesn't support stacking +def trace(x: array, /, *, offset: int = 0, dtype: Optional[Dtype] = None) -> array: + # Use our wrapped sum to make sure it does upcasting correctly + return sum(torch.diagonal(x, offset=offset, dim1=-2, dim2=-1), axis=-1, dtype=dtype) + +__all__ = linalg_all + ['outer', 'trace', 'matrix_transpose', 'tensordot', + 'vecdot', 'solve'] + +del linalg_all diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acec1566eefecab9fdf589deafc647342412ad49 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4091f491de927b529a820b8e69df49507c47ccb5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5f97bd6e6beb057e3d6dc34081c2febf055e264 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4697af93eb5764bc11c78bdcdc00758839e7009 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_array_api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3908d94fbd112cd149335f24f069f7af928f5dd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e2f8ade7ea5889a5bdbf7fc3c4ed1991cbe9b5c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_warnings.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_warnings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b94b3b955e3cfbdf51e0abdea864a88b2400546 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_warnings.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py new file mode 100644 index 0000000000000000000000000000000000000000..88db113d6d5a35c96ecc0a6a36ab42d74be49153 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py @@ -0,0 +1,32 @@ +import sys +from scipy._lib._testutils import _parse_size, _get_mem_available +import pytest + + +def test__parse_size(): + expected = { + '12': 12e6, + '12 b': 12, + '12k': 12e3, + ' 12 M ': 12e6, + ' 12 G ': 12e9, + ' 12Tb ': 12e12, + '12 Mib ': 12 * 1024.0**2, + '12Tib': 12 * 1024.0**4, + } + + for inp, outp in sorted(expected.items()): + if outp is None: + with pytest.raises(ValueError): + _parse_size(inp) + else: + assert _parse_size(inp) == outp + + +def test__mem_available(): + # May return None on non-Linux platforms + available = _get_mem_available() + if sys.platform.startswith('linux'): + assert available >= 0 + 
else: + assert available is None or available >= 0 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py new file mode 100644 index 0000000000000000000000000000000000000000..691bf3380dd530e27c957f996283a25cd0585982 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py @@ -0,0 +1,408 @@ +from multiprocessing import Pool +from multiprocessing.pool import Pool as PWL +import re +import math +from fractions import Fraction + +import numpy as np +from numpy.testing import assert_equal, assert_ +import pytest +from pytest import raises as assert_raises +import hypothesis.extra.numpy as npst +from hypothesis import given, strategies, reproduce_failure # noqa: F401 +from scipy.conftest import array_api_compatible + +from scipy._lib._array_api import xp_assert_equal +from scipy._lib._util import (_aligned_zeros, check_random_state, MapWrapper, + getfullargspec_no_self, FullArgSpec, + rng_integers, _validate_int, _rename_parameter, + _contains_nan, _rng_html_rewrite, _lazywhere) + + +def test__aligned_zeros(): + niter = 10 + + def check(shape, dtype, order, align): + err_msg = repr((shape, dtype, order, align)) + x = _aligned_zeros(shape, dtype, order, align=align) + if align is None: + align = np.dtype(dtype).alignment + assert_equal(x.__array_interface__['data'][0] % align, 0) + if hasattr(shape, '__len__'): + assert_equal(x.shape, shape, err_msg) + else: + assert_equal(x.shape, (shape,), err_msg) + assert_equal(x.dtype, dtype) + if order == "C": + assert_(x.flags.c_contiguous, err_msg) + elif order == "F": + if x.size > 0: + # Size-0 arrays get invalid flags on NumPy 1.5 + assert_(x.flags.f_contiguous, err_msg) + elif order is None: + assert_(x.flags.c_contiguous, err_msg) + else: + raise ValueError() + + # try various alignments + for align in [1, 2, 3, 4, 8, 16, 32, 64, None]: + for n in [0, 1, 3, 11]: + for order in ["C", "F", None]: + for dtype in [np.uint8, np.float64]: + for shape in [n, (1, 2, 3, n)]: + for j in range(niter): + check(shape, dtype, order, align) + + +def test_check_random_state(): + # If seed is None, return the RandomState singleton used by np.random. + # If seed is an int, return a new RandomState instance seeded with seed. + # If seed is already a RandomState instance, return it. + # Otherwise raise ValueError. + rsi = check_random_state(1) + assert_equal(type(rsi), np.random.RandomState) + rsi = check_random_state(rsi) + assert_equal(type(rsi), np.random.RandomState) + rsi = check_random_state(None) + assert_equal(type(rsi), np.random.RandomState) + assert_raises(ValueError, check_random_state, 'a') + rg = np.random.Generator(np.random.PCG64()) + rsi = check_random_state(rg) + assert_equal(type(rsi), np.random.Generator) + + +def test_getfullargspec_no_self(): + p = MapWrapper(1) + argspec = getfullargspec_no_self(p.__init__) + assert_equal(argspec, FullArgSpec(['pool'], None, None, (1,), [], + None, {})) + argspec = getfullargspec_no_self(p.__call__) + assert_equal(argspec, FullArgSpec(['func', 'iterable'], None, None, None, + [], None, {})) + + class _rv_generic: + def _rvs(self, a, b=2, c=3, *args, size=None, **kwargs): + return None + + rv_obj = _rv_generic() + argspec = getfullargspec_no_self(rv_obj._rvs) + assert_equal(argspec, FullArgSpec(['a', 'b', 'c'], 'args', 'kwargs', + (2, 3), ['size'], {'size': None}, {})) + + +def test_mapwrapper_serial(): + in_arg = np.arange(10.) 
+ out_arg = np.sin(in_arg) + + p = MapWrapper(1) + assert_(p._mapfunc is map) + assert_(p.pool is None) + assert_(p._own_pool is False) + out = list(p(np.sin, in_arg)) + assert_equal(out, out_arg) + + with assert_raises(RuntimeError): + p = MapWrapper(0) + + +def test_pool(): + with Pool(2) as p: + p.map(math.sin, [1, 2, 3, 4]) + + +def test_mapwrapper_parallel(): + in_arg = np.arange(10.) + out_arg = np.sin(in_arg) + + with MapWrapper(2) as p: + out = p(np.sin, in_arg) + assert_equal(list(out), out_arg) + + assert_(p._own_pool is True) + assert_(isinstance(p.pool, PWL)) + assert_(p._mapfunc is not None) + + # the context manager should've closed the internal pool + # check that it has by asking it to calculate again. + with assert_raises(Exception) as excinfo: + p(np.sin, in_arg) + + assert_(excinfo.type is ValueError) + + # can also set a PoolWrapper up with a map-like callable instance + with Pool(2) as p: + q = MapWrapper(p.map) + + assert_(q._own_pool is False) + q.close() + + # closing the PoolWrapper shouldn't close the internal pool + # because it didn't create it + out = p.map(np.sin, in_arg) + assert_equal(list(out), out_arg) + + +def test_rng_integers(): + rng = np.random.RandomState() + + # test that numbers are inclusive of high point + arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True) + assert np.max(arr) == 5 + assert np.min(arr) == 2 + assert arr.shape == (100, ) + + # test that numbers are inclusive of high point + arr = rng_integers(rng, low=5, size=100, endpoint=True) + assert np.max(arr) == 5 + assert np.min(arr) == 0 + assert arr.shape == (100, ) + + # test that numbers are exclusive of high point + arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False) + assert np.max(arr) == 4 + assert np.min(arr) == 2 + assert arr.shape == (100, ) + + # test that numbers are exclusive of high point + arr = rng_integers(rng, low=5, size=100, endpoint=False) + assert np.max(arr) == 4 + assert np.min(arr) == 0 + assert arr.shape == (100, ) + + # now try with np.random.Generator + try: + rng = np.random.default_rng() + except AttributeError: + return + + # test that numbers are inclusive of high point + arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True) + assert np.max(arr) == 5 + assert np.min(arr) == 2 + assert arr.shape == (100, ) + + # test that numbers are inclusive of high point + arr = rng_integers(rng, low=5, size=100, endpoint=True) + assert np.max(arr) == 5 + assert np.min(arr) == 0 + assert arr.shape == (100, ) + + # test that numbers are exclusive of high point + arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False) + assert np.max(arr) == 4 + assert np.min(arr) == 2 + assert arr.shape == (100, ) + + # test that numbers are exclusive of high point + arr = rng_integers(rng, low=5, size=100, endpoint=False) + assert np.max(arr) == 4 + assert np.min(arr) == 0 + assert arr.shape == (100, ) + + +class TestValidateInt: + + @pytest.mark.parametrize('n', [4, np.uint8(4), np.int16(4), np.array(4)]) + def test_validate_int(self, n): + n = _validate_int(n, 'n') + assert n == 4 + + @pytest.mark.parametrize('n', [4.0, np.array([4]), Fraction(4, 1)]) + def test_validate_int_bad(self, n): + with pytest.raises(TypeError, match='n must be an integer'): + _validate_int(n, 'n') + + def test_validate_int_below_min(self): + with pytest.raises(ValueError, match='n must be an integer not ' + 'less than 0'): + _validate_int(-1, 'n', 0) + + +class TestRenameParameter: + # check that wrapper `_rename_parameter` for backward-compatible + # keyword 
renaming works correctly + + # Example method/function that still accepts keyword `old` + @_rename_parameter("old", "new") + def old_keyword_still_accepted(self, new): + return new + + # Example method/function for which keyword `old` is deprecated + @_rename_parameter("old", "new", dep_version="1.9.0") + def old_keyword_deprecated(self, new): + return new + + def test_old_keyword_still_accepted(self): + # positional argument and both keyword work identically + res1 = self.old_keyword_still_accepted(10) + res2 = self.old_keyword_still_accepted(new=10) + res3 = self.old_keyword_still_accepted(old=10) + assert res1 == res2 == res3 == 10 + + # unexpected keyword raises an error + message = re.escape("old_keyword_still_accepted() got an unexpected") + with pytest.raises(TypeError, match=message): + self.old_keyword_still_accepted(unexpected=10) + + # multiple values for the same parameter raises an error + message = re.escape("old_keyword_still_accepted() got multiple") + with pytest.raises(TypeError, match=message): + self.old_keyword_still_accepted(10, new=10) + with pytest.raises(TypeError, match=message): + self.old_keyword_still_accepted(10, old=10) + with pytest.raises(TypeError, match=message): + self.old_keyword_still_accepted(new=10, old=10) + + def test_old_keyword_deprecated(self): + # positional argument and both keyword work identically, + # but use of old keyword results in DeprecationWarning + dep_msg = "Use of keyword argument `old` is deprecated" + res1 = self.old_keyword_deprecated(10) + res2 = self.old_keyword_deprecated(new=10) + with pytest.warns(DeprecationWarning, match=dep_msg): + res3 = self.old_keyword_deprecated(old=10) + assert res1 == res2 == res3 == 10 + + # unexpected keyword raises an error + message = re.escape("old_keyword_deprecated() got an unexpected") + with pytest.raises(TypeError, match=message): + self.old_keyword_deprecated(unexpected=10) + + # multiple values for the same parameter raises an error and, + # if old keyword is used, results in DeprecationWarning + message = re.escape("old_keyword_deprecated() got multiple") + with pytest.raises(TypeError, match=message): + self.old_keyword_deprecated(10, new=10) + with pytest.raises(TypeError, match=message), \ + pytest.warns(DeprecationWarning, match=dep_msg): + self.old_keyword_deprecated(10, old=10) + with pytest.raises(TypeError, match=message), \ + pytest.warns(DeprecationWarning, match=dep_msg): + self.old_keyword_deprecated(new=10, old=10) + + +class TestContainsNaNTest: + + def test_policy(self): + data = np.array([1, 2, 3, np.nan]) + + contains_nan, nan_policy = _contains_nan(data, nan_policy="propagate") + assert contains_nan + assert nan_policy == "propagate" + + contains_nan, nan_policy = _contains_nan(data, nan_policy="omit") + assert contains_nan + assert nan_policy == "omit" + + msg = "The input contains nan values" + with pytest.raises(ValueError, match=msg): + _contains_nan(data, nan_policy="raise") + + msg = "nan_policy must be one of" + with pytest.raises(ValueError, match=msg): + _contains_nan(data, nan_policy="nan") + + def test_contains_nan_1d(self): + data1 = np.array([1, 2, 3]) + assert not _contains_nan(data1)[0] + + data2 = np.array([1, 2, 3, np.nan]) + assert _contains_nan(data2)[0] + + data3 = np.array([np.nan, 2, 3, np.nan]) + assert _contains_nan(data3)[0] + + data4 = np.array([1, 2, "3", np.nan]) # converted to string "nan" + assert not _contains_nan(data4)[0] + + data5 = np.array([1, 2, "3", np.nan], dtype='object') + assert _contains_nan(data5)[0] + + def 
test_contains_nan_2d(self): + data1 = np.array([[1, 2], [3, 4]]) + assert not _contains_nan(data1)[0] + + data2 = np.array([[1, 2], [3, np.nan]]) + assert _contains_nan(data2)[0] + + data3 = np.array([["1", 2], [3, np.nan]]) # converted to string "nan" + assert not _contains_nan(data3)[0] + + data4 = np.array([["1", 2], [3, np.nan]], dtype='object') + assert _contains_nan(data4)[0] + + +def test__rng_html_rewrite(): + def mock_str(): + lines = [ + 'np.random.default_rng(8989843)', + 'np.random.default_rng(seed)', + 'np.random.default_rng(0x9a71b21474694f919882289dc1559ca)', + ' bob ', + ] + return lines + + res = _rng_html_rewrite(mock_str)() + ref = [ + 'np.random.default_rng()', + 'np.random.default_rng(seed)', + 'np.random.default_rng()', + ' bob ', + ] + + assert res == ref + + +class TestLazywhere: + n_arrays = strategies.integers(min_value=1, max_value=3) + rng_seed = strategies.integers(min_value=1000000000, max_value=9999999999) + dtype = strategies.sampled_from((np.float32, np.float64)) + p = strategies.floats(min_value=0, max_value=1) + data = strategies.data() + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') # overflows, etc. + @array_api_compatible + @given(n_arrays=n_arrays, rng_seed=rng_seed, dtype=dtype, p=p, data=data) + def test_basic(self, n_arrays, rng_seed, dtype, p, data, xp): + mbs = npst.mutually_broadcastable_shapes(num_shapes=n_arrays+1, + min_side=0) + input_shapes, result_shape = data.draw(mbs) + cond_shape, *shapes = input_shapes + fillvalue = xp.asarray(data.draw(npst.arrays(dtype=dtype, shape=tuple()))) + arrays = [xp.asarray(data.draw(npst.arrays(dtype=dtype, shape=shape))) + for shape in shapes] + + def f(*args): + return sum(arg for arg in args) + + def f2(*args): + return sum(arg for arg in args) / 2 + + rng = np.random.default_rng(rng_seed) + cond = xp.asarray(rng.random(size=cond_shape) > p) + + res1 = _lazywhere(cond, arrays, f, fillvalue) + res2 = _lazywhere(cond, arrays, f, f2=f2) + + # Ensure arrays are at least 1d to follow sane type promotion rules. + if xp == np: + cond, fillvalue, *arrays = np.atleast_1d(cond, fillvalue, *arrays) + + ref1 = xp.where(cond, f(*arrays), fillvalue) + ref2 = xp.where(cond, f(*arrays), f2(*arrays)) + + if xp == np: + ref1 = ref1.reshape(result_shape) + ref2 = ref2.reshape(result_shape) + res1 = xp.asarray(res1)[()] + res2 = xp.asarray(res2)[()] + + isinstance(res1, type(xp.asarray([]))) + xp_assert_equal(res1, ref1) + assert_equal(res1.shape, ref1.shape) + assert_equal(res1.dtype, ref1.dtype) + + isinstance(res2, type(xp.asarray([]))) + xp_assert_equal(res2, ref2) + assert_equal(res2.shape, ref2.shape) + assert_equal(res2.dtype, ref2.dtype) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py new file mode 100644 index 0000000000000000000000000000000000000000..f19ca377129b925cad732dd25bf3089c646f923f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py @@ -0,0 +1,162 @@ +import pytest +import pickle +from numpy.testing import assert_equal +from scipy._lib._bunch import _make_tuple_bunch + + +# `Result` is defined at the top level of the module so it can be +# used to test pickling. 
+Result = _make_tuple_bunch('Result', ['x', 'y', 'z'], ['w', 'beta']) + + +class TestMakeTupleBunch: + + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + # Tests with Result + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + def setup_method(self): + # Set up an instance of Result. + self.result = Result(x=1, y=2, z=3, w=99, beta=0.5) + + def test_attribute_access(self): + assert_equal(self.result.x, 1) + assert_equal(self.result.y, 2) + assert_equal(self.result.z, 3) + assert_equal(self.result.w, 99) + assert_equal(self.result.beta, 0.5) + + def test_indexing(self): + assert_equal(self.result[0], 1) + assert_equal(self.result[1], 2) + assert_equal(self.result[2], 3) + assert_equal(self.result[-1], 3) + with pytest.raises(IndexError, match='index out of range'): + self.result[3] + + def test_unpacking(self): + x0, y0, z0 = self.result + assert_equal((x0, y0, z0), (1, 2, 3)) + assert_equal(self.result, (1, 2, 3)) + + def test_slice(self): + assert_equal(self.result[1:], (2, 3)) + assert_equal(self.result[::2], (1, 3)) + assert_equal(self.result[::-1], (3, 2, 1)) + + def test_len(self): + assert_equal(len(self.result), 3) + + def test_repr(self): + s = repr(self.result) + assert_equal(s, 'Result(x=1, y=2, z=3, w=99, beta=0.5)') + + def test_hash(self): + assert_equal(hash(self.result), hash((1, 2, 3))) + + def test_pickle(self): + s = pickle.dumps(self.result) + obj = pickle.loads(s) + assert isinstance(obj, Result) + assert_equal(obj.x, self.result.x) + assert_equal(obj.y, self.result.y) + assert_equal(obj.z, self.result.z) + assert_equal(obj.w, self.result.w) + assert_equal(obj.beta, self.result.beta) + + def test_read_only_existing(self): + with pytest.raises(AttributeError, match="can't set attribute"): + self.result.x = -1 + + def test_read_only_new(self): + self.result.plate_of_shrimp = "lattice of coincidence" + assert self.result.plate_of_shrimp == "lattice of coincidence" + + def test_constructor_missing_parameter(self): + with pytest.raises(TypeError, match='missing'): + # `w` is missing. + Result(x=1, y=2, z=3, beta=0.75) + + def test_constructor_incorrect_parameter(self): + with pytest.raises(TypeError, match='unexpected'): + # `foo` is not an existing field. + Result(x=1, y=2, z=3, w=123, beta=0.75, foo=999) + + def test_module(self): + m = 'scipy._lib.tests.test_bunch' + assert_equal(Result.__module__, m) + assert_equal(self.result.__module__, m) + + def test_extra_fields_per_instance(self): + # This test exists to ensure that instances of the same class + # store their own values for the extra fields. That is, the values + # are stored per instance and not in the class. + result1 = Result(x=1, y=2, z=3, w=-1, beta=0.0) + result2 = Result(x=4, y=5, z=6, w=99, beta=1.0) + assert_equal(result1.w, -1) + assert_equal(result1.beta, 0.0) + # The rest of these checks aren't essential, but let's check + # them anyway. 
+ assert_equal(result1[:], (1, 2, 3)) + assert_equal(result2.w, 99) + assert_equal(result2.beta, 1.0) + assert_equal(result2[:], (4, 5, 6)) + + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + # Other tests + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + def test_extra_field_names_is_optional(self): + Square = _make_tuple_bunch('Square', ['width', 'height']) + sq = Square(width=1, height=2) + assert_equal(sq.width, 1) + assert_equal(sq.height, 2) + s = repr(sq) + assert_equal(s, 'Square(width=1, height=2)') + + def test_tuple_like(self): + Tup = _make_tuple_bunch('Tup', ['a', 'b']) + tu = Tup(a=1, b=2) + assert isinstance(tu, tuple) + assert isinstance(tu + (1,), tuple) + + def test_explicit_module(self): + m = 'some.module.name' + Foo = _make_tuple_bunch('Foo', ['x'], ['a', 'b'], module=m) + foo = Foo(x=1, a=355, b=113) + assert_equal(Foo.__module__, m) + assert_equal(foo.__module__, m) + + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + # Argument validation + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + @pytest.mark.parametrize('args', [('123', ['a'], ['b']), + ('Foo', ['-3'], ['x']), + ('Foo', ['a'], ['+-*/'])]) + def test_identifiers_not_allowed(self, args): + with pytest.raises(ValueError, match='identifiers'): + _make_tuple_bunch(*args) + + @pytest.mark.parametrize('args', [('Foo', ['a', 'b', 'a'], ['x']), + ('Foo', ['a', 'b'], ['b', 'x'])]) + def test_repeated_field_names(self, args): + with pytest.raises(ValueError, match='Duplicate'): + _make_tuple_bunch(*args) + + @pytest.mark.parametrize('args', [('Foo', ['_a'], ['x']), + ('Foo', ['a'], ['_x'])]) + def test_leading_underscore_not_allowed(self, args): + with pytest.raises(ValueError, match='underscore'): + _make_tuple_bunch(*args) + + @pytest.mark.parametrize('args', [('Foo', ['def'], ['x']), + ('Foo', ['a'], ['or']), + ('and', ['a'], ['x'])]) + def test_keyword_not_allowed_in_fields(self, args): + with pytest.raises(ValueError, match='keyword'): + _make_tuple_bunch(*args) + + def test_at_least_one_field_name_required(self): + with pytest.raises(ValueError, match='at least one name'): + _make_tuple_bunch('Qwerty', [], ['a', 'b']) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py new file mode 100644 index 0000000000000000000000000000000000000000..7910bd56f6b0c37276c9dff5a15cd3ddf755840e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py @@ -0,0 +1,10 @@ +import pytest + + +def test_cython_api_deprecation(): + match = ("`scipy._lib._test_deprecation_def.foo_deprecated` " + "is deprecated, use `foo` instead!\n" + "Deprecated in Scipy 42.0.0") + with pytest.warns(DeprecationWarning, match=match): + from .. 
import _test_deprecation_call + assert _test_deprecation_call.call() == (1, 1) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b429a8db0d76c419f031d007748291fd04d86bca Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_blas.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_blas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8606713f6bbd8d22e6e2fd0ca823f41a25c9067b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_blas.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cython_blas.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cython_blas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac13bd648432834e9f7164093b8a87fc5e36bbba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cython_blas.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_cossin.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_cossin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c71180246086491c6d5413c8f4c9a1c03b67df0f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_cossin.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_fblas.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_fblas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3496ed24d9f17d42893cecae2e6464f88c887b4c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_fblas.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_interpolative.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_interpolative.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3a9e486604e51525c945b75e0514f976a2d74d0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_interpolative.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_misc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d01b9c63f9804fa7e7f61a91743348f809c7ff53 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_misc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_special_matrices.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_special_matrices.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f01485294c5f9acde614e45b87e93f9dc3bfc3a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_special_matrices.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_15_data.npz b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_15_data.npz new file mode 100644 index 0000000000000000000000000000000000000000..660bbb41b7fad43ed945dc701693451ceb60166c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_15_data.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13f3e1491a876bbf59d7ea10ad29c1f9b5996a2ab99216f31d5bfcd659012c1e +size 34462 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_18_data.npz b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_18_data.npz new file mode 100644 index 0000000000000000000000000000000000000000..0b3d569a1a65e9b5ff153ae4121a6a5a69409f7c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_18_data.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59f839467f2752b7df6fb6d4094396edd32a5929b764f7ffa1e6666431e6cac6 +size 161487 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8a8bdcd73a2fd80b87d9c570b40bddbe318a8d60 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/__init__.py @@ -0,0 +1,643 @@ +""" +.. _statsrefmanual: + +========================================== +Statistical functions (:mod:`scipy.stats`) +========================================== + +.. currentmodule:: scipy.stats + +This module contains a large number of probability distributions, +summary and frequency statistics, correlation functions and statistical +tests, masked statistics, kernel density estimation, quasi-Monte Carlo +functionality, and more. + +Statistics is a very large area, and there are topics that are out of scope +for SciPy and are covered by other packages. Some of the most important ones +are: + +- `statsmodels `__: + regression, linear models, time series analysis, extensions to topics + also covered by ``scipy.stats``. +- `Pandas `__: tabular data, time series + functionality, interfaces to other statistical languages. +- `PyMC `__: Bayesian statistical + modeling, probabilistic machine learning. +- `scikit-learn `__: classification, regression, + model selection. +- `Seaborn `__: statistical data visualization. +- `rpy2 `__: Python to R bridge. + + +Probability distributions +========================= + +Each univariate distribution is an instance of a subclass of `rv_continuous` +(`rv_discrete` for discrete distributions): + +.. autosummary:: + :toctree: generated/ + + rv_continuous + rv_discrete + rv_histogram + +Continuous distributions +------------------------ + +.. 
autosummary:: + :toctree: generated/ + + alpha -- Alpha + anglit -- Anglit + arcsine -- Arcsine + argus -- Argus + beta -- Beta + betaprime -- Beta Prime + bradford -- Bradford + burr -- Burr (Type III) + burr12 -- Burr (Type XII) + cauchy -- Cauchy + chi -- Chi + chi2 -- Chi-squared + cosine -- Cosine + crystalball -- Crystalball + dgamma -- Double Gamma + dweibull -- Double Weibull + erlang -- Erlang + expon -- Exponential + exponnorm -- Exponentially Modified Normal + exponweib -- Exponentiated Weibull + exponpow -- Exponential Power + f -- F (Snecdor F) + fatiguelife -- Fatigue Life (Birnbaum-Saunders) + fisk -- Fisk + foldcauchy -- Folded Cauchy + foldnorm -- Folded Normal + genlogistic -- Generalized Logistic + gennorm -- Generalized normal + genpareto -- Generalized Pareto + genexpon -- Generalized Exponential + genextreme -- Generalized Extreme Value + gausshyper -- Gauss Hypergeometric + gamma -- Gamma + gengamma -- Generalized gamma + genhalflogistic -- Generalized Half Logistic + genhyperbolic -- Generalized Hyperbolic + geninvgauss -- Generalized Inverse Gaussian + gibrat -- Gibrat + gompertz -- Gompertz (Truncated Gumbel) + gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I + gumbel_l -- Left Sided Gumbel, etc. + halfcauchy -- Half Cauchy + halflogistic -- Half Logistic + halfnorm -- Half Normal + halfgennorm -- Generalized Half Normal + hypsecant -- Hyperbolic Secant + invgamma -- Inverse Gamma + invgauss -- Inverse Gaussian + invweibull -- Inverse Weibull + jf_skew_t -- Jones and Faddy Skew-T + johnsonsb -- Johnson SB + johnsonsu -- Johnson SU + kappa4 -- Kappa 4 parameter + kappa3 -- Kappa 3 parameter + ksone -- Distribution of Kolmogorov-Smirnov one-sided test statistic + kstwo -- Distribution of Kolmogorov-Smirnov two-sided test statistic + kstwobign -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic. + laplace -- Laplace + laplace_asymmetric -- Asymmetric Laplace + levy -- Levy + levy_l + levy_stable + logistic -- Logistic + loggamma -- Log-Gamma + loglaplace -- Log-Laplace (Log Double Exponential) + lognorm -- Log-Normal + loguniform -- Log-Uniform + lomax -- Lomax (Pareto of the second kind) + maxwell -- Maxwell + mielke -- Mielke's Beta-Kappa + moyal -- Moyal + nakagami -- Nakagami + ncx2 -- Non-central chi-squared + ncf -- Non-central F + nct -- Non-central Student's T + norm -- Normal (Gaussian) + norminvgauss -- Normal Inverse Gaussian + pareto -- Pareto + pearson3 -- Pearson type III + powerlaw -- Power-function + powerlognorm -- Power log normal + powernorm -- Power normal + rdist -- R-distribution + rayleigh -- Rayleigh + rel_breitwigner -- Relativistic Breit-Wigner + rice -- Rice + recipinvgauss -- Reciprocal Inverse Gaussian + semicircular -- Semicircular + skewcauchy -- Skew Cauchy + skewnorm -- Skew normal + studentized_range -- Studentized Range + t -- Student's T + trapezoid -- Trapezoidal + triang -- Triangular + truncexpon -- Truncated Exponential + truncnorm -- Truncated Normal + truncpareto -- Truncated Pareto + truncweibull_min -- Truncated minimum Weibull distribution + tukeylambda -- Tukey-Lambda + uniform -- Uniform + vonmises -- Von-Mises (Circular) + vonmises_line -- Von-Mises (Line) + wald -- Wald + weibull_min -- Minimum Weibull (see Frechet) + weibull_max -- Maximum Weibull (see Frechet) + wrapcauchy -- Wrapped Cauchy + +The ``fit`` method of the univariate continuous distributions uses +maximum likelihood estimation to fit the distribution to a data set. 
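For instance (an illustrative sketch, not part of the upstream docstring), fitting a normal distribution to simulated data returns maximum likelihood estimates of the location and scale:

>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng(1234)
>>> data = rng.normal(loc=5.0, scale=2.0, size=1000)
>>> loc_hat, scale_hat = stats.norm.fit(data)   # maximum likelihood estimates
>>> bool(abs(loc_hat - 5.0) < 0.3 and abs(scale_hat - 2.0) < 0.3)
True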
+The ``fit`` method can accept regular data or *censored data*. +Censored data is represented with instances of the `CensoredData` +class. + +.. autosummary:: + :toctree: generated/ + + CensoredData + + +Multivariate distributions +-------------------------- + +.. autosummary:: + :toctree: generated/ + + multivariate_normal -- Multivariate normal distribution + matrix_normal -- Matrix normal distribution + dirichlet -- Dirichlet + dirichlet_multinomial -- Dirichlet multinomial distribution + wishart -- Wishart + invwishart -- Inverse Wishart + multinomial -- Multinomial distribution + special_ortho_group -- SO(N) group + ortho_group -- O(N) group + unitary_group -- U(N) group + random_correlation -- random correlation matrices + multivariate_t -- Multivariate t-distribution + multivariate_hypergeom -- Multivariate hypergeometric distribution + random_table -- Distribution of random tables with given marginals + uniform_direction -- Uniform distribution on S(N-1) + vonmises_fisher -- Von Mises-Fisher distribution + +`scipy.stats.multivariate_normal` methods accept instances +of the following class to represent the covariance. + +.. autosummary:: + :toctree: generated/ + + Covariance -- Representation of a covariance matrix + + +Discrete distributions +---------------------- + +.. autosummary:: + :toctree: generated/ + + bernoulli -- Bernoulli + betabinom -- Beta-Binomial + betanbinom -- Beta-Negative Binomial + binom -- Binomial + boltzmann -- Boltzmann (Truncated Discrete Exponential) + dlaplace -- Discrete Laplacian + geom -- Geometric + hypergeom -- Hypergeometric + logser -- Logarithmic (Log-Series, Series) + nbinom -- Negative Binomial + nchypergeom_fisher -- Fisher's Noncentral Hypergeometric + nchypergeom_wallenius -- Wallenius's Noncentral Hypergeometric + nhypergeom -- Negative Hypergeometric + planck -- Planck (Discrete Exponential) + poisson -- Poisson + randint -- Discrete Uniform + skellam -- Skellam + yulesimon -- Yule-Simon + zipf -- Zipf (Zeta) + zipfian -- Zipfian + + +An overview of statistical functions is given below. Many of these functions +have a similar version in `scipy.stats.mstats` which work for masked arrays. + +Summary statistics +================== + +.. autosummary:: + :toctree: generated/ + + describe -- Descriptive statistics + gmean -- Geometric mean + hmean -- Harmonic mean + pmean -- Power mean + kurtosis -- Fisher or Pearson kurtosis + mode -- Modal value + moment -- Central moment + expectile -- Expectile + skew -- Skewness + kstat -- + kstatvar -- + tmean -- Truncated arithmetic mean + tvar -- Truncated variance + tmin -- + tmax -- + tstd -- + tsem -- + variation -- Coefficient of variation + find_repeats + rankdata + tiecorrect + trim_mean + gstd -- Geometric Standard Deviation + iqr + sem + bayes_mvs + mvsdist + entropy + differential_entropy + median_abs_deviation + +Frequency statistics +==================== + +.. autosummary:: + :toctree: generated/ + + cumfreq + percentileofscore + scoreatpercentile + relfreq + +.. autosummary:: + :toctree: generated/ + + binned_statistic -- Compute a binned statistic for a set of data. + binned_statistic_2d -- Compute a 2-D binned statistic for a set of data. + binned_statistic_dd -- Compute a d-D binned statistic for a set of data. + +Hypothesis Tests and related functions +====================================== +SciPy has many functions for performing hypothesis tests that return a +test statistic and a p-value, and several of them return confidence intervals +and/or other related information. 
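For example (an illustrative sketch rather than upstream text), a one-sample t-test returns an object carrying the statistic, the p-value, and a confidence interval for the mean:

>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.default_rng(12345)
>>> sample = rng.normal(loc=0.5, scale=1.0, size=100)
>>> res = stats.ttest_1samp(sample, popmean=0.0)
>>> res.pvalue < 0.05                      # strong evidence the mean is nonzero
True
>>> ci = res.confidence_interval()         # 95% CI for the population mean
>>> ci.low > 0.0                           # consistent with the small p-value
True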
+ +The headings below are based on common uses of the functions within, but due to +the wide variety of statistical procedures, any attempt at coarse-grained +categorization will be imperfect. Also, note that tests within the same heading +are not interchangeable in general (e.g. many have different distributional +assumptions). + +One Sample Tests / Paired Sample Tests +-------------------------------------- +One sample tests are typically used to assess whether a single sample was +drawn from a specified distribution or a distribution with specified properties +(e.g. zero mean). + +.. autosummary:: + :toctree: generated/ + + ttest_1samp + binomtest + quantile_test + skewtest + kurtosistest + normaltest + jarque_bera + shapiro + anderson + cramervonmises + ks_1samp + goodness_of_fit + chisquare + power_divergence + +Paired sample tests are often used to assess whether two samples were drawn +from the same distribution; they differ from the independent sample tests below +in that each observation in one sample is treated as paired with a +closely-related observation in the other sample (e.g. when environmental +factors are controlled between observations within a pair but not among pairs). +They can also be interpreted or used as one-sample tests (e.g. tests on the +mean or median of *differences* between paired observations). + +.. autosummary:: + :toctree: generated/ + + ttest_rel + wilcoxon + +Association/Correlation Tests +----------------------------- + +These tests are often used to assess whether there is a relationship (e.g. +linear) between paired observations in multiple samples or among the +coordinates of multivariate observations. + +.. autosummary:: + :toctree: generated/ + + linregress + pearsonr + spearmanr + pointbiserialr + kendalltau + weightedtau + somersd + siegelslopes + theilslopes + page_trend_test + multiscale_graphcorr + +These association tests are for use with samples in the form of contingency +tables. Supporting functions are available in `scipy.stats.contingency`. + +.. autosummary:: + :toctree: generated/ + + chi2_contingency + fisher_exact + barnard_exact + boschloo_exact + +Independent Sample Tests +------------------------ +Independent sample tests are typically used to assess whether multiple samples +were independently drawn from the same distribution or different distributions +with a shared property (e.g. equal means). + +Some tests are specifically for comparing two samples. + +.. autosummary:: + :toctree: generated/ + + ttest_ind_from_stats + poisson_means_test + ttest_ind + mannwhitneyu + bws_test + ranksums + brunnermunzel + mood + ansari + cramervonmises_2samp + epps_singleton_2samp + ks_2samp + kstest + +Others are generalized to multiple samples. + +.. autosummary:: + :toctree: generated/ + + f_oneway + tukey_hsd + dunnett + kruskal + alexandergovern + fligner + levene + bartlett + median_test + friedmanchisquare + anderson_ksamp + +Resampling and Monte Carlo Methods +---------------------------------- +The following functions can reproduce the p-value and confidence interval +results of most of the functions above, and often produce accurate results in a +wider variety of conditions. They can also be used to perform hypothesis tests +and generate confidence intervals for custom statistics. This flexibility comes +at the cost of greater computational requirements and stochastic results. + +.. 
autosummary:: + :toctree: generated/ + + monte_carlo_test + permutation_test + bootstrap + +Instances of the following object can be passed into some hypothesis test +functions to perform a resampling or Monte Carlo version of the hypothesis +test. + +.. autosummary:: + :toctree: generated/ + + MonteCarloMethod + PermutationMethod + BootstrapMethod + +Multiple Hypothesis Testing and Meta-Analysis +--------------------------------------------- +These functions are for assessing the results of individual tests as a whole. +Functions for performing specific multiple hypothesis tests (e.g. post hoc +tests) are listed above. + +.. autosummary:: + :toctree: generated/ + + combine_pvalues + false_discovery_control + + +The following functions are related to the tests above but do not belong in the +above categories. + +Quasi-Monte Carlo +================= + +.. toctree:: + :maxdepth: 4 + + stats.qmc + +Contingency Tables +================== + +.. toctree:: + :maxdepth: 4 + + stats.contingency + +Masked statistics functions +=========================== + +.. toctree:: + + stats.mstats + + +Other statistical functionality +=============================== + +Transformations +--------------- + +.. autosummary:: + :toctree: generated/ + + boxcox + boxcox_normmax + boxcox_llf + yeojohnson + yeojohnson_normmax + yeojohnson_llf + obrientransform + sigmaclip + trimboth + trim1 + zmap + zscore + gzscore + +Statistical distances +--------------------- + +.. autosummary:: + :toctree: generated/ + + wasserstein_distance + wasserstein_distance_nd + energy_distance + +Sampling +-------- + +.. toctree:: + :maxdepth: 4 + + stats.sampling + +Random variate generation / CDF Inversion +----------------------------------------- + +.. autosummary:: + :toctree: generated/ + + rvs_ratio_uniforms + +Fitting / Survival Analysis +--------------------------- + +.. autosummary:: + :toctree: generated/ + + fit + ecdf + logrank + +Directional statistical functions +--------------------------------- + +.. autosummary:: + :toctree: generated/ + + directional_stats + circmean + circvar + circstd + +Sensitivity Analysis +-------------------- + +.. autosummary:: + :toctree: generated/ + + sobol_indices + +Plot-tests +---------- + +.. autosummary:: + :toctree: generated/ + + ppcc_max + ppcc_plot + probplot + boxcox_normplot + yeojohnson_normplot + +Univariate and multivariate kernel density estimation +----------------------------------------------------- + +.. autosummary:: + :toctree: generated/ + + gaussian_kde + +Warnings / Errors used in :mod:`scipy.stats` +-------------------------------------------- + +.. autosummary:: + :toctree: generated/ + + DegenerateDataWarning + ConstantInputWarning + NearConstantInputWarning + FitError + +Result classes used in :mod:`scipy.stats` +----------------------------------------- + +.. warning:: + + These classes are private, but they are included here because instances + of them are returned by other statistical functions. User import and + instantiation is not supported. + +.. toctree:: + :maxdepth: 2 + + stats._result_classes + +""" # noqa: E501 + +from ._warnings_errors import (ConstantInputWarning, NearConstantInputWarning, + DegenerateDataWarning, FitError) +from ._stats_py import * +from ._variation import variation +from .distributions import * +from ._morestats import * +from ._multicomp import * +from ._binomtest import binomtest +from ._binned_statistic import * +from ._kde import gaussian_kde +from . import mstats +from . import qmc +from ._multivariate import * +from . 
import contingency +from .contingency import chi2_contingency +from ._censored_data import CensoredData +from ._resampling import (bootstrap, monte_carlo_test, permutation_test, + MonteCarloMethod, PermutationMethod, BootstrapMethod) +from ._entropy import * +from ._hypotests import * +from ._rvs_sampling import rvs_ratio_uniforms +from ._page_trend_test import page_trend_test +from ._mannwhitneyu import mannwhitneyu +from ._bws_test import bws_test +from ._fit import fit, goodness_of_fit +from ._covariance import Covariance +from ._sensitivity_analysis import * +from ._survival import * + +# Deprecated namespaces, to be removed in v2.0.0 +from . import ( + biasedurn, kde, morestats, mstats_basic, mstats_extras, mvn, stats +) + + +__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders. + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_bws_test.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_bws_test.py new file mode 100644 index 0000000000000000000000000000000000000000..6496ecfba798dc7ad719f784a57896e296590675 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_bws_test.py @@ -0,0 +1,177 @@ +import numpy as np +from functools import partial +from scipy import stats + + +def _bws_input_validation(x, y, alternative, method): + ''' Input validation and standardization for bws test''' + x, y = np.atleast_1d(x, y) + if x.ndim > 1 or y.ndim > 1: + raise ValueError('`x` and `y` must be exactly one-dimensional.') + if np.isnan(x).any() or np.isnan(y).any(): + raise ValueError('`x` and `y` must not contain NaNs.') + if np.size(x) == 0 or np.size(y) == 0: + raise ValueError('`x` and `y` must be of nonzero size.') + + z = stats.rankdata(np.concatenate((x, y))) + x, y = z[:len(x)], z[len(x):] + + alternatives = {'two-sided', 'less', 'greater'} + alternative = alternative.lower() + if alternative not in alternatives: + raise ValueError(f'`alternative` must be one of {alternatives}.') + + method = stats.PermutationMethod() if method is None else method + if not isinstance(method, stats.PermutationMethod): + raise ValueError('`method` must be an instance of ' + '`scipy.stats.PermutationMethod`') + + return x, y, alternative, method + + +def _bws_statistic(x, y, alternative, axis): + '''Compute the BWS test statistic for two independent samples''' + # Public function currently does not accept `axis`, but `permutation_test` + # uses `axis` to make vectorized call. + + Ri, Hj = np.sort(x, axis=axis), np.sort(y, axis=axis) + n, m = Ri.shape[axis], Hj.shape[axis] + i, j = np.arange(1, n+1), np.arange(1, m+1) + + Bx_num = Ri - (m + n)/n * i + By_num = Hj - (m + n)/m * j + + if alternative == 'two-sided': + Bx_num *= Bx_num + By_num *= By_num + else: + Bx_num *= np.abs(Bx_num) + By_num *= np.abs(By_num) + + Bx_den = i/(n+1) * (1 - i/(n+1)) * m*(m+n)/n + By_den = j/(m+1) * (1 - j/(m+1)) * n*(m+n)/m + + Bx = 1/n * np.sum(Bx_num/Bx_den, axis=axis) + By = 1/m * np.sum(By_num/By_den, axis=axis) + + B = (Bx + By) / 2 if alternative == 'two-sided' else (Bx - By) / 2 + + return B + + +def bws_test(x, y, *, alternative="two-sided", method=None): + r'''Perform the Baumgartner-Weiss-Schindler test on two independent samples. + + The Baumgartner-Weiss-Schindler (BWS) test is a nonparametric test of + the null hypothesis that the distribution underlying sample `x` + is the same as the distribution underlying sample `y`. 
Unlike + the Kolmogorov-Smirnov, Wilcoxon, and Cramer-Von Mises tests, + the BWS test weights the integral by the variance of the difference + in cumulative distribution functions (CDFs), emphasizing the tails of the + distributions, which increases the power of the test in many applications. + + Parameters + ---------- + x, y : array-like + 1-d arrays of samples. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + Let *F(u)* and *G(u)* be the cumulative distribution functions of the + distributions underlying `x` and `y`, respectively. Then the following + alternative hypotheses are available: + + * 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)* for + at least one *u*. + * 'less': the distribution underlying `x` is stochastically less than + the distribution underlying `y`, i.e. *F(u) >= G(u)* for all *u*. + * 'greater': the distribution underlying `x` is stochastically greater + than the distribution underlying `y`, i.e. *F(u) <= G(u)* for all + *u*. + + Under a more restrictive set of assumptions, the alternative hypotheses + can be expressed in terms of the locations of the distributions; + see [2] section 5.1. + method : PermutationMethod, optional + Configures the method used to compute the p-value. The default is + the default `PermutationMethod` object. + + Returns + ------- + res : PermutationTestResult + An object with attributes: + + statistic : float + The observed test statistic of the data. + pvalue : float + The p-value for the given alternative. + null_distribution : ndarray + The values of the test statistic generated under the null hypothesis. + + See also + -------- + scipy.stats.wilcoxon, scipy.stats.mannwhitneyu, scipy.stats.ttest_ind + + Notes + ----- + When ``alternative=='two-sided'``, the statistic is defined by the + equations given in [1]_ Section 2. This statistic is not appropriate for + one-sided alternatives; in that case, the statistic is the *negative* of + that given by the equations in [1]_ Section 2. Consequently, when the + distribution of the first sample is stochastically greater than that of the + second sample, the statistic will tend to be positive. + + References + ---------- + .. [1] Neuhäuser, M. (2005). Exact Tests Based on the + Baumgartner-Weiss-Schindler Statistic: A Survey. Statistical Papers, + 46(1), 1-29. + .. [2] Fay, M. P., & Proschan, M. A. (2010). Wilcoxon-Mann-Whitney or t-test? + On assumptions for hypothesis tests and multiple interpretations of + decision rules. Statistics surveys, 4, 1. + + Examples + -------- + We follow the example of table 3 in [1]_: Fourteen children were divided + randomly into two groups. Their ranks at performing a specific tests are + as follows. + + >>> import numpy as np + >>> x = [1, 2, 3, 4, 6, 7, 8] + >>> y = [5, 9, 10, 11, 12, 13, 14] + + We use the BWS test to assess whether there is a statistically significant + difference between the two groups. + The null hypothesis is that there is no difference in the distributions of + performance between the two groups. We decide that a significance level of + 1% is required to reject the null hypothesis in favor of the alternative + that the distributions are different. + Since the number of samples is very small, we can compare the observed test + statistic against the *exact* distribution of the test statistic under the + null hypothesis. 
+ + >>> from scipy.stats import bws_test + >>> res = bws_test(x, y) + >>> print(res.statistic) + 5.132167152575315 + + This agrees with :math:`B = 5.132` reported in [1]_. The *p*-value produced + by `bws_test` also agrees with :math:`p = 0.0029` reported in [1]_. + + >>> print(res.pvalue) + 0.002913752913752914 + + Because the p-value is below our threshold of 1%, we take this as evidence + against the null hypothesis in favor of the alternative that there is a + difference in performance between the two groups. + ''' + + x, y, alternative, method = _bws_input_validation(x, y, alternative, + method) + bws_statistic = partial(_bws_statistic, alternative=alternative) + + permutation_alternative = 'less' if alternative == 'less' else 'greater' + res = stats.permutation_test((x, y), bws_statistic, + alternative=permutation_alternative, + **method._asdict()) + + return res diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_common.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..4011d425cc4afea3c7ee8937526b13f1f92b0850 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_common.py @@ -0,0 +1,5 @@ +from collections import namedtuple + + +ConfidenceInterval = namedtuple("ConfidenceInterval", ["low", "high"]) +ConfidenceInterval. __doc__ = "Class for confidence intervals." diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py new file mode 100644 index 0000000000000000000000000000000000000000..3eb1e85be30c7e4ebc67113ede53e687f48d955d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py @@ -0,0 +1,4098 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +from scipy._lib._util import getfullargspec_no_self as _getfullargspec + +import sys +import keyword +import re +import types +import warnings +from itertools import zip_longest + +from scipy._lib import doccer +from ._distr_params import distcont, distdiscrete +from scipy._lib._util import check_random_state + +from scipy.special import comb, entr + + +# for root finding for continuous distribution ppf, and maximum likelihood +# estimation +from scipy import optimize + +# for functions of continuous distributions (e.g. moments, entropy, cdf) +from scipy import integrate + +# to approximate the pdf of a continuous distribution given its cdf +from scipy._lib._finite_differences import _derivative + +# for scipy.stats.entropy. Attempts to import just that function or file +# have cause import problems +from scipy import stats + +from numpy import (arange, putmask, ones, shape, ndarray, zeros, floor, + logical_and, log, sqrt, place, argmax, vectorize, asarray, + nan, inf, isinf, empty) + +import numpy as np +from ._constants import _XMAX, _LOGXMAX +from ._censored_data import CensoredData +from scipy.stats._warnings_errors import FitError + +# These are the docstring parts used for substitution in specific +# distribution docstrings + +docheaders = {'methods': """\nMethods\n-------\n""", + 'notes': """\nNotes\n-----\n""", + 'examples': """\nExamples\n--------\n"""} + +_doc_rvs = """\ +rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None) + Random variates. +""" +_doc_pdf = """\ +pdf(x, %(shapes)s, loc=0, scale=1) + Probability density function. 
+""" +_doc_logpdf = """\ +logpdf(x, %(shapes)s, loc=0, scale=1) + Log of the probability density function. +""" +_doc_pmf = """\ +pmf(k, %(shapes)s, loc=0, scale=1) + Probability mass function. +""" +_doc_logpmf = """\ +logpmf(k, %(shapes)s, loc=0, scale=1) + Log of the probability mass function. +""" +_doc_cdf = """\ +cdf(x, %(shapes)s, loc=0, scale=1) + Cumulative distribution function. +""" +_doc_logcdf = """\ +logcdf(x, %(shapes)s, loc=0, scale=1) + Log of the cumulative distribution function. +""" +_doc_sf = """\ +sf(x, %(shapes)s, loc=0, scale=1) + Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate). +""" # noqa: E501 +_doc_logsf = """\ +logsf(x, %(shapes)s, loc=0, scale=1) + Log of the survival function. +""" +_doc_ppf = """\ +ppf(q, %(shapes)s, loc=0, scale=1) + Percent point function (inverse of ``cdf`` --- percentiles). +""" +_doc_isf = """\ +isf(q, %(shapes)s, loc=0, scale=1) + Inverse survival function (inverse of ``sf``). +""" +_doc_moment = """\ +moment(order, %(shapes)s, loc=0, scale=1) + Non-central moment of the specified order. +""" +_doc_stats = """\ +stats(%(shapes)s, loc=0, scale=1, moments='mv') + Mean('m'), variance('v'), skew('s'), and/or kurtosis('k'). +""" +_doc_entropy = """\ +entropy(%(shapes)s, loc=0, scale=1) + (Differential) entropy of the RV. +""" +_doc_fit = """\ +fit(data) + Parameter estimates for generic data. + See `scipy.stats.rv_continuous.fit `__ for detailed documentation of the + keyword arguments. +""" # noqa: E501 +_doc_expect = """\ +expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds) + Expected value of a function (of one argument) with respect to the distribution. +""" # noqa: E501 +_doc_expect_discrete = """\ +expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False) + Expected value of a function (of one argument) with respect to the distribution. +""" +_doc_median = """\ +median(%(shapes)s, loc=0, scale=1) + Median of the distribution. +""" +_doc_mean = """\ +mean(%(shapes)s, loc=0, scale=1) + Mean of the distribution. +""" +_doc_var = """\ +var(%(shapes)s, loc=0, scale=1) + Variance of the distribution. +""" +_doc_std = """\ +std(%(shapes)s, loc=0, scale=1) + Standard deviation of the distribution. +""" +_doc_interval = """\ +interval(confidence, %(shapes)s, loc=0, scale=1) + Confidence interval with equal areas around the median. +""" +_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf, + _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf, + _doc_logsf, _doc_ppf, _doc_isf, _doc_moment, + _doc_stats, _doc_entropy, _doc_fit, + _doc_expect, _doc_median, + _doc_mean, _doc_var, _doc_std, _doc_interval]) + +_doc_default_longsummary = """\ +As an instance of the `rv_continuous` class, `%(name)s` object inherits from it +a collection of generic methods (see below for the full list), +and completes them with details specific for this particular distribution. +""" + +_doc_default_frozen_note = """ +Alternatively, the object may be called (as a function) to fix the shape, +location, and scale parameters returning a "frozen" continuous RV object: + +rv = %(name)s(%(shapes)s, loc=0, scale=1) + - Frozen RV object with the same methods but holding the given shape, + location, and scale fixed. 
+""" +_doc_default_example = """\ +Examples +-------- +>>> import numpy as np +>>> from scipy.stats import %(name)s +>>> import matplotlib.pyplot as plt +>>> fig, ax = plt.subplots(1, 1) + +Calculate the first four moments: + +%(set_vals_stmt)s +>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') + +Display the probability density function (``pdf``): + +>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s), +... %(name)s.ppf(0.99, %(shapes)s), 100) +>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s), +... 'r-', lw=5, alpha=0.6, label='%(name)s pdf') + +Alternatively, the distribution object can be called (as a function) +to fix the shape, location and scale parameters. This returns a "frozen" +RV object holding the given parameters fixed. + +Freeze the distribution and display the frozen ``pdf``: + +>>> rv = %(name)s(%(shapes)s) +>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf') + +Check accuracy of ``cdf`` and ``ppf``: + +>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s) +>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s)) +True + +Generate random numbers: + +>>> r = %(name)s.rvs(%(shapes)s, size=1000) + +And compare the histogram: + +>>> ax.hist(r, density=True, bins='auto', histtype='stepfilled', alpha=0.2) +>>> ax.set_xlim([x[0], x[-1]]) +>>> ax.legend(loc='best', frameon=False) +>>> plt.show() + +""" + +_doc_default_locscale = """\ +The probability density above is defined in the "standardized" form. To shift +and/or scale the distribution use the ``loc`` and ``scale`` parameters. +Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically +equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with +``y = (x - loc) / scale``. Note that shifting the location of a distribution +does not make it a "noncentral" distribution; noncentral generalizations of +some distributions are available in separate classes. +""" + +_doc_default = ''.join([_doc_default_longsummary, + _doc_allmethods, + '\n', + _doc_default_example]) + +_doc_default_before_notes = ''.join([_doc_default_longsummary, + _doc_allmethods]) + +docdict = { + 'rvs': _doc_rvs, + 'pdf': _doc_pdf, + 'logpdf': _doc_logpdf, + 'cdf': _doc_cdf, + 'logcdf': _doc_logcdf, + 'sf': _doc_sf, + 'logsf': _doc_logsf, + 'ppf': _doc_ppf, + 'isf': _doc_isf, + 'stats': _doc_stats, + 'entropy': _doc_entropy, + 'fit': _doc_fit, + 'moment': _doc_moment, + 'expect': _doc_expect, + 'interval': _doc_interval, + 'mean': _doc_mean, + 'std': _doc_std, + 'var': _doc_var, + 'median': _doc_median, + 'allmethods': _doc_allmethods, + 'longsummary': _doc_default_longsummary, + 'frozennote': _doc_default_frozen_note, + 'example': _doc_default_example, + 'default': _doc_default, + 'before_notes': _doc_default_before_notes, + 'after_notes': _doc_default_locscale +} + +# Reuse common content between continuous and discrete docs, change some +# minor bits. 
+docdict_discrete = docdict.copy() + +docdict_discrete['pmf'] = _doc_pmf +docdict_discrete['logpmf'] = _doc_logpmf +docdict_discrete['expect'] = _doc_expect_discrete +_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf', + 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median', + 'mean', 'var', 'std', 'interval'] +for obj in _doc_disc_methods: + docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '') + +_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf'] +for obj in _doc_disc_methods_err_varname: + docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ') + +docdict_discrete.pop('pdf') +docdict_discrete.pop('logpdf') + +_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods]) +docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods + +docdict_discrete['longsummary'] = _doc_default_longsummary.replace( + 'rv_continuous', 'rv_discrete') + +_doc_default_frozen_note = """ +Alternatively, the object may be called (as a function) to fix the shape and +location parameters returning a "frozen" discrete RV object: + +rv = %(name)s(%(shapes)s, loc=0) + - Frozen RV object with the same methods but holding the given shape and + location fixed. +""" +docdict_discrete['frozennote'] = _doc_default_frozen_note + +_doc_default_discrete_example = """\ +Examples +-------- +>>> import numpy as np +>>> from scipy.stats import %(name)s +>>> import matplotlib.pyplot as plt +>>> fig, ax = plt.subplots(1, 1) + +Calculate the first four moments: + +%(set_vals_stmt)s +>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') + +Display the probability mass function (``pmf``): + +>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s), +... %(name)s.ppf(0.99, %(shapes)s)) +>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf') +>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5) + +Alternatively, the distribution object can be called (as a function) +to fix the shape and location. This returns a "frozen" RV object holding +the given parameters fixed. + +Freeze the distribution and display the frozen ``pmf``: + +>>> rv = %(name)s(%(shapes)s) +>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1, +... label='frozen pmf') +>>> ax.legend(loc='best', frameon=False) +>>> plt.show() + +Check accuracy of ``cdf`` and ``ppf``: + +>>> prob = %(name)s.cdf(x, %(shapes)s) +>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s)) +True + +Generate random numbers: + +>>> r = %(name)s.rvs(%(shapes)s, size=1000) +""" + + +_doc_default_discrete_locscale = """\ +The probability mass function above is defined in the "standardized" form. +To shift distribution use the ``loc`` parameter. +Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically +equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``. 
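A small sketch of the ``loc`` shift described above for discrete distributions (illustrative, not part of the template text; `poisson` is just an example):

>>> from scipy.stats import poisson
>>> bool(poisson.pmf(3, mu=2.0, loc=1) == poisson.pmf(2, mu=2.0))
True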
+""" + +docdict_discrete['example'] = _doc_default_discrete_example +docdict_discrete['after_notes'] = _doc_default_discrete_locscale + +_doc_default_before_notes = ''.join([docdict_discrete['longsummary'], + docdict_discrete['allmethods']]) +docdict_discrete['before_notes'] = _doc_default_before_notes + +_doc_default_disc = ''.join([docdict_discrete['longsummary'], + docdict_discrete['allmethods'], + docdict_discrete['frozennote'], + docdict_discrete['example']]) +docdict_discrete['default'] = _doc_default_disc + +# clean up all the separate docstring elements, we do not need them anymore +for obj in [s for s in dir() if s.startswith('_doc_')]: + exec('del ' + obj) +del obj + + +def _moment(data, n, mu=None): + if mu is None: + mu = data.mean() + return ((data - mu)**n).mean() + + +def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args): + if (n == 0): + return 1.0 + elif (n == 1): + if mu is None: + val = moment_func(1, *args) + else: + val = mu + elif (n == 2): + if mu2 is None or mu is None: + val = moment_func(2, *args) + else: + val = mu2 + mu*mu + elif (n == 3): + if g1 is None or mu2 is None or mu is None: + val = moment_func(3, *args) + else: + mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment + val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment + elif (n == 4): + if g1 is None or g2 is None or mu2 is None or mu is None: + val = moment_func(4, *args) + else: + mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment + mu3 = g1*np.power(mu2, 1.5) # 3rd central moment + val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu + else: + val = moment_func(n, *args) + + return val + + +def _skew(data): + """ + skew is third central moment / variance**(1.5) + """ + data = np.ravel(data) + mu = data.mean() + m2 = ((data - mu)**2).mean() + m3 = ((data - mu)**3).mean() + return m3 / np.power(m2, 1.5) + + +def _kurtosis(data): + """kurtosis is fourth central moment / variance**2 - 3.""" + data = np.ravel(data) + mu = data.mean() + m2 = ((data - mu)**2).mean() + m4 = ((data - mu)**4).mean() + return m4 / m2**2 - 3 + + +def _fit_determine_optimizer(optimizer): + if not callable(optimizer) and isinstance(optimizer, str): + if not optimizer.startswith('fmin_'): + optimizer = "fmin_"+optimizer + if optimizer == 'fmin_': + optimizer = 'fmin' + try: + optimizer = getattr(optimize, optimizer) + except AttributeError as e: + raise ValueError("%s is not a valid optimizer" % optimizer) from e + return optimizer + + +def _sum_finite(x): + """ + For a 1D array x, return a tuple containing the sum of the + finite values of x and the number of nonfinite values. + + This is a utility function used when evaluating the negative + loglikelihood for a distribution and an array of samples. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import _sum_finite + >>> tot, nbad = _sum_finite(np.array([-2, -np.inf, 5, 1])) + >>> tot + 4.0 + >>> nbad + 1 + """ + finite_x = np.isfinite(x) + bad_count = finite_x.size - np.count_nonzero(finite_x) + return np.sum(x[finite_x]), bad_count + + +# Frozen RV class +class rv_frozen: + + def __init__(self, dist, *args, **kwds): + self.args = args + self.kwds = kwds + + # create a new instance + self.dist = dist.__class__(**dist._updated_ctor_param()) + + shapes, _, _ = self.dist._parse_args(*args, **kwds) + self.a, self.b = self.dist._get_support(*shapes) + + @property + def random_state(self): + return self.dist._random_state + + @random_state.setter + def random_state(self, seed): + self.dist._random_state = check_random_state(seed) + + def cdf(self, x): + return self.dist.cdf(x, *self.args, **self.kwds) + + def logcdf(self, x): + return self.dist.logcdf(x, *self.args, **self.kwds) + + def ppf(self, q): + return self.dist.ppf(q, *self.args, **self.kwds) + + def isf(self, q): + return self.dist.isf(q, *self.args, **self.kwds) + + def rvs(self, size=None, random_state=None): + kwds = self.kwds.copy() + kwds.update({'size': size, 'random_state': random_state}) + return self.dist.rvs(*self.args, **kwds) + + def sf(self, x): + return self.dist.sf(x, *self.args, **self.kwds) + + def logsf(self, x): + return self.dist.logsf(x, *self.args, **self.kwds) + + def stats(self, moments='mv'): + kwds = self.kwds.copy() + kwds.update({'moments': moments}) + return self.dist.stats(*self.args, **kwds) + + def median(self): + return self.dist.median(*self.args, **self.kwds) + + def mean(self): + return self.dist.mean(*self.args, **self.kwds) + + def var(self): + return self.dist.var(*self.args, **self.kwds) + + def std(self): + return self.dist.std(*self.args, **self.kwds) + + def moment(self, order=None): + return self.dist.moment(order, *self.args, **self.kwds) + + def entropy(self): + return self.dist.entropy(*self.args, **self.kwds) + + def interval(self, confidence=None): + return self.dist.interval(confidence, *self.args, **self.kwds) + + def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds): + # expect method only accepts shape parameters as positional args + # hence convert self.args, self.kwds, also loc/scale + # See the .expect method docstrings for the meaning of + # other parameters. + a, loc, scale = self.dist._parse_args(*self.args, **self.kwds) + if isinstance(self.dist, rv_discrete): + return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds) + else: + return self.dist.expect(func, a, loc, scale, lb, ub, + conditional, **kwds) + + def support(self): + return self.dist.support(*self.args, **self.kwds) + + +class rv_discrete_frozen(rv_frozen): + + def pmf(self, k): + return self.dist.pmf(k, *self.args, **self.kwds) + + def logpmf(self, k): # No error + return self.dist.logpmf(k, *self.args, **self.kwds) + + +class rv_continuous_frozen(rv_frozen): + + def pdf(self, x): + return self.dist.pdf(x, *self.args, **self.kwds) + + def logpdf(self, x): + return self.dist.logpdf(x, *self.args, **self.kwds) + + +def argsreduce(cond, *args): + """Clean arguments to: + + 1. Ensure all arguments are iterable (arrays of dimension at least one + 2. If cond != True and size > 1, ravel(args[i]) where ravel(condition) is + True, in 1D. + + Return list of processed arguments. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import argsreduce + >>> rng = np.random.default_rng() + >>> A = rng.random((4, 5)) + >>> B = 2 + >>> C = rng.random((1, 5)) + >>> cond = np.ones(A.shape) + >>> [A1, B1, C1] = argsreduce(cond, A, B, C) + >>> A1.shape + (4, 5) + >>> B1.shape + (1,) + >>> C1.shape + (1, 5) + >>> cond[2,:] = 0 + >>> [A1, B1, C1] = argsreduce(cond, A, B, C) + >>> A1.shape + (15,) + >>> B1.shape + (1,) + >>> C1.shape + (15,) + + """ + # some distributions assume arguments are iterable. + newargs = np.atleast_1d(*args) + + # np.atleast_1d returns an array if only one argument, or a list of arrays + # if more than one argument. + if not isinstance(newargs, (list, tuple)): + newargs = (newargs,) + + if np.all(cond): + # broadcast arrays with cond + *newargs, cond = np.broadcast_arrays(*newargs, cond) + return [arg.ravel() for arg in newargs] + + s = cond.shape + # np.extract returns flattened arrays, which are not broadcastable together + # unless they are either the same size or size == 1. + return [(arg if np.size(arg) == 1 + else np.extract(cond, np.broadcast_to(arg, s))) + for arg in newargs] + + +parse_arg_template = """ +def _parse_args(self, %(shape_arg_str)s %(locscale_in)s): + return (%(shape_arg_str)s), %(locscale_out)s + +def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None): + return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size) + +def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'): + return (%(shape_arg_str)s), %(locscale_out)s, moments +""" + + +class rv_generic: + """Class which encapsulates common functionality between rv_discrete + and rv_continuous. + + """ + + def __init__(self, seed=None): + super().__init__() + + # figure out if _stats signature has 'moments' keyword + sig = _getfullargspec(self._stats) + self._stats_has_moments = ((sig.varkw is not None) or + ('moments' in sig.args) or + ('moments' in sig.kwonlyargs)) + self._random_state = check_random_state(seed) + + @property + def random_state(self): + """Get or set the generator object for generating random variates. + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, that instance is used. + + """ + return self._random_state + + @random_state.setter + def random_state(self, seed): + self._random_state = check_random_state(seed) + + def __setstate__(self, state): + try: + self.__dict__.update(state) + # attaches the dynamically created methods on each instance. + # if a subclass overrides rv_generic.__setstate__, or implements + # it's own _attach_methods, then it must make sure that + # _attach_argparser_methods is called. + self._attach_methods() + except ValueError: + # reconstitute an old pickle scipy<1.6, that contains + # (_ctor_param, random_state) as state + self._ctor_param = state[0] + self._random_state = state[1] + self.__init__() + + def _attach_methods(self): + """Attaches dynamically created methods to the rv_* instance. + + This method must be overridden by subclasses, and must itself call + _attach_argparser_methods. 
This method is called in __init__ in + subclasses, and in __setstate__ + """ + raise NotImplementedError + + def _attach_argparser_methods(self): + """ + Generates the argument-parsing functions dynamically and attaches + them to the instance. + + Should be called from `_attach_methods`, typically in __init__ and + during unpickling (__setstate__) + """ + ns = {} + exec(self._parse_arg_template, ns) + # NB: attach to the instance, not class + for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']: + setattr(self, name, types.MethodType(ns[name], self)) + + def _construct_argparser( + self, meths_to_inspect, locscale_in, locscale_out): + """Construct the parser string for the shape arguments. + + This method should be called in __init__ of a class for each + distribution. It creates the `_parse_arg_template` attribute that is + then used by `_attach_argparser_methods` to dynamically create and + attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs` + methods to the instance. + + If self.shapes is a non-empty string, interprets it as a + comma-separated list of shape parameters. + + Otherwise inspects the call signatures of `meths_to_inspect` + and constructs the argument-parsing functions from these. + In this case also sets `shapes` and `numargs`. + """ + + if self.shapes: + # sanitize the user-supplied shapes + if not isinstance(self.shapes, str): + raise TypeError('shapes must be a string.') + + shapes = self.shapes.replace(',', ' ').split() + + for field in shapes: + if keyword.iskeyword(field): + raise SyntaxError('keywords cannot be used as shapes.') + if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field): + raise SyntaxError( + 'shapes must be valid python identifiers') + else: + # find out the call signatures (_pdf, _cdf etc), deduce shape + # arguments. Generic methods only have 'self, x', any further args + # are shapes. 
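+            # For example, a subclass whose private method is defined as
+            #     def _pdf(self, x, a, b): ...
+            # is inspected here as a bound method, so the reported argument
+            # list is ['x', 'a', 'b']; dropping the leading 'x' leaves the
+            # shape names ['a', 'b'].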
+ shapes_list = [] + for meth in meths_to_inspect: + shapes_args = _getfullargspec(meth) # NB does not contain self + args = shapes_args.args[1:] # peel off 'x', too + + if args: + shapes_list.append(args) + + # *args or **kwargs are not allowed w/automatic shapes + if shapes_args.varargs is not None: + raise TypeError( + '*args are not allowed w/out explicit shapes') + if shapes_args.varkw is not None: + raise TypeError( + '**kwds are not allowed w/out explicit shapes') + if shapes_args.kwonlyargs: + raise TypeError( + 'kwonly args are not allowed w/out explicit shapes') + if shapes_args.defaults is not None: + raise TypeError('defaults are not allowed for shapes') + + if shapes_list: + shapes = shapes_list[0] + + # make sure the signatures are consistent + for item in shapes_list: + if item != shapes: + raise TypeError('Shape arguments are inconsistent.') + else: + shapes = [] + + # have the arguments, construct the method from template + shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None + dct = dict(shape_arg_str=shapes_str, + locscale_in=locscale_in, + locscale_out=locscale_out, + ) + + # this string is used by _attach_argparser_methods + self._parse_arg_template = parse_arg_template % dct + + self.shapes = ', '.join(shapes) if shapes else None + if not hasattr(self, 'numargs'): + # allows more general subclassing with *args + self.numargs = len(shapes) + + def _construct_doc(self, docdict, shapes_vals=None): + """Construct the instance docstring with string substitutions.""" + tempdict = docdict.copy() + tempdict['name'] = self.name or 'distname' + tempdict['shapes'] = self.shapes or '' + + if shapes_vals is None: + shapes_vals = () + vals = ', '.join('%.3g' % val for val in shapes_vals) + tempdict['vals'] = vals + + tempdict['shapes_'] = self.shapes or '' + if self.shapes and self.numargs == 1: + tempdict['shapes_'] += ',' + + if self.shapes: + tempdict['set_vals_stmt'] = f'>>> {self.shapes} = {vals}' + else: + tempdict['set_vals_stmt'] = '' + + if self.shapes is None: + # remove shapes from call parameters if there are none + for item in ['default', 'before_notes']: + tempdict[item] = tempdict[item].replace( + "\n%(shapes)s : array_like\n shape parameters", "") + for i in range(2): + if self.shapes is None: + # necessary because we use %(shapes)s in two forms (w w/o ", ") + self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") + try: + self.__doc__ = doccer.docformat(self.__doc__, tempdict) + except TypeError as e: + raise Exception("Unable to construct docstring for " + f"distribution \"{self.name}\": {repr(e)}") from e + + # correct for empty shapes + self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')') + + def _construct_default_doc(self, longname=None, + docdict=None, discrete='continuous'): + """Construct instance docstring from the default template.""" + if longname is None: + longname = 'A' + self.__doc__ = ''.join([f'{longname} {discrete} random variable.', + '\n\n%(before_notes)s\n', docheaders['notes'], + '\n%(example)s']) + self._construct_doc(docdict) + + def freeze(self, *args, **kwds): + """Freeze the distribution for the given arguments. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution. Should include all + the non-optional arguments, may include ``loc`` and ``scale``. + + Returns + ------- + rv_frozen : rv_frozen instance + The frozen distribution. 
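+
+        Examples
+        --------
+        As a brief illustration, freezing the normal distribution with
+        ``loc=1`` and ``scale=2`` fixes those parameters for all methods
+        of the returned object:
+
+        >>> from scipy import stats
+        >>> rv = stats.norm(loc=1., scale=2.)
+        >>> float(rv.mean()), float(rv.std())
+        (1.0, 2.0)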
+ + """ + if isinstance(self, rv_continuous): + return rv_continuous_frozen(self, *args, **kwds) + else: + return rv_discrete_frozen(self, *args, **kwds) + + def __call__(self, *args, **kwds): + return self.freeze(*args, **kwds) + __call__.__doc__ = freeze.__doc__ + + # The actual calculation functions (no basic checking need be done) + # If these are defined, the others won't be looked at. + # Otherwise, the other set can be defined. + def _stats(self, *args, **kwds): + return None, None, None, None + + # Noncentral moments (also known as the moment about the origin). + # Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime". + # The primed mu is a widely used notation for the noncentral moment. + def _munp(self, n, *args): + # Silence floating point warnings from integration. + with np.errstate(all='ignore'): + vals = self.generic_moment(n, *args) + return vals + + def _argcheck_rvs(self, *args, **kwargs): + # Handle broadcasting and size validation of the rvs method. + # Subclasses should not have to override this method. + # The rule is that if `size` is not None, then `size` gives the + # shape of the result (integer values of `size` are treated as + # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.) + # + # `args` is expected to contain the shape parameters (if any), the + # location and the scale in a flat tuple (e.g. if there are two + # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`). + # The only keyword argument expected is 'size'. + size = kwargs.get('size', None) + all_bcast = np.broadcast_arrays(*args) + + def squeeze_left(a): + while a.ndim > 0 and a.shape[0] == 1: + a = a[0] + return a + + # Eliminate trivial leading dimensions. In the convention + # used by numpy's random variate generators, trivial leading + # dimensions are effectively ignored. In other words, when `size` + # is given, trivial leading dimensions of the broadcast parameters + # in excess of the number of dimensions in size are ignored, e.g. + # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3) + # array([ 1.00104267, 3.00422496, 4.99799278]) + # If `size` is not given, the exact broadcast shape is preserved: + # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]]) + # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]]) + # + all_bcast = [squeeze_left(a) for a in all_bcast] + bcast_shape = all_bcast[0].shape + bcast_ndim = all_bcast[0].ndim + + if size is None: + size_ = bcast_shape + else: + size_ = tuple(np.atleast_1d(size)) + + # Check compatibility of size_ with the broadcast shape of all + # the parameters. This check is intended to be consistent with + # how the numpy random variate generators (e.g. np.random.normal, + # np.random.beta) handle their arguments. The rule is that, if size + # is given, it determines the shape of the output. Broadcasting + # can't change the output size. + + # This is the standard broadcasting convention of extending the + # shape with fewer dimensions with enough dimensions of length 1 + # so that the two shapes have the same number of dimensions. + ndiff = bcast_ndim - len(size_) + if ndiff < 0: + bcast_shape = (1,)*(-ndiff) + bcast_shape + elif ndiff > 0: + size_ = (1,)*ndiff + size_ + + # This compatibility test is not standard. In "regular" broadcasting, + # two shapes are compatible if for each dimension, the lengths are the + # same or one of the lengths is 1. Here, the length of a dimension in + # size_ must not be less than the corresponding length in bcast_shape. 
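+        # For example, after the padding above, bcast_shape == (3, 1) is
+        # compatible with size_ == (3, 5), because every entry of bcast_shape
+        # is either 1 or equal to the corresponding entry of size_, whereas
+        # bcast_shape == (3,) with size=(4,) is rejected.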
+ ok = all([bcdim == 1 or bcdim == szdim + for (bcdim, szdim) in zip(bcast_shape, size_)]) + if not ok: + raise ValueError("size does not match the broadcast shape of " + f"the parameters. {size}, {size_}, {bcast_shape}") + + param_bcast = all_bcast[:-2] + loc_bcast = all_bcast[-2] + scale_bcast = all_bcast[-1] + + return param_bcast, loc_bcast, scale_bcast, size_ + + # These are the methods you must define (standard form functions) + # NB: generic _pdf, _logpdf, _cdf are different for + # rv_continuous and rv_discrete hence are defined in there + def _argcheck(self, *args): + """Default check for correct values on args and keywords. + + Returns condition array of 1's where arguments are correct and + 0's where they are not. + + """ + cond = 1 + for arg in args: + cond = logical_and(cond, (asarray(arg) > 0)) + return cond + + def _get_support(self, *args, **kwargs): + """Return the support of the (unscaled, unshifted) distribution. + + *Must* be overridden by distributions which have support dependent + upon the shape parameters of the distribution. Any such override + *must not* set or change any of the class members, as these members + are shared amongst all instances of the distribution. + + Parameters + ---------- + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + a, b : numeric (float, or int or +/-np.inf) + end-points of the distribution's support for the specified + shape parameters. + """ + return self.a, self.b + + def _support_mask(self, x, *args): + a, b = self._get_support(*args) + with np.errstate(invalid='ignore'): + return (a <= x) & (x <= b) + + def _open_support_mask(self, x, *args): + a, b = self._get_support(*args) + with np.errstate(invalid='ignore'): + return (a < x) & (x < b) + + def _rvs(self, *args, size=None, random_state=None): + # This method must handle size being a tuple, and it must + # properly broadcast *args and size. size might be + # an empty tuple, which means a scalar random variate is to be + # generated. + + # Use basic inverse cdf algorithm for RV generation as default. + U = random_state.uniform(size=size) + Y = self._ppf(U, *args) + return Y + + def _logcdf(self, x, *args): + with np.errstate(divide='ignore'): + return log(self._cdf(x, *args)) + + def _sf(self, x, *args): + return 1.0-self._cdf(x, *args) + + def _logsf(self, x, *args): + with np.errstate(divide='ignore'): + return log(self._sf(x, *args)) + + def _ppf(self, q, *args): + return self._ppfvec(q, *args) + + def _isf(self, q, *args): + return self._ppf(1.0-q, *args) # use correct _ppf for subclasses + + # These are actually called, and should not be overwritten if you + # want to keep error checking. + def rvs(self, *args, **kwds): + """Random variates of given type. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + scale : array_like, optional + Scale parameter (default=1). + size : int or tuple of ints, optional + Defining number of random variates (default is 1). + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is + used, seeded with `random_state`. 
+ If `random_state` is already a ``Generator`` or ``RandomState`` + instance, that instance is used. + + Returns + ------- + rvs : ndarray or scalar + Random variates of given `size`. + + """ + discrete = kwds.pop('discrete', None) + rndm = kwds.pop('random_state', None) + args, loc, scale, size = self._parse_args_rvs(*args, **kwds) + cond = logical_and(self._argcheck(*args), (scale >= 0)) + if not np.all(cond): + message = ("Domain error in arguments. The `scale` parameter must " + "be positive for all distributions, and many " + "distributions have restrictions on shape parameters. " + f"Please see the `scipy.stats.{self.name}` " + "documentation for details.") + raise ValueError(message) + + if np.all(scale == 0): + return loc*ones(size, 'd') + + # extra gymnastics needed for a custom random_state + if rndm is not None: + random_state_saved = self._random_state + random_state = check_random_state(rndm) + else: + random_state = self._random_state + + vals = self._rvs(*args, size=size, random_state=random_state) + + vals = vals * scale + loc + + # do not forget to restore the _random_state + if rndm is not None: + self._random_state = random_state_saved + + # Cast to int if discrete + if discrete and not isinstance(self, rv_sample): + if size == (): + vals = int(vals) + else: + vals = vals.astype(np.int64) + + return vals + + def stats(self, *args, **kwds): + """Some statistics of the given RV. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional (continuous RVs only) + scale parameter (default=1) + moments : str, optional + composed of letters ['mvsk'] defining which moments to compute: + 'm' = mean, + 'v' = variance, + 's' = (Fisher's) skew, + 'k' = (Fisher's) kurtosis. + (default is 'mv') + + Returns + ------- + stats : sequence + of requested moments. 
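+
+        Examples
+        --------
+        As a quick illustration, the standard normal distribution has mean
+        0 and variance 1:
+
+        >>> from scipy import stats
+        >>> m, v = stats.norm.stats(moments='mv')
+        >>> float(m), float(v)
+        (0.0, 1.0)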
+ + """ + args, loc, scale, moments = self._parse_args_stats(*args, **kwds) + # scale = 1 by construction for discrete RVs + loc, scale = map(asarray, (loc, scale)) + args = tuple(map(asarray, args)) + cond = self._argcheck(*args) & (scale > 0) & (loc == loc) + output = [] + default = np.full(shape(cond), fill_value=self.badvalue) + + # Use only entries that are valid in calculation + if np.any(cond): + goodargs = argsreduce(cond, *(args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + + if self._stats_has_moments: + mu, mu2, g1, g2 = self._stats(*goodargs, + **{'moments': moments}) + else: + mu, mu2, g1, g2 = self._stats(*goodargs) + + if 'm' in moments: + if mu is None: + mu = self._munp(1, *goodargs) + out0 = default.copy() + place(out0, cond, mu * scale + loc) + output.append(out0) + + if 'v' in moments: + if mu2 is None: + mu2p = self._munp(2, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + # if mean is inf then var is also inf + with np.errstate(invalid='ignore'): + mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf) + out0 = default.copy() + place(out0, cond, mu2 * scale * scale) + output.append(out0) + + if 's' in moments: + if g1 is None: + mu3p = self._munp(3, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + if mu2 is None: + mu2p = self._munp(2, *goodargs) + with np.errstate(invalid='ignore'): + mu2 = mu2p - mu * mu + with np.errstate(invalid='ignore'): + mu3 = (-mu*mu - 3*mu2)*mu + mu3p + g1 = mu3 / np.power(mu2, 1.5) + out0 = default.copy() + place(out0, cond, g1) + output.append(out0) + + if 'k' in moments: + if g2 is None: + mu4p = self._munp(4, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + if mu2 is None: + mu2p = self._munp(2, *goodargs) + with np.errstate(invalid='ignore'): + mu2 = mu2p - mu * mu + if g1 is None: + mu3 = None + else: + # (mu2**1.5) breaks down for nan and inf + mu3 = g1 * np.power(mu2, 1.5) + if mu3 is None: + mu3p = self._munp(3, *goodargs) + with np.errstate(invalid='ignore'): + mu3 = (-mu * mu - 3 * mu2) * mu + mu3p + with np.errstate(invalid='ignore'): + mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p + g2 = mu4 / mu2**2.0 - 3.0 + out0 = default.copy() + place(out0, cond, g2) + output.append(out0) + else: # no valid args + output = [default.copy() for _ in moments] + + output = [out[()] for out in output] + if len(output) == 1: + return output[0] + else: + return tuple(output) + + def entropy(self, *args, **kwds): + """Differential entropy of the RV. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + scale : array_like, optional (continuous distributions only). + Scale parameter (default=1). 
+ + Notes + ----- + Entropy is defined base `e`: + + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import rv_discrete + >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5))) + >>> np.allclose(drv.entropy(), np.log(2.0)) + True + + """ + args, loc, scale = self._parse_args(*args, **kwds) + # NB: for discrete distributions scale=1 by construction in _parse_args + loc, scale = map(asarray, (loc, scale)) + args = tuple(map(asarray, args)) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + output = zeros(shape(cond0), 'd') + place(output, (1-cond0), self.badvalue) + goodargs = argsreduce(cond0, scale, *args) + goodscale = goodargs[0] + goodargs = goodargs[1:] + place(output, cond0, self.vecentropy(*goodargs) + log(goodscale)) + return output[()] + + def moment(self, order, *args, **kwds): + """non-central moment of distribution of specified order. + + Parameters + ---------- + order : int, order >= 1 + Order of moment. + arg1, arg2, arg3,... : float + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + """ + n = order + shapes, loc, scale = self._parse_args(*args, **kwds) + args = np.broadcast_arrays(*(*shapes, loc, scale)) + *shapes, loc, scale = args + + i0 = np.logical_and(self._argcheck(*shapes), scale > 0) + i1 = np.logical_and(i0, loc == 0) + i2 = np.logical_and(i0, loc != 0) + + args = argsreduce(i0, *shapes, loc, scale) + *shapes, loc, scale = args + + if (floor(n) != n): + raise ValueError("Moment must be an integer.") + if (n < 0): + raise ValueError("Moment must be positive.") + mu, mu2, g1, g2 = None, None, None, None + if (n > 0) and (n < 5): + if self._stats_has_moments: + mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'mvsk'}[n]} + else: + mdict = {} + mu, mu2, g1, g2 = self._stats(*shapes, **mdict) + val = np.empty(loc.shape) # val needs to be indexed by loc + val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes) + + # Convert to transformed X = L + S*Y + # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n) + result = zeros(i0.shape) + place(result, ~i0, self.badvalue) + + if i1.any(): + res1 = scale[loc == 0]**n * val[loc == 0] + place(result, i1, res1) + + if i2.any(): + mom = [mu, mu2, g1, g2] + arrs = [i for i in mom if i is not None] + idx = [i for i in range(4) if mom[i] is not None] + if any(idx): + arrs = argsreduce(loc != 0, *arrs) + j = 0 + for i in idx: + mom[i] = arrs[j] + j += 1 + mu, mu2, g1, g2 = mom + args = argsreduce(loc != 0, *shapes, loc, scale, val) + *shapes, loc, scale, val = args + + res2 = zeros(loc.shape, dtype='d') + fac = scale / loc + for k in range(n): + valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, + shapes) + res2 += comb(n, k, exact=True)*fac**k * valk + res2 += fac**n * val + res2 *= loc**n + place(result, i2, res2) + + return result[()] + + def median(self, *args, **kwds): + """Median of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + Location parameter, Default is 0. + scale : array_like, optional + Scale parameter, Default is 1. + + Returns + ------- + median : float + The median of the distribution. 
+ + See Also + -------- + rv_discrete.ppf + Inverse of the CDF + + """ + return self.ppf(0.5, *args, **kwds) + + def mean(self, *args, **kwds): + """Mean of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + mean : float + the mean of the distribution + + """ + kwds['moments'] = 'm' + res = self.stats(*args, **kwds) + if isinstance(res, ndarray) and res.ndim == 0: + return res[()] + return res + + def var(self, *args, **kwds): + """Variance of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + var : float + the variance of the distribution + + """ + kwds['moments'] = 'v' + res = self.stats(*args, **kwds) + if isinstance(res, ndarray) and res.ndim == 0: + return res[()] + return res + + def std(self, *args, **kwds): + """Standard deviation of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + std : float + standard deviation of the distribution + + """ + kwds['moments'] = 'v' + res = sqrt(self.stats(*args, **kwds)) + return res + + def interval(self, confidence, *args, **kwds): + """Confidence interval with equal areas around the median. + + Parameters + ---------- + confidence : array_like of float + Probability that an rv will be drawn from the returned range. + Each value should be in the range [0, 1]. + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + location parameter, Default is 0. + scale : array_like, optional + scale parameter, Default is 1. + + Returns + ------- + a, b : ndarray of float + end-points of range that contain ``100 * alpha %`` of the rv's + possible values. + + Notes + ----- + This is implemented as ``ppf([p_tail, 1-p_tail])``, where + ``ppf`` is the inverse cumulative distribution function and + ``p_tail = (1-confidence)/2``. Suppose ``[c, d]`` is the support of a + discrete distribution; then ``ppf([0, 1]) == (c-1, d)``. Therefore, + when ``confidence=1`` and the distribution is discrete, the left end + of the interval will be beyond the support of the distribution. + For discrete distributions, the interval will limit the probability + in each tail to be less than or equal to ``p_tail`` (usually + strictly less). + + """ + alpha = confidence + + alpha = asarray(alpha) + if np.any((alpha > 1) | (alpha < 0)): + raise ValueError("alpha must be between 0 and 1 inclusive") + q1 = (1.0-alpha)/2 + q2 = (1.0+alpha)/2 + a = self.ppf(q1, *args, **kwds) + b = self.ppf(q2, *args, **kwds) + return a, b + + def support(self, *args, **kwargs): + """Support of the distribution. + + Parameters + ---------- + arg1, arg2, ... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + location parameter, Default is 0. + scale : array_like, optional + scale parameter, Default is 1. + + Returns + ------- + a, b : array_like + end-points of the distribution's support. + + """ + args, loc, scale = self._parse_args(*args, **kwargs) + arrs = np.broadcast_arrays(*args, loc, scale) + args, loc, scale = arrs[:-2], arrs[-2], arrs[-1] + cond = self._argcheck(*args) & (scale > 0) + _a, _b = self._get_support(*args) + if cond.all(): + return _a * scale + loc, _b * scale + loc + elif cond.ndim == 0: + return self.badvalue, self.badvalue + # promote bounds to at least float to fill in the badvalue + _a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d') + out_a, out_b = _a * scale + loc, _b * scale + loc + place(out_a, 1-cond, self.badvalue) + place(out_b, 1-cond, self.badvalue) + return out_a, out_b + + def nnlf(self, theta, x): + """Negative loglikelihood function. + Notes + ----- + This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the + parameters (including loc and scale). + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = (asarray(x)-loc) / scale + n_log_scale = len(x) * log(scale) + if np.any(~self._support_mask(x, *args)): + return inf + return self._nnlf(x, *args) + n_log_scale + + def _nnlf(self, x, *args): + return -np.sum(self._logpxf(x, *args), axis=0) + + def _nlff_and_penalty(self, x, args, log_fitfun): + # negative log fit function + cond0 = ~self._support_mask(x, *args) + n_bad = np.count_nonzero(cond0, axis=0) + if n_bad > 0: + x = argsreduce(~cond0, x)[0] + logff = log_fitfun(x, *args) + finite_logff = np.isfinite(logff) + n_bad += np.sum(~finite_logff, axis=0) + if n_bad > 0: + penalty = n_bad * log(_XMAX) * 100 + return -np.sum(logff[finite_logff], axis=0) + penalty + return -np.sum(logff, axis=0) + + def _penalized_nnlf(self, theta, x): + """Penalized negative loglikelihood function. + i.e., - sum (log pdf(x, theta), axis=0) + penalty + where theta are the parameters (including loc and scale) + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = asarray((x-loc) / scale) + n_log_scale = len(x) * log(scale) + return self._nlff_and_penalty(x, args, self._logpxf) + n_log_scale + + def _penalized_nlpsf(self, theta, x): + """Penalized negative log product spacing function. 
+ i.e., - sum (log (diff (cdf (x, theta))), axis=0) + penalty + where theta are the parameters (including loc and scale) + Follows reference [1] of scipy.stats.fit + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = (np.sort(x) - loc)/scale + + def log_psf(x, *args): + x, lj = np.unique(x, return_counts=True) # fast for sorted x + cdf_data = self._cdf(x, *args) if x.size else [] + if not (x.size and 1 - cdf_data[-1] <= 0): + cdf = np.concatenate(([0], cdf_data, [1])) + lj = np.concatenate((lj, [1])) + else: + cdf = np.concatenate(([0], cdf_data)) + # here we could use logcdf w/ logsumexp trick to take differences, + # but in the context of the method, it seems unlikely to matter + return lj * np.log(np.diff(cdf) / lj) + + return self._nlff_and_penalty(x, args, log_psf) + + +class _ShapeInfo: + def __init__(self, name, integrality=False, domain=(-np.inf, np.inf), + inclusive=(True, True)): + self.name = name + self.integrality = integrality + + domain = list(domain) + if np.isfinite(domain[0]) and not inclusive[0]: + domain[0] = np.nextafter(domain[0], np.inf) + if np.isfinite(domain[1]) and not inclusive[1]: + domain[1] = np.nextafter(domain[1], -np.inf) + self.domain = domain + + +def _get_fixed_fit_value(kwds, names): + """ + Given names such as `['f0', 'fa', 'fix_a']`, check that there is + at most one non-None value in `kwds` associaed with those names. + Return that value, or None if none of the names occur in `kwds`. + As a side effect, all occurrences of those names in `kwds` are + removed. + """ + vals = [(name, kwds.pop(name)) for name in names if name in kwds] + if len(vals) > 1: + repeated = [name for name, val in vals] + raise ValueError("fit method got multiple keyword arguments to " + "specify the same fixed parameter: " + + ', '.join(repeated)) + return vals[0][1] if vals else None + + +# continuous random variables: implement maybe later +# +# hf --- Hazard Function (PDF / SF) +# chf --- Cumulative hazard function (-log(SF)) +# psf --- Probability sparsity function (reciprocal of the pdf) in +# units of percent-point-function (as a function of q). +# Also, the derivative of the percent-point function. + + +class rv_continuous(rv_generic): + """A generic continuous random variable class meant for subclassing. + + `rv_continuous` is a base class to construct specific distribution classes + and instances for continuous random variables. It cannot be used + directly as a distribution. + + Parameters + ---------- + momtype : int, optional + The type of generic moment calculation to use: 0 for pdf, 1 (default) + for ppf. + a : float, optional + Lower bound of the support of the distribution, default is minus + infinity. + b : float, optional + Upper bound of the support of the distribution, default is plus + infinity. + xtol : float, optional + The tolerance for fixed point calculation for generic ppf. + badvalue : float, optional + The value in a result arrays that indicates a value that for which + some argument restriction is violated, default is np.nan. + name : str, optional + The name of the instance. This string is used to construct the default + example for distributions. + longname : str, optional + This string is used as part of the first line of the docstring returned + when a subclass has no docstring of its own. Note: `longname` exists + for backwards compatibility, do not use for new subclasses. + shapes : str, optional + The shape of the distribution. 
For example ``"m, n"`` for a + distribution that takes two integers as the two shape arguments for all + its methods. If not provided, shape parameters will be inferred from + the signature of the private methods, ``_pdf`` and ``_cdf`` of the + instance. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + pdf + logpdf + cdf + logcdf + sf + logsf + ppf + isf + moment + stats + entropy + expect + median + mean + std + var + interval + __call__ + fit + fit_loc_scale + nnlf + support + + Notes + ----- + Public methods of an instance of a distribution class (e.g., ``pdf``, + ``cdf``) check their arguments and pass valid arguments to private, + computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid + if it is within the support of the distribution. + Whether a shape parameter is valid is decided by an ``_argcheck`` method + (which defaults to checking that its arguments are strictly positive.) + + **Subclassing** + + New random variables can be defined by subclassing the `rv_continuous` class + and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized + to location 0 and scale 1). + + If positive argument checking is not correct for your RV + then you will also need to re-define the ``_argcheck`` method. + + For most of the scipy.stats distributions, the support interval doesn't + depend on the shape parameters. ``x`` being in the support interval is + equivalent to ``self.a <= x <= self.b``. If either of the endpoints of + the support do depend on the shape parameters, then + i) the distribution must implement the ``_get_support`` method; and + ii) those dependent endpoints must be omitted from the distribution's + call to the ``rv_continuous`` initializer. + + Correct, but potentially slow defaults exist for the remaining + methods but for speed and/or accuracy you can over-ride:: + + _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf + + The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``, + applied to a uniform random variate. In order to generate random variates + efficiently, either the default ``_ppf`` needs to be overwritten (e.g. + if the inverse cdf can expressed in an explicit form) or a sampling + method needs to be implemented in a custom ``_rvs`` method. + + If possible, you should override ``_isf``, ``_sf`` or ``_logsf``. + The main reason would be to improve numerical accuracy: for example, + the survival function ``_sf`` is computed as ``1 - _cdf`` which can + result in loss of precision if ``_cdf(x)`` is close to one. + + **Methods that can be overwritten by subclasses** + :: + + _rvs + _pdf + _cdf + _sf + _ppf + _isf + _stats + _munp + _entropy + _argcheck + _get_support + + There are additional (internal and private) generic methods that can + be useful for cross-checking and for debugging, but might work in all + cases when directly called. + + A note on ``shapes``: subclasses need not specify them explicitly. In this + case, `shapes` will be automatically deduced from the signatures of the + overridden methods (`pdf`, `cdf` etc). + If, for some reason, you prefer to avoid relying on introspection, you can + specify ``shapes`` explicitly as an argument to the instance constructor. 
+ + + **Frozen Distributions** + + Normally, you must provide shape parameters (and, optionally, location and + scale parameters to each call of a method of a distribution. + + Alternatively, the object may be called (as a function) to fix the shape, + location, and scale parameters returning a "frozen" continuous RV object: + + rv = generic(, loc=0, scale=1) + `rv_frozen` object with the same methods but holding the given shape, + location, and scale fixed + + **Statistics** + + Statistics are computed using numerical integration by default. + For speed you can redefine this using ``_stats``: + + - take shape parameters and return mu, mu2, g1, g2 + - If you can't compute one of these, return it as None + - Can also be defined with a keyword argument ``moments``, which is a + string composed of "m", "v", "s", and/or "k". + Only the components appearing in string should be computed and + returned in the order "m", "v", "s", or "k" with missing values + returned as None. + + Alternatively, you can override ``_munp``, which takes ``n`` and shape + parameters and returns the n-th non-central moment of the distribution. + + **Deepcopying / Pickling** + + If a distribution or frozen distribution is deepcopied (pickled/unpickled, + etc.), any underlying random number generator is deepcopied with it. An + implication is that if a distribution relies on the singleton RandomState + before copying, it will rely on a copy of that random state after copying, + and ``np.random.seed`` will no longer control the state. + + Examples + -------- + To create a new Gaussian distribution, we would do the following: + + >>> from scipy.stats import rv_continuous + >>> class gaussian_gen(rv_continuous): + ... "Gaussian distribution" + ... def _pdf(self, x): + ... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi) + >>> gaussian = gaussian_gen(name='gaussian') + + ``scipy.stats`` distributions are *instances*, so here we subclass + `rv_continuous` and create an instance. With this, we now have + a fully functional distribution with all relevant methods automagically + generated by the framework. + + Note that above we defined a standard normal distribution, with zero mean + and unit variance. Shifting and scaling of the distribution can be done + by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)`` + essentially computes ``y = (x - loc) / scale`` and + ``gaussian._pdf(y) / scale``. 
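+
+    As a small check of the above, the density of ``gaussian`` at zero is the
+    familiar value ``1/sqrt(2*pi)``, and shifting ``loc`` moves the density
+    along with it:
+
+    >>> float(gaussian.pdf(0.))
+    0.3989422804014327
+    >>> bool(gaussian.pdf(2., loc=2.) == gaussian.pdf(0.))
+    True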
+ + """ + + def __init__(self, momtype=1, a=None, b=None, xtol=1e-14, + badvalue=None, name=None, longname=None, + shapes=None, seed=None): + + super().__init__(seed) + + # save the ctor parameters, cf generic freeze + self._ctor_param = dict( + momtype=momtype, a=a, b=b, xtol=xtol, + badvalue=badvalue, name=name, longname=longname, + shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + if name is None: + name = 'Distribution' + self.badvalue = badvalue + self.name = name + self.a = a + self.b = b + if a is None: + self.a = -inf + if b is None: + self.b = inf + self.xtol = xtol + self.moment_type = momtype + self.shapes = shapes + + self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf], + locscale_in='loc=0, scale=1', + locscale_out='loc, scale') + self._attach_methods() + + if longname is None: + if name[0] in ['aeiouAEIOU']: + hstr = "An " + else: + hstr = "A " + longname = hstr + name + + if sys.flags.optimize < 2: + # Skip adding docstrings if interpreter is run with -OO + if self.__doc__ is None: + self._construct_default_doc(longname=longname, + docdict=docdict, + discrete='continuous') + else: + dct = dict(distcont) + self._construct_doc(docdict, dct.get(self.name)) + + def __getstate__(self): + dct = self.__dict__.copy() + + # these methods will be remade in __setstate__ + # _random_state attribute is taken care of by rv_generic + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs", + "_cdfvec", "_ppfvec", "vecentropy", "generic_moment"] + [dct.pop(attr, None) for attr in attrs] + return dct + + def _attach_methods(self): + """ + Attaches dynamically created methods to the rv_continuous instance. + """ + # _attach_methods is responsible for calling _attach_argparser_methods + self._attach_argparser_methods() + + # nin correction + self._ppfvec = vectorize(self._ppf_single, otypes='d') + self._ppfvec.nin = self.numargs + 1 + self.vecentropy = vectorize(self._entropy, otypes='d') + self._cdfvec = vectorize(self._cdf_single, otypes='d') + self._cdfvec.nin = self.numargs + 1 + + if self.moment_type == 0: + self.generic_moment = vectorize(self._mom0_sc, otypes='d') + else: + self.generic_moment = vectorize(self._mom1_sc, otypes='d') + # Because of the *args argument of _mom0_sc, vectorize cannot count the + # number of arguments correctly. + self.generic_moment.nin = self.numargs + 1 + + def _updated_ctor_param(self): + """Return the current version of _ctor_param, possibly updated by user. + + Used by freezing. + Keep this in sync with the signature of __init__. + """ + dct = self._ctor_param.copy() + dct['a'] = self.a + dct['b'] = self.b + dct['xtol'] = self.xtol + dct['badvalue'] = self.badvalue + dct['name'] = self.name + dct['shapes'] = self.shapes + return dct + + def _ppf_to_solve(self, x, q, *args): + return self.cdf(*(x, )+args)-q + + def _ppf_single(self, q, *args): + factor = 10. 
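+        # Bracket the root for brentq below: an infinite support endpoint is
+        # replaced by a finite bracket that is widened geometrically by
+        # `factor` (e.g. -10, -100, -1000, ...) until it encloses the
+        # requested quantile.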
+ left, right = self._get_support(*args) + + if np.isinf(left): + left = min(-factor, right) + while self._ppf_to_solve(left, q, *args) > 0.: + left, right = left * factor, left + # left is now such that cdf(left) <= q + # if right has changed, then cdf(right) > q + + if np.isinf(right): + right = max(factor, left) + while self._ppf_to_solve(right, q, *args) < 0.: + left, right = right, right * factor + # right is now such that cdf(right) >= q + + return optimize.brentq(self._ppf_to_solve, + left, right, args=(q,)+args, xtol=self.xtol) + + # moment from definition + def _mom_integ0(self, x, m, *args): + return x**m * self.pdf(x, *args) + + def _mom0_sc(self, m, *args): + _a, _b = self._get_support(*args) + return integrate.quad(self._mom_integ0, _a, _b, + args=(m,)+args)[0] + + # moment calculated using ppf + def _mom_integ1(self, q, m, *args): + return (self.ppf(q, *args))**m + + def _mom1_sc(self, m, *args): + return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0] + + def _pdf(self, x, *args): + return _derivative(self._cdf, x, dx=1e-5, args=args, order=5) + + # Could also define any of these + def _logpdf(self, x, *args): + p = self._pdf(x, *args) + with np.errstate(divide='ignore'): + return log(p) + + def _logpxf(self, x, *args): + # continuous distributions have PDF, discrete have PMF, but sometimes + # the distinction doesn't matter. This lets us use `_logpxf` for both + # discrete and continuous distributions. + return self._logpdf(x, *args) + + def _cdf_single(self, x, *args): + _a, _b = self._get_support(*args) + return integrate.quad(self._pdf, _a, x, args=args)[0] + + def _cdf(self, x, *args): + return self._cdfvec(x, *args) + + # generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined + # in rv_generic + + def pdf(self, x, *args, **kwds): + """Probability density function at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + pdf : ndarray + Probability density function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._support_mask(x, *args) & (scale > 0) + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + putmask(output, (1-cond0)+np.isnan(x), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args+(scale,))) + scale, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._pdf(*goodargs) / scale) + if output.ndim == 0: + return output[()] + return output + + def logpdf(self, x, *args, **kwds): + """Log of the probability density function at x of the given RV. + + This uses a more numerically accurate calculation if available. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logpdf : array_like + Log of the probability density function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._support_mask(x, *args) & (scale > 0) + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + putmask(output, (1-cond0)+np.isnan(x), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args+(scale,))) + scale, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._logpdf(*goodargs) - log(scale)) + if output.ndim == 0: + return output[()] + return output + + def cdf(self, x, *args, **kwds): + """ + Cumulative distribution function of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + cdf : ndarray + Cumulative distribution function evaluated at `x` + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = (x >= np.asarray(_b)) & cond0 + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._cdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def logcdf(self, x, *args, **kwds): + """Log of the cumulative distribution function at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logcdf : array_like + Log of the cumulative distribution function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = (x >= _b) & cond0 + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._logcdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def sf(self, x, *args, **kwds): + """Survival function (1 - `cdf`) at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + sf : array_like + Survival function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = cond0 & (x <= _a) + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._sf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def logsf(self, x, *args, **kwds): + """Log of the survival function of the given RV. + + Returns the log of the "survival function," defined as (1 - `cdf`), + evaluated at `x`. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logsf : ndarray + Log of the survival function evaluated at `x`. 
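+
+        Examples
+        --------
+        As a quick numerical check, for the standard normal ``logsf(0)``
+        equals ``log(1/2)``:
+
+        >>> import numpy as np
+        >>> from scipy import stats
+        >>> bool(np.isclose(stats.norm.logsf(0.), np.log(0.5)))
+        True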
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = cond0 & (x <= _a) + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._logsf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def ppf(self, q, *args, **kwds): + """Percent point function (inverse of `cdf`) at q of the given RV. + + Parameters + ---------- + q : array_like + lower tail probability + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + x : array_like + quantile corresponding to the lower tail probability q. + + """ + args, loc, scale = self._parse_args(*args, **kwds) + q, loc, scale = map(asarray, (q, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + cond1 = (0 < q) & (q < 1) + cond2 = cond0 & (q == 0) + cond3 = cond0 & (q == 1) + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue) + + lower_bound = _a * scale + loc + upper_bound = _b * scale + loc + place(output, cond2, argsreduce(cond2, lower_bound)[0]) + place(output, cond3, argsreduce(cond3, upper_bound)[0]) + + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + place(output, cond, self._ppf(*goodargs) * scale + loc) + if output.ndim == 0: + return output[()] + return output + + def isf(self, q, *args, **kwds): + """Inverse survival function (inverse of `sf`) at q of the given RV. + + Parameters + ---------- + q : array_like + upper tail probability + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + x : ndarray or scalar + Quantile corresponding to the upper tail probability q. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + q, loc, scale = map(asarray, (q, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + cond1 = (0 < q) & (q < 1) + cond2 = cond0 & (q == 1) + cond3 = cond0 & (q == 0) + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue) + + lower_bound = _a * scale + loc + upper_bound = _b * scale + loc + place(output, cond2, argsreduce(cond2, lower_bound)[0]) + place(output, cond3, argsreduce(cond3, upper_bound)[0]) + + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + place(output, cond, self._isf(*goodargs) * scale + loc) + if output.ndim == 0: + return output[()] + return output + + def _unpack_loc_scale(self, theta): + try: + loc = theta[-2] + scale = theta[-1] + args = tuple(theta[:-2]) + except IndexError as e: + raise ValueError("Not enough input arguments.") from e + return loc, scale, args + + def _nnlf_and_penalty(self, x, args): + """ + Compute the penalized negative log-likelihood for the + "standardized" data (i.e. already shifted by loc and + scaled by scale) for the shape parameters in `args`. + + `x` can be a 1D numpy array or a CensoredData instance. + """ + if isinstance(x, CensoredData): + # Filter out the data that is not in the support. + xs = x._supported(*self._get_support(*args)) + n_bad = len(x) - len(xs) + i1, i2 = xs._interval.T + terms = [ + # logpdf of the noncensored data. + self._logpdf(xs._uncensored, *args), + # logcdf of the left-censored data. + self._logcdf(xs._left, *args), + # logsf of the right-censored data. + self._logsf(xs._right, *args), + # log of probability of the interval-censored data. + np.log(self._delta_cdf(i1, i2, *args)), + ] + else: + cond0 = ~self._support_mask(x, *args) + n_bad = np.count_nonzero(cond0) + if n_bad > 0: + x = argsreduce(~cond0, x)[0] + terms = [self._logpdf(x, *args)] + + totals, bad_counts = zip(*[_sum_finite(term) for term in terms]) + total = sum(totals) + n_bad += sum(bad_counts) + + return -total + n_bad * _LOGXMAX * 100 + + def _penalized_nnlf(self, theta, x): + """Penalized negative loglikelihood function. + + i.e., - sum (log pdf(x, theta), axis=0) + penalty + where theta are the parameters (including loc and scale) + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + if isinstance(x, CensoredData): + x = (x - loc) / scale + n_log_scale = (len(x) - x.num_censored()) * log(scale) + else: + x = (x - loc) / scale + n_log_scale = len(x) * log(scale) + + return self._nnlf_and_penalty(x, args) + n_log_scale + + def _fitstart(self, data, args=None): + """Starting point for fit (shape arguments + loc + scale).""" + if args is None: + args = (1.0,)*self.numargs + loc, scale = self._fit_loc_scale_support(data, *args) + return args + (loc, scale) + + def _reduce_func(self, args, kwds, data=None): + """ + Return the (possibly reduced) function to optimize in order to find MLE + estimates for the .fit method. + """ + # Convert fixed shape parameters to the standard numeric form: e.g. for + # stats.beta, shapes='a, b'. To fix `a`, the caller can give a value + # for `f0`, `fa` or 'fix_a'. The following converts the latter two + # into the first (numeric) form. 
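+        # For example, with stats.beta (shapes == 'a, b'), a call such as
+        # beta.fit(data, fa=1) or beta.fit(data, fix_a=1) is normalized here
+        # to the equivalent beta.fit(data, f0=1).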
+ shapes = [] + if self.shapes: + shapes = self.shapes.replace(',', ' ').split() + for j, s in enumerate(shapes): + key = 'f' + str(j) + names = [key, 'f' + s, 'fix_' + s] + val = _get_fixed_fit_value(kwds, names) + if val is not None: + kwds[key] = val + + args = list(args) + Nargs = len(args) + fixedn = [] + names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale'] + x0 = [] + for n, key in enumerate(names): + if key in kwds: + fixedn.append(n) + args[n] = kwds.pop(key) + else: + x0.append(args[n]) + + methods = {"mle", "mm"} + method = kwds.pop('method', "mle").lower() + if method == "mm": + n_params = len(shapes) + 2 - len(fixedn) + exponents = (np.arange(1, n_params+1))[:, np.newaxis] + data_moments = np.sum(data[None, :]**exponents/len(data), axis=1) + + def objective(theta, x): + return self._moment_error(theta, x, data_moments) + + elif method == "mle": + objective = self._penalized_nnlf + else: + raise ValueError(f"Method '{method}' not available; " + f"must be one of {methods}") + + if len(fixedn) == 0: + func = objective + restore = None + else: + if len(fixedn) == Nargs: + raise ValueError( + "All parameters fixed. There is nothing to optimize.") + + def restore(args, theta): + # Replace with theta for all numbers not in fixedn + # This allows the non-fixed values to vary, but + # we still call self.nnlf with all parameters. + i = 0 + for n in range(Nargs): + if n not in fixedn: + args[n] = theta[i] + i += 1 + return args + + def func(theta, x): + newtheta = restore(args[:], theta) + return objective(newtheta, x) + + return x0, func, restore, args + + def _moment_error(self, theta, x, data_moments): + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + + dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale) + for i in range(len(data_moments))]) + if np.any(np.isnan(dist_moments)): + raise ValueError("Method of moments encountered a non-finite " + "distribution moment and cannot continue. " + "Consider trying method='MLE'.") + + return (((data_moments - dist_moments) / + np.maximum(np.abs(data_moments), 1e-8))**2).sum() + + def fit(self, data, *args, **kwds): + r""" + Return estimates of shape (if applicable), location, and scale + parameters from data. The default estimation method is Maximum + Likelihood Estimation (MLE), but Method of Moments (MM) + is also available. + + Starting estimates for the fit are given by input arguments; + for any arguments not provided with starting estimates, + ``self._fitstart(data)`` is called to generate such. + + One can hold some parameters fixed to specific values by passing in + keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters) + and ``floc`` and ``fscale`` (for location and scale parameters, + respectively). + + Parameters + ---------- + data : array_like or `CensoredData` instance + Data to use in estimating the distribution parameters. + arg1, arg2, arg3,... : floats, optional + Starting value(s) for any shape-characterizing arguments (those not + provided will be determined by a call to ``_fitstart(data)``). + No default value. + **kwds : floats, optional + - `loc`: initial guess of the distribution's location parameter. + - `scale`: initial guess of the distribution's scale parameter. + + Special keyword arguments are recognized as holding certain + parameters fixed: + + - f0...fn : hold respective shape parameters fixed. + Alternatively, shape parameters to fix can be specified by name. 
+ For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a`` + are equivalent to ``f0``, and ``fb`` and ``fix_b`` are + equivalent to ``f1``. + + - floc : hold location parameter fixed to specified value. + + - fscale : hold scale parameter fixed to specified value. + + - optimizer : The optimizer to use. The optimizer must take + ``func`` and starting position as the first two arguments, + plus ``args`` (for extra arguments to pass to the + function to be optimized) and ``disp``. + The ``fit`` method calls the optimizer with ``disp=0`` to suppress output. + The optimizer must return the estimated parameters. + + - method : The method to use. The default is "MLE" (Maximum + Likelihood Estimate); "MM" (Method of Moments) + is also available. + + Raises + ------ + TypeError, ValueError + If an input is invalid + `~scipy.stats.FitError` + If fitting fails or the fit produced would be invalid + + Returns + ------- + parameter_tuple : tuple of floats + Estimates for any shape parameters (if applicable), followed by + those for location and scale. For most random variables, shape + statistics will be returned, but there are exceptions (e.g. + ``norm``). + + Notes + ----- + With ``method="MLE"`` (default), the fit is computed by minimizing + the negative log-likelihood function. A large, finite penalty + (rather than infinite negative log-likelihood) is applied for + observations beyond the support of the distribution. + + With ``method="MM"``, the fit is computed by minimizing the L2 norm + of the relative errors between the first *k* raw (about zero) data + moments and the corresponding distribution moments, where *k* is the + number of non-fixed parameters. + More precisely, the objective function is:: + + (((data_moments - dist_moments) + / np.maximum(np.abs(data_moments), 1e-8))**2).sum() + + where the constant ``1e-8`` avoids division by zero in case of + vanishing data moments. Typically, this error norm can be reduced to + zero. + Note that the standard method of moments can produce parameters for + which some data are outside the support of the fitted distribution; + this implementation does nothing to prevent this. + + For either method, + the returned answer is not guaranteed to be globally optimal; it + may only be locally optimal, or the optimization may fail altogether. + If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``, + the `fit` method will raise a ``RuntimeError``. + + When passing a ``CensoredData`` instance to ``data``, the log-likelihood + function is defined as: + + .. math:: + + l(\pmb{\theta}; k) & = \sum + \log(f(k_u; \pmb{\theta})) + + \sum + \log(F(k_l; \pmb{\theta})) \\ + & + \sum + \log(1 - F(k_r; \pmb{\theta})) \\ + & + \sum + \log(F(k_{\text{high}, i}; \pmb{\theta}) + - F(k_{\text{low}, i}; \pmb{\theta})) + + where :math:`f` and :math:`F` are the pdf and cdf, respectively, of the + function being fitted, :math:`\pmb{\theta}` is the parameter vector, + :math:`u` are the indices of uncensored observations, + :math:`l` are the indices of left-censored observations, + :math:`r` are the indices of right-censored observations, + subscripts "low"/"high" denote endpoints of interval-censored observations, and + :math:`i` are the indices of interval-censored observations. + + Examples + -------- + + Generate some data to fit: draw random variates from the `beta` + distribution + + >>> import numpy as np + >>> from scipy.stats import beta + >>> a, b = 1., 2. 
+ >>> rng = np.random.default_rng(172786373191770012695001057628748821561) + >>> x = beta.rvs(a, b, size=1000, random_state=rng) + + Now we can fit all four parameters (``a``, ``b``, ``loc`` and + ``scale``): + + >>> a1, b1, loc1, scale1 = beta.fit(x) + >>> a1, b1, loc1, scale1 + (1.0198945204435628, 1.9484708982737828, 4.372241314917588e-05, 0.9979078845964814) + + The fit can be done also using a custom optimizer: + + >>> from scipy.optimize import minimize + >>> def custom_optimizer(func, x0, args=(), disp=0): + ... res = minimize(func, x0, args, method="slsqp", options={"disp": disp}) + ... if res.success: + ... return res.x + ... raise RuntimeError('optimization routine failed') + >>> a1, b1, loc1, scale1 = beta.fit(x, method="MLE", optimizer=custom_optimizer) + >>> a1, b1, loc1, scale1 + (1.0198821087258905, 1.948484145914738, 4.3705304486881485e-05, 0.9979104663953395) + + We can also use some prior knowledge about the dataset: let's keep + ``loc`` and ``scale`` fixed: + + >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1) + >>> loc1, scale1 + (0, 1) + + We can also keep shape parameters fixed by using ``f``-keywords. To + keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or, + equivalently, ``fa=1``: + + >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1) + >>> a1 + 1 + + Not all distributions return estimates for the shape parameters. + ``norm`` for example just returns estimates for location and scale: + + >>> from scipy.stats import norm + >>> x = norm.rvs(a, b, size=1000, random_state=123) + >>> loc1, scale1 = norm.fit(x) + >>> loc1, scale1 + (0.92087172783841631, 2.0015750750324668) + """ # noqa: E501 + method = kwds.get('method', "mle").lower() + + censored = isinstance(data, CensoredData) + if censored: + if method != 'mle': + raise ValueError('For censored data, the method must' + ' be "MLE".') + if data.num_censored() == 0: + # There are no censored values in data, so replace the + # CensoredData instance with a regular array. + data = data._uncensored + censored = False + + Narg = len(args) + if Narg > self.numargs: + raise TypeError("Too many input arguments.") + + # Check the finiteness of data only if data is not an instance of + # CensoredData. The arrays in a CensoredData instance have already + # been validated. + if not censored: + # Note: `ravel()` is called for backwards compatibility. + data = np.asarray(data).ravel() + if not np.isfinite(data).all(): + raise ValueError("The data contains non-finite values.") + + start = [None]*2 + if (Narg < self.numargs) or not ('loc' in kwds and + 'scale' in kwds): + # get distribution specific starting locations + start = self._fitstart(data) + args += start[Narg:-2] + loc = kwds.pop('loc', start[-2]) + scale = kwds.pop('scale', start[-1]) + args += (loc, scale) + x0, func, restore, args = self._reduce_func(args, kwds, data=data) + optimizer = kwds.pop('optimizer', optimize.fmin) + # convert string to function in scipy.optimize + optimizer = _fit_determine_optimizer(optimizer) + # by now kwds must be empty, since everybody took what they needed + if kwds: + raise TypeError("Unknown arguments: %s." % kwds) + + # In some cases, method of moments can be done with fsolve/root + # instead of an optimizer, but sometimes no solution exists, + # especially when the user fixes parameters. Minimizing the sum + # of squares of the error generalizes to these cases. 
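+        # Editorial note (not part of the upstream scipy source): at this
+        # point `func` is the objective returned by _reduce_func (the
+        # penalized negative log-likelihood for method="MLE", or the
+        # relative moment error for method="MM"), wrapped so that any
+        # parameters fixed via f0/fa/fix_a, floc or fscale stay constant,
+        # and `x0` holds starting values for the free parameters only.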
+ vals = optimizer(func, x0, args=(data,), disp=0) + obj = func(vals, data) + + if restore is not None: + vals = restore(args, vals) + vals = tuple(vals) + + loc, scale, shapes = self._unpack_loc_scale(vals) + if not (np.all(self._argcheck(*shapes)) and scale > 0): + raise FitError("Optimization converged to parameters that are " + "outside the range allowed by the distribution.") + + if method == 'mm': + if not np.isfinite(obj): + raise FitError("Optimization failed: either a data moment " + "or fitted distribution moment is " + "non-finite.") + + return vals + + def _fit_loc_scale_support(self, data, *args): + """Estimate loc and scale parameters from data accounting for support. + + Parameters + ---------- + data : array_like + Data to fit. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + Lhat : float + Estimated location parameter for the data. + Shat : float + Estimated scale parameter for the data. + + """ + if isinstance(data, CensoredData): + # For this estimate, "uncensor" the data by taking the + # given endpoints as the data for the left- or right-censored + # data, and the mean for the interval-censored data. + data = data._uncensor() + else: + data = np.asarray(data) + + # Estimate location and scale according to the method of moments. + loc_hat, scale_hat = self.fit_loc_scale(data, *args) + + # Compute the support according to the shape parameters. + self._argcheck(*args) + _a, _b = self._get_support(*args) + a, b = _a, _b + support_width = b - a + + # If the support is empty then return the moment-based estimates. + if support_width <= 0: + return loc_hat, scale_hat + + # Compute the proposed support according to the loc and scale + # estimates. + a_hat = loc_hat + a * scale_hat + b_hat = loc_hat + b * scale_hat + + # Use the moment-based estimates if they are compatible with the data. + data_a = np.min(data) + data_b = np.max(data) + if a_hat < data_a and data_b < b_hat: + return loc_hat, scale_hat + + # Otherwise find other estimates that are compatible with the data. + data_width = data_b - data_a + rel_margin = 0.1 + margin = data_width * rel_margin + + # For a finite interval, both the location and scale + # should have interesting values. + if support_width < np.inf: + loc_hat = (data_a - a) - margin + scale_hat = (data_width + 2 * margin) / support_width + return loc_hat, scale_hat + + # For a one-sided interval, use only an interesting location parameter. + if a > -np.inf: + return (data_a - a) - margin, 1 + elif b < np.inf: + return (data_b - b) + margin, 1 + else: + raise RuntimeError + + def fit_loc_scale(self, data, *args): + """ + Estimate loc and scale parameters from data using 1st and 2nd moments. + + Parameters + ---------- + data : array_like + Data to fit. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + Lhat : float + Estimated location parameter for the data. + Shat : float + Estimated scale parameter for the data. 
+ + """ + mu, mu2 = self.stats(*args, **{'moments': 'mv'}) + tmp = asarray(data) + muhat = tmp.mean() + mu2hat = tmp.var() + Shat = sqrt(mu2hat / mu2) + with np.errstate(invalid='ignore'): + Lhat = muhat - Shat*mu + if not np.isfinite(Lhat): + Lhat = 0 + if not (np.isfinite(Shat) and (0 < Shat)): + Shat = 1 + return Lhat, Shat + + def _entropy(self, *args): + def integ(x): + val = self._pdf(x, *args) + return entr(val) + + # upper limit is often inf, so suppress warnings when integrating + _a, _b = self._get_support(*args) + with np.errstate(over='ignore'): + h = integrate.quad(integ, _a, _b)[0] + + if not np.isnan(h): + return h + else: + # try with different limits if integration problems + low, upp = self.ppf([1e-10, 1. - 1e-10], *args) + if np.isinf(_b): + upper = upp + else: + upper = _b + if np.isinf(_a): + lower = low + else: + lower = _a + return integrate.quad(integ, lower, upper)[0] + + def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None, + conditional=False, **kwds): + """Calculate expected value of a function with respect to the + distribution by numerical integration. + + The expected value of a function ``f(x)`` with respect to a + distribution ``dist`` is defined as:: + + ub + E[f(x)] = Integral(f(x) * dist.pdf(x)), + lb + + where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)`` + distribution. If the bounds ``lb`` and ``ub`` correspond to the + support of the distribution, e.g. ``[-inf, inf]`` in the default + case, then the integral is the unrestricted expectation of ``f(x)``. + Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0`` + outside a finite interval in which case the expectation is + calculated within the finite range ``[lb, ub]``. + + Parameters + ---------- + func : callable, optional + Function for which integral is calculated. Takes only one argument. + The default is the identity mapping f(x) = x. + args : tuple, optional + Shape parameters of the distribution. + loc : float, optional + Location parameter (default=0). + scale : float, optional + Scale parameter (default=1). + lb, ub : scalar, optional + Lower and upper bound for integration. Default is set to the + support of the distribution. + conditional : bool, optional + If True, the integral is corrected by the conditional probability + of the integration interval. The return value is the expectation + of the function, conditional on being in the given interval. + Default is False. + + Additional keyword arguments are passed to the integration routine. + + Returns + ------- + expect : float + The calculated expected value. + + Notes + ----- + The integration behavior of this function is inherited from + `scipy.integrate.quad`. Neither this function nor + `scipy.integrate.quad` can verify whether the integral exists or is + finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and + ``cauchy(0).expect()`` returns ``0.0``. + + Likewise, the accuracy of results is not verified by the function. + `scipy.integrate.quad` is typically reliable for integrals that are + numerically favorable, but it is not guaranteed to converge + to a correct value for all possible intervals and integrands. This + function is provided for convenience; for critical applications, + check results against other integration methods. + + The function is not vectorized. 
+ + Examples + -------- + + To understand the effect of the bounds of integration consider + + >>> from scipy.stats import expon + >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0) + 0.6321205588285578 + + This is close to + + >>> expon(1).cdf(2.0) - expon(1).cdf(0.0) + 0.6321205588285577 + + If ``conditional=True`` + + >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True) + 1.0000000000000002 + + The slight deviation from 1 is due to numerical integration. + + The integrand can be treated as a complex-valued function + by passing ``complex_func=True`` to `scipy.integrate.quad` . + + >>> import numpy as np + >>> from scipy.stats import vonmises + >>> res = vonmises(loc=2, kappa=1).expect(lambda x: np.exp(1j*x), + ... complex_func=True) + >>> res + (-0.18576377217422957+0.40590124735052263j) + + >>> np.angle(res) # location of the (circular) distribution + 2.0 + + """ + lockwds = {'loc': loc, + 'scale': scale} + self._argcheck(*args) + _a, _b = self._get_support(*args) + if func is None: + def fun(x, *args): + return x * self.pdf(x, *args, **lockwds) + else: + def fun(x, *args): + return func(x) * self.pdf(x, *args, **lockwds) + if lb is None: + lb = loc + _a * scale + if ub is None: + ub = loc + _b * scale + + cdf_bounds = self.cdf([lb, ub], *args, **lockwds) + invfac = cdf_bounds[1] - cdf_bounds[0] + + kwds['args'] = args + + # split interval to help integrator w/ infinite support; see gh-8928 + alpha = 0.05 # split body from tails at probability mass `alpha` + inner_bounds = np.array([alpha, 1-alpha]) + cdf_inner_bounds = cdf_bounds[0] + invfac * inner_bounds + c, d = loc + self._ppf(cdf_inner_bounds, *args) * scale + + # Do not silence warnings from integration. + lbc = integrate.quad(fun, lb, c, **kwds)[0] + cd = integrate.quad(fun, c, d, **kwds)[0] + dub = integrate.quad(fun, d, ub, **kwds)[0] + vals = (lbc + cd + dub) + + if conditional: + vals /= invfac + return np.array(vals)[()] # make it a numpy scalar like other methods + + def _param_info(self): + shape_info = self._shape_info() + loc_info = _ShapeInfo("loc", False, (-np.inf, np.inf), (False, False)) + scale_info = _ShapeInfo("scale", False, (0, np.inf), (False, False)) + param_info = shape_info + [loc_info, scale_info] + return param_info + + # For now, _delta_cdf is a private method. + def _delta_cdf(self, x1, x2, *args, loc=0, scale=1): + """ + Compute CDF(x2) - CDF(x1). + + Where x1 is greater than the median, compute SF(x1) - SF(x2), + otherwise compute CDF(x2) - CDF(x1). + + This function is only useful if `dist.sf(x, ...)` has an implementation + that is numerically more accurate than `1 - dist.cdf(x, ...)`. + """ + cdf1 = self.cdf(x1, *args, loc=loc, scale=scale) + # Possible optimizations (needs investigation-these might not be + # better): + # * Use _lazywhere instead of np.where + # * Instead of cdf1 > 0.5, compare x1 to the median. 
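+        # Editorial illustration (not part of the upstream scipy source):
+        # for the standard normal, cdf(10) rounds to 1.0 in double
+        # precision, so cdf(12) - cdf(10) evaluates to 0.0, whereas
+        # sf(10) - sf(12) retains the roughly 7.6e-24 of probability mass
+        # lying between x1 = 10 and x2 = 12.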
+ result = np.where(cdf1 > 0.5, + (self.sf(x1, *args, loc=loc, scale=scale) + - self.sf(x2, *args, loc=loc, scale=scale)), + self.cdf(x2, *args, loc=loc, scale=scale) - cdf1) + if result.ndim == 0: + result = result[()] + return result + + +# Helpers for the discrete distributions +def _drv2_moment(self, n, *args): + """Non-central moment of discrete distribution.""" + def fun(x): + return np.power(x, n) * self._pmf(x, *args) + + _a, _b = self._get_support(*args) + return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc) + + +def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm + _a, _b = self._get_support(*args) + b = _b + a = _a + if isinf(b): # Be sure ending point is > q + b = int(max(100*q, 10)) + while 1: + if b >= _b: + qb = 1.0 + break + qb = self._cdf(b, *args) + if (qb < q): + b += 10 + else: + break + else: + qb = 1.0 + if isinf(a): # be sure starting point < q + a = int(min(-100*q, -10)) + while 1: + if a <= _a: + qb = 0.0 + break + qa = self._cdf(a, *args) + if (qa > q): + a -= 10 + else: + break + else: + qa = self._cdf(a, *args) + + while 1: + if (qa == q): + return a + if (qb == q): + return b + if b <= a+1: + if qa > q: + return a + else: + return b + c = int((a+b)/2.0) + qc = self._cdf(c, *args) + if (qc < q): + if a != c: + a = c + else: + raise RuntimeError('updating stopped, endless loop') + qa = qc + elif (qc > q): + if b != c: + b = c + else: + raise RuntimeError('updating stopped, endless loop') + qb = qc + else: + return c + + +# Must over-ride one of _pmf or _cdf or pass in +# x_k, p(x_k) lists in initialization + + +class rv_discrete(rv_generic): + """A generic discrete random variable class meant for subclassing. + + `rv_discrete` is a base class to construct specific distribution classes + and instances for discrete random variables. It can also be used + to construct an arbitrary distribution defined by a list of support + points and corresponding probabilities. + + Parameters + ---------- + a : float, optional + Lower bound of the support of the distribution, default: 0 + b : float, optional + Upper bound of the support of the distribution, default: plus infinity + moment_tol : float, optional + The tolerance for the generic calculation of moments. + values : tuple of two array_like, optional + ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero + probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk`` + and ``pk`` must have the same shape, and ``xk`` must be unique. + inc : integer, optional + Increment for the support of the distribution. + Default is 1. (other values have not been tested) + badvalue : float, optional + The value in a result arrays that indicates a value that for which + some argument restriction is violated, default is np.nan. + name : str, optional + The name of the instance. This string is used to construct the default + example for distributions. + longname : str, optional + This string is used as part of the first line of the docstring returned + when a subclass has no docstring of its own. Note: `longname` exists + for backwards compatibility, do not use for new subclasses. + shapes : str, optional + The shape of the distribution. For example "m, n" for a distribution + that takes two integers as the two shape arguments for all its methods + If not provided, shape parameters will be inferred from + the signatures of the private methods, ``_pmf`` and ``_cdf`` of + the instance. 
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + pmf + logpmf + cdf + logcdf + sf + logsf + ppf + isf + moment + stats + entropy + expect + median + mean + std + var + interval + __call__ + support + + Notes + ----- + This class is similar to `rv_continuous`. Whether a shape parameter is + valid is decided by an ``_argcheck`` method (which defaults to checking + that its arguments are strictly positive.) + The main differences are as follows. + + - The support of the distribution is a set of integers. + - Instead of the probability density function, ``pdf`` (and the + corresponding private ``_pdf``), this class defines the + *probability mass function*, `pmf` (and the corresponding + private ``_pmf``.) + - There is no ``scale`` parameter. + - The default implementations of methods (e.g. ``_cdf``) are not designed + for distributions with support that is unbounded below (i.e. + ``a=-np.inf``), so they must be overridden. + + To create a new discrete distribution, we would do the following: + + >>> from scipy.stats import rv_discrete + >>> class poisson_gen(rv_discrete): + ... "Poisson distribution" + ... def _pmf(self, k, mu): + ... return exp(-mu) * mu**k / factorial(k) + + and create an instance:: + + >>> poisson = poisson_gen(name="poisson") + + Note that above we defined the Poisson distribution in the standard form. + Shifting the distribution can be done by providing the ``loc`` parameter + to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)`` + delegates the work to ``poisson._pmf(x-loc, mu)``. + + **Discrete distributions from a list of probabilities** + + Alternatively, you can construct an arbitrary discrete rv defined + on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the + ``values`` keyword argument to the `rv_discrete` constructor. + + **Deepcopying / Pickling** + + If a distribution or frozen distribution is deepcopied (pickled/unpickled, + etc.), any underlying random number generator is deepcopied with it. An + implication is that if a distribution relies on the singleton RandomState + before copying, it will rely on a copy of that random state after copying, + and ``np.random.seed`` will no longer control the state. 
+ + Examples + -------- + Custom made discrete distribution: + + >>> import numpy as np + >>> from scipy import stats + >>> xk = np.arange(7) + >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2) + >>> custm = stats.rv_discrete(name='custm', values=(xk, pk)) + >>> + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(1, 1) + >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r') + >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4) + >>> plt.show() + + Random number generation: + + >>> R = custm.rvs(size=100) + + """ + def __new__(cls, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + if values is not None: + # dispatch to a subclass + return super().__new__(rv_sample) + else: + # business as usual + return super().__new__(cls) + + def __init__(self, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + super().__init__(seed) + + # cf generic freeze + self._ctor_param = dict( + a=a, b=b, name=name, badvalue=badvalue, + moment_tol=moment_tol, values=values, inc=inc, + longname=longname, shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + self.badvalue = badvalue + self.a = a + self.b = b + self.moment_tol = moment_tol + self.inc = inc + self.shapes = shapes + + if values is not None: + raise ValueError("rv_discrete.__init__(..., values != None, ...)") + + self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf], + locscale_in='loc=0', + # scale=1 for discrete RVs + locscale_out='loc, 1') + self._attach_methods() + self._construct_docstrings(name, longname) + + def __getstate__(self): + dct = self.__dict__.copy() + # these methods will be remade in __setstate__ + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs", + "_cdfvec", "_ppfvec", "generic_moment"] + [dct.pop(attr, None) for attr in attrs] + return dct + + def _attach_methods(self): + """Attaches dynamically created methods to the rv_discrete instance.""" + self._cdfvec = vectorize(self._cdf_single, otypes='d') + self.vecentropy = vectorize(self._entropy) + + # _attach_methods is responsible for calling _attach_argparser_methods + self._attach_argparser_methods() + + # nin correction needs to be after we know numargs + # correct nin for generic moment vectorization + _vec_generic_moment = vectorize(_drv2_moment, otypes='d') + _vec_generic_moment.nin = self.numargs + 2 + self.generic_moment = types.MethodType(_vec_generic_moment, self) + + # correct nin for ppf vectorization + _vppf = vectorize(_drv2_ppfsingle, otypes='d') + _vppf.nin = self.numargs + 2 + self._ppfvec = types.MethodType(_vppf, self) + + # now that self.numargs is defined, we can adjust nin + self._cdfvec.nin = self.numargs + 1 + + def _construct_docstrings(self, name, longname): + if name is None: + name = 'Distribution' + self.name = name + + # generate docstring for subclass instances + if longname is None: + if name[0] in ['aeiouAEIOU']: + hstr = "An " + else: + hstr = "A " + longname = hstr + name + + if sys.flags.optimize < 2: + # Skip adding docstrings if interpreter is run with -OO + if self.__doc__ is None: + self._construct_default_doc(longname=longname, + docdict=docdict_discrete, + discrete='discrete') + else: + dct = dict(distdiscrete) + self._construct_doc(docdict_discrete, dct.get(self.name)) + + # discrete RV do not have the scale parameter, remove it + self.__doc__ = self.__doc__.replace( + '\n scale : array_like, ' + 'optional\n scale parameter (default=1)', '') + + 
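+    # Editorial sketch (not part of the upstream scipy source): the generic
+    # implementations below mean a subclass only needs to supply one of
+    # `_pmf` or `_cdf`; `_pmf` falls back to cdf(k) - cdf(k-1) and `_cdf`
+    # to a cumulative sum of `_pmf`. A minimal hypothetical example
+    # (`two_point_gen` is illustrative, not a scipy distribution):
+    #
+    #     >>> import numpy as np
+    #     >>> from scipy.stats import rv_discrete
+    #     >>> class two_point_gen(rv_discrete):
+    #     ...     "P(X=0) = 0.25, P(X=1) = 0.75"
+    #     ...     def _pmf(self, k):
+    #     ...         return np.where(k == 0, 0.25,
+    #     ...                         np.where(k == 1, 0.75, 0.0))
+    #     >>> two_point = two_point_gen(a=0, b=1, name="two_point")
+    #     >>> float(two_point.cdf(0)), float(two_point.sf(0))
+    #     (0.25, 0.75)
+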
def _updated_ctor_param(self): + """Return the current version of _ctor_param, possibly updated by user. + + Used by freezing. + Keep this in sync with the signature of __init__. + """ + dct = self._ctor_param.copy() + dct['a'] = self.a + dct['b'] = self.b + dct['badvalue'] = self.badvalue + dct['moment_tol'] = self.moment_tol + dct['inc'] = self.inc + dct['name'] = self.name + dct['shapes'] = self.shapes + return dct + + def _nonzero(self, k, *args): + return floor(k) == k + + def _pmf(self, k, *args): + return self._cdf(k, *args) - self._cdf(k-1, *args) + + def _logpmf(self, k, *args): + return log(self._pmf(k, *args)) + + def _logpxf(self, k, *args): + # continuous distributions have PDF, discrete have PMF, but sometimes + # the distinction doesn't matter. This lets us use `_logpxf` for both + # discrete and continuous distributions. + return self._logpmf(k, *args) + + def _unpack_loc_scale(self, theta): + try: + loc = theta[-1] + scale = 1 + args = tuple(theta[:-1]) + except IndexError as e: + raise ValueError("Not enough input arguments.") from e + return loc, scale, args + + def _cdf_single(self, k, *args): + _a, _b = self._get_support(*args) + m = arange(int(_a), k+1) + return np.sum(self._pmf(m, *args), axis=0) + + def _cdf(self, x, *args): + k = floor(x) + return self._cdfvec(k, *args) + + # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic + + def rvs(self, *args, **kwargs): + """Random variates of given type. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + size : int or tuple of ints, optional + Defining number of random variates (Default is 1). Note that `size` + has to be given as keyword, not as positional argument. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is + used, seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, that instance is used. + + Returns + ------- + rvs : ndarray or scalar + Random variates of given `size`. + + """ + kwargs['discrete'] = True + return super().rvs(*args, **kwargs) + + def pmf(self, k, *args, **kwds): + """Probability mass function at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + Location parameter (default=0). 
+ + Returns + ------- + pmf : array_like + Probability mass function evaluated at k + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k <= _b) + if not isinstance(self, rv_sample): + cond1 = cond1 & self._nonzero(k, *args) + cond = cond0 & cond1 + output = zeros(shape(cond), 'd') + place(output, (1-cond0) + np.isnan(k), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._pmf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logpmf(self, k, *args, **kwds): + """Log of the probability mass function at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter. Default is 0. + + Returns + ------- + logpmf : array_like + Log of the probability mass function evaluated at k. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k <= _b) + if not isinstance(self, rv_sample): + cond1 = cond1 & self._nonzero(k, *args) + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(-inf) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logpmf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def cdf(self, k, *args, **kwds): + """Cumulative distribution function of the given RV. + + Parameters + ---------- + k : array_like, int + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + cdf : ndarray + Cumulative distribution function evaluated at `k`. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = (k >= _b) + cond3 = np.isneginf(k) + cond = cond0 & cond1 & np.isfinite(k) + + output = zeros(shape(cond), 'd') + place(output, cond2*(cond0 == cond0), 1.0) + place(output, cond3*(cond0 == cond0), 0.0) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._cdf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logcdf(self, k, *args, **kwds): + """Log of the cumulative distribution function at k of the given RV. + + Parameters + ---------- + k : array_like, int + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + logcdf : array_like + Log of the cumulative distribution function evaluated at k. 
+ + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = (k >= _b) + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(-inf) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2*(cond0 == cond0), 0.0) + + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logcdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def sf(self, k, *args, **kwds): + """Survival function (1 - `cdf`) at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + sf : array_like + Survival function evaluated at k. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = ((k < _a) | np.isneginf(k)) & cond0 + cond = cond0 & cond1 & np.isfinite(k) + output = zeros(shape(cond), 'd') + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._sf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logsf(self, k, *args, **kwds): + """Log of the survival function of the given RV. + + Returns the log of the "survival function," defined as 1 - `cdf`, + evaluated at `k`. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + logsf : ndarray + Log of the survival function evaluated at `k`. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = (k < _a) & cond0 + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(-inf) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logsf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def ppf(self, q, *args, **kwds): + """Percent point function (inverse of `cdf`) at q of the given RV. + + Parameters + ---------- + q : array_like + Lower tail probability. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + k : array_like + Quantile corresponding to the lower tail probability, q. 
+ + """ + args, loc, _ = self._parse_args(*args, **kwds) + q, loc = map(asarray, (q, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (loc == loc) + cond1 = (q > 0) & (q < 1) + cond2 = (q == 1) & cond0 + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue, dtype='d') + # output type 'd' to handle nin and inf + place(output, (q == 0)*(cond == cond), _a-1 + loc) + place(output, cond2, _b + loc) + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(loc,))) + loc, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._ppf(*goodargs) + loc) + + if output.ndim == 0: + return output[()] + return output + + def isf(self, q, *args, **kwds): + """Inverse survival function (inverse of `sf`) at q of the given RV. + + Parameters + ---------- + q : array_like + Upper tail probability. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + k : ndarray or scalar + Quantile corresponding to the upper tail probability, q. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + q, loc = map(asarray, (q, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (loc == loc) + cond1 = (q > 0) & (q < 1) + cond2 = (q == 1) & cond0 + cond3 = (q == 0) & cond0 + cond = cond0 & cond1 + + # same problem as with ppf; copied from ppf and changed + output = np.full(shape(cond), fill_value=self.badvalue, dtype='d') + # output type 'd' to handle nin and inf + lower_bound = _a - 1 + loc + upper_bound = _b + loc + place(output, cond2*(cond == cond), lower_bound) + place(output, cond3*(cond == cond), upper_bound) + + # call place only if at least 1 valid argument + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(loc,))) + loc, goodargs = goodargs[-1], goodargs[:-1] + # PB same as ticket 766 + place(output, cond, self._isf(*goodargs) + loc) + + if output.ndim == 0: + return output[()] + return output + + def _entropy(self, *args): + if hasattr(self, 'pk'): + return stats.entropy(self.pk) + else: + _a, _b = self._get_support(*args) + return _expect(lambda x: entr(self.pmf(x, *args)), + _a, _b, self.ppf(0.5, *args), self.inc) + + def expect(self, func=None, args=(), loc=0, lb=None, ub=None, + conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32): + """ + Calculate expected value of a function with respect to the distribution + for discrete distribution by numerical summation. + + Parameters + ---------- + func : callable, optional + Function for which the expectation value is calculated. + Takes only one argument. + The default is the identity mapping f(k) = k. + args : tuple, optional + Shape parameters of the distribution. + loc : float, optional + Location parameter. + Default is 0. + lb, ub : int, optional + Lower and upper bound for the summation, default is set to the + support of the distribution, inclusive (``lb <= k <= ub``). + conditional : bool, optional + If true then the expectation is corrected by the conditional + probability of the summation interval. The return value is the + expectation of the function, `func`, conditional on being in + the given interval (k such that ``lb <= k <= ub``). + Default is False. + maxcount : int, optional + Maximal number of terms to evaluate (to avoid an endless loop for + an infinite sum). Default is 1000. 
+ tolerance : float, optional + Absolute tolerance for the summation. Default is 1e-10. + chunksize : int, optional + Iterate over the support of a distributions in chunks of this size. + Default is 32. + + Returns + ------- + expect : float + Expected value. + + Notes + ----- + For heavy-tailed distributions, the expected value may or + may not exist, + depending on the function, `func`. If it does exist, but the + sum converges + slowly, the accuracy of the result may be rather low. For instance, for + ``zipf(4)``, accuracy for mean, variance in example is only 1e-5. + increasing `maxcount` and/or `chunksize` may improve the result, + but may also make zipf very slow. + + The function is not vectorized. + + """ + if func is None: + def fun(x): + # loc and args from outer scope + return (x+loc)*self._pmf(x, *args) + else: + def fun(x): + # loc and args from outer scope + return func(x+loc)*self._pmf(x, *args) + # used pmf because _pmf does not check support in randint and there + # might be problems(?) with correct self.a, self.b at this stage maybe + # not anymore, seems to work now with _pmf + + _a, _b = self._get_support(*args) + if lb is None: + lb = _a + else: + lb = lb - loc # convert bound for standardized distribution + if ub is None: + ub = _b + else: + ub = ub - loc # convert bound for standardized distribution + if conditional: + invfac = self.sf(lb-1, *args) - self.sf(ub, *args) + else: + invfac = 1.0 + + if isinstance(self, rv_sample): + res = self._expect(fun, lb, ub) + return res / invfac + + # iterate over the support, starting from the median + x0 = self.ppf(0.5, *args) + res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize) + return res / invfac + + def _param_info(self): + shape_info = self._shape_info() + loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False)) + param_info = shape_info + [loc_info] + return param_info + + +def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10, + chunksize=32): + """Helper for computing the expectation value of `fun`.""" + # short-circuit if the support size is small enough + if (ub - lb) <= chunksize: + supp = np.arange(lb, ub+1, inc) + vals = fun(supp) + return np.sum(vals) + + # otherwise, iterate starting from x0 + if x0 < lb: + x0 = lb + if x0 > ub: + x0 = ub + + count, tot = 0, 0. + # iterate over [x0, ub] inclusive + for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc): + count += x.size + delta = np.sum(fun(x)) + tot += delta + if abs(delta) < tolerance * x.size: + break + if count > maxcount: + warnings.warn('expect(): sum did not converge', + RuntimeWarning, stacklevel=3) + return tot + + # iterate over [lb, x0) + for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc): + count += x.size + delta = np.sum(fun(x)) + tot += delta + if abs(delta) < tolerance * x.size: + break + if count > maxcount: + warnings.warn('expect(): sum did not converge', + RuntimeWarning, stacklevel=3) + break + + return tot + + +def _iter_chunked(x0, x1, chunksize=4, inc=1): + """Iterate from x0 to x1 in chunks of chunksize and steps inc. + + x0 must be finite, x1 need not be. In the latter case, the iterator is + infinite. + Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards + (make sure to set inc < 0.) 
+ + >>> from scipy.stats._distn_infrastructure import _iter_chunked + >>> [x for x in _iter_chunked(2, 5, inc=2)] + [array([2, 4])] + >>> [x for x in _iter_chunked(2, 11, inc=2)] + [array([2, 4, 6, 8]), array([10])] + >>> [x for x in _iter_chunked(2, -5, inc=-2)] + [array([ 2, 0, -2, -4])] + >>> [x for x in _iter_chunked(2, -9, inc=-2)] + [array([ 2, 0, -2, -4]), array([-6, -8])] + + """ + if inc == 0: + raise ValueError('Cannot increment by zero.') + if chunksize <= 0: + raise ValueError('Chunk size must be positive; got %s.' % chunksize) + + s = 1 if inc > 0 else -1 + stepsize = abs(chunksize * inc) + + x = x0 + while (x - x1) * inc < 0: + delta = min(stepsize, abs(x - x1)) + step = delta * s + supp = np.arange(x, x + step, inc) + x += step + yield supp + + +class rv_sample(rv_discrete): + """A 'sample' discrete distribution defined by the support and values. + + The ctor ignores most of the arguments, only needs the `values` argument. + """ + + def __init__(self, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + super(rv_discrete, self).__init__(seed) + + if values is None: + raise ValueError("rv_sample.__init__(..., values=None,...)") + + # cf generic freeze + self._ctor_param = dict( + a=a, b=b, name=name, badvalue=badvalue, + moment_tol=moment_tol, values=values, inc=inc, + longname=longname, shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + self.badvalue = badvalue + self.moment_tol = moment_tol + self.inc = inc + self.shapes = shapes + self.vecentropy = self._entropy + + xk, pk = values + + if np.shape(xk) != np.shape(pk): + raise ValueError("xk and pk must have the same shape.") + if np.less(pk, 0.0).any(): + raise ValueError("All elements of pk must be non-negative.") + if not np.allclose(np.sum(pk), 1): + raise ValueError("The sum of provided pk is not 1.") + if not len(set(np.ravel(xk))) == np.size(xk): + raise ValueError("xk may not contain duplicate values.") + + indx = np.argsort(np.ravel(xk)) + self.xk = np.take(np.ravel(xk), indx, 0) + self.pk = np.take(np.ravel(pk), indx, 0) + self.a = self.xk[0] + self.b = self.xk[-1] + + self.qvals = np.cumsum(self.pk, axis=0) + + self.shapes = ' ' # bypass inspection + + self._construct_argparser(meths_to_inspect=[self._pmf], + locscale_in='loc=0', + # scale=1 for discrete RVs + locscale_out='loc, 1') + + self._attach_methods() + + self._construct_docstrings(name, longname) + + def __getstate__(self): + dct = self.__dict__.copy() + + # these methods will be remade in rv_generic.__setstate__, + # which calls rv_generic._attach_methods + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"] + [dct.pop(attr, None) for attr in attrs] + + return dct + + def _attach_methods(self): + """Attaches dynamically created argparser methods.""" + self._attach_argparser_methods() + + def _get_support(self, *args): + """Return the support of the (unscaled, unshifted) distribution. + + Parameters + ---------- + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + a, b : numeric (float, or int or +/-np.inf) + end-points of the distribution's support. 
+ """ + return self.a, self.b + + def _pmf(self, x): + return np.select([x == k for k in self.xk], + [np.broadcast_arrays(p, x)[0] for p in self.pk], 0) + + def _cdf(self, x): + xx, xxk = np.broadcast_arrays(x[:, None], self.xk) + indx = np.argmax(xxk > xx, axis=-1) - 1 + return self.qvals[indx] + + def _ppf(self, q): + qq, sqq = np.broadcast_arrays(q[..., None], self.qvals) + indx = argmax(sqq >= qq, axis=-1) + return self.xk[indx] + + def _rvs(self, size=None, random_state=None): + # Need to define it explicitly, otherwise .rvs() with size=None + # fails due to explicit broadcasting in _ppf + U = random_state.uniform(size=size) + if size is None: + U = np.array(U, ndmin=1) + Y = self._ppf(U)[0] + else: + Y = self._ppf(U) + return Y + + def _entropy(self): + return stats.entropy(self.pk) + + def generic_moment(self, n): + n = asarray(n) + return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0) + + def _expect(self, fun, lb, ub, *args, **kwds): + # ignore all args, just do a brute force summation + supp = self.xk[(lb <= self.xk) & (self.xk <= ub)] + vals = fun(supp) + return np.sum(vals) + + +def _check_shape(argshape, size): + """ + This is a utility function used by `_rvs()` in the class geninvgauss_gen. + It compares the tuple argshape to the tuple size. + + Parameters + ---------- + argshape : tuple of integers + Shape of the arguments. + size : tuple of integers or integer + Size argument of rvs(). + + Returns + ------- + The function returns two tuples, scalar_shape and bc. + + scalar_shape : tuple + Shape to which the 1-d array of random variates returned by + _rvs_scalar() is converted when it is copied into the + output array of _rvs(). + + bc : tuple of booleans + bc is an tuple the same length as size. bc[j] is True if the data + associated with that index is generated in one call of _rvs_scalar(). + + """ + scalar_shape = [] + bc = [] + for argdim, sizedim in zip_longest(argshape[::-1], size[::-1], + fillvalue=1): + if sizedim > argdim or (argdim == sizedim == 1): + scalar_shape.append(sizedim) + bc.append(True) + else: + bc.append(False) + return tuple(scalar_shape[::-1]), tuple(bc[::-1]) + + +def get_distribution_names(namespace_pairs, rv_base_class): + """Collect names of statistical distributions and their generators. + + Parameters + ---------- + namespace_pairs : sequence + A snapshot of (name, value) pairs in the namespace of a module. + rv_base_class : class + The base class of random variable generator classes in a module. + + Returns + ------- + distn_names : list of strings + Names of the statistical distributions. + distn_gen_names : list of strings + Names of the generators of the statistical distributions. + Note that these are not simply the names of the statistical + distributions, with a _gen suffix added. 
+ + """ + distn_names = [] + distn_gen_names = [] + for name, value in namespace_pairs: + if name.startswith('_'): + continue + if name.endswith('_gen') and issubclass(value, rv_base_class): + distn_gen_names.append(name) + if isinstance(value, rv_base_class): + distn_names.append(name) + return distn_names, distn_gen_names diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_entropy.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..f4a81b05749fda69191eab0876eb4bfba8be8b59 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_entropy.py @@ -0,0 +1,423 @@ +""" +Created on Fri Apr 2 09:06:05 2021 + +@author: matth +""" + +from __future__ import annotations +import math +import numpy as np +from scipy import special +from ._axis_nan_policy import _axis_nan_policy_factory, _broadcast_arrays + +__all__ = ['entropy', 'differential_entropy'] + + +@_axis_nan_policy_factory( + lambda x: x, + n_samples=lambda kwgs: ( + 2 if ("qk" in kwgs and kwgs["qk"] is not None) + else 1 + ), + n_outputs=1, result_to_tuple=lambda x: (x,), paired=True, + too_small=-1 # entropy doesn't have too small inputs +) +def entropy(pk: np.typing.ArrayLike, + qk: np.typing.ArrayLike | None = None, + base: float | None = None, + axis: int = 0 + ) -> np.number | np.ndarray: + """ + Calculate the Shannon entropy/relative entropy of given distribution(s). + + If only probabilities `pk` are given, the Shannon entropy is calculated as + ``H = -sum(pk * log(pk))``. + + If `qk` is not None, then compute the relative entropy + ``D = sum(pk * log(pk / qk))``. This quantity is also known + as the Kullback-Leibler divergence. + + This routine will normalize `pk` and `qk` if they don't sum to 1. + + Parameters + ---------- + pk : array_like + Defines the (discrete) distribution. Along each axis-slice of ``pk``, + element ``i`` is the (possibly unnormalized) probability of event + ``i``. + qk : array_like, optional + Sequence against which the relative entropy is computed. Should be in + the same format as `pk`. + base : float, optional + The logarithmic base to use, defaults to ``e`` (natural logarithm). + axis : int, optional + The axis along which the entropy is calculated. Default is 0. + + Returns + ------- + S : {float, array_like} + The calculated entropy. + + Notes + ----- + Informally, the Shannon entropy quantifies the expected uncertainty + inherent in the possible outcomes of a discrete random variable. + For example, + if messages consisting of sequences of symbols from a set are to be + encoded and transmitted over a noiseless channel, then the Shannon entropy + ``H(pk)`` gives a tight lower bound for the average number of units of + information needed per symbol if the symbols occur with frequencies + governed by the discrete distribution `pk` [1]_. The choice of base + determines the choice of units; e.g., ``e`` for nats, ``2`` for bits, etc. + + The relative entropy, ``D(pk|qk)``, quantifies the increase in the average + number of units of information needed per symbol if the encoding is + optimized for the probability distribution `qk` instead of the true + distribution `pk`. Informally, the relative entropy quantifies the expected + excess in surprise experienced if one believes the true distribution is + `qk` when it is actually `pk`. 
+ + A related quantity, the cross entropy ``CE(pk, qk)``, satisfies the + equation ``CE(pk, qk) = H(pk) + D(pk|qk)`` and can also be calculated with + the formula ``CE = -sum(pk * log(qk))``. It gives the average + number of units of information needed per symbol if an encoding is + optimized for the probability distribution `qk` when the true distribution + is `pk`. It is not computed directly by `entropy`, but it can be computed + using two calls to the function (see Examples). + + See [2]_ for more information. + + References + ---------- + .. [1] Shannon, C.E. (1948), A Mathematical Theory of Communication. + Bell System Technical Journal, 27: 379-423. + https://doi.org/10.1002/j.1538-7305.1948.tb01338.x + .. [2] Thomas M. Cover and Joy A. Thomas. 2006. Elements of Information + Theory (Wiley Series in Telecommunications and Signal Processing). + Wiley-Interscience, USA. + + + Examples + -------- + The outcome of a fair coin is the most uncertain: + + >>> import numpy as np + >>> from scipy.stats import entropy + >>> base = 2 # work in units of bits + >>> pk = np.array([1/2, 1/2]) # fair coin + >>> H = entropy(pk, base=base) + >>> H + 1.0 + >>> H == -np.sum(pk * np.log(pk)) / np.log(base) + True + + The outcome of a biased coin is less uncertain: + + >>> qk = np.array([9/10, 1/10]) # biased coin + >>> entropy(qk, base=base) + 0.46899559358928117 + + The relative entropy between the fair coin and biased coin is calculated + as: + + >>> D = entropy(pk, qk, base=base) + >>> D + 0.7369655941662062 + >>> D == np.sum(pk * np.log(pk/qk)) / np.log(base) + True + + The cross entropy can be calculated as the sum of the entropy and + relative entropy`: + + >>> CE = entropy(pk, base=base) + entropy(pk, qk, base=base) + >>> CE + 1.736965594166206 + >>> CE == -np.sum(pk * np.log(qk)) / np.log(base) + True + + """ + if base is not None and base <= 0: + raise ValueError("`base` must be a positive number or `None`.") + + pk = np.asarray(pk) + with np.errstate(invalid='ignore'): + pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True) + if qk is None: + vec = special.entr(pk) + else: + qk = np.asarray(qk) + pk, qk = _broadcast_arrays((pk, qk), axis=None) # don't ignore any axes + sum_kwargs = dict(axis=axis, keepdims=True) + qk = 1.0*qk / np.sum(qk, **sum_kwargs) # type: ignore[operator, call-overload] + vec = special.rel_entr(pk, qk) + S = np.sum(vec, axis=axis) + if base is not None: + S /= np.log(base) + return S + + +def _differential_entropy_is_too_small(samples, kwargs, axis=-1): + values = samples[0] + n = values.shape[axis] + window_length = kwargs.get("window_length", + math.floor(math.sqrt(n) + 0.5)) + if not 2 <= 2 * window_length < n: + return True + return False + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,), + too_small=_differential_entropy_is_too_small +) +def differential_entropy( + values: np.typing.ArrayLike, + *, + window_length: int | None = None, + base: float | None = None, + axis: int = 0, + method: str = "auto", +) -> np.number | np.ndarray: + r"""Given a sample of a distribution, estimate the differential entropy. + + Several estimation methods are available using the `method` parameter. By + default, a method is selected based the size of the sample. + + Parameters + ---------- + values : sequence + Sample from a continuous distribution. + window_length : int, optional + Window length for computing Vasicek estimate. Must be an integer + between 1 and half of the sample size. If ``None`` (the default), it + uses the heuristic value + + .. 
math:: + \left \lfloor \sqrt{n} + 0.5 \right \rfloor + + where :math:`n` is the sample size. This heuristic was originally + proposed in [2]_ and has become common in the literature. + base : float, optional + The logarithmic base to use, defaults to ``e`` (natural logarithm). + axis : int, optional + The axis along which the differential entropy is calculated. + Default is 0. + method : {'vasicek', 'van es', 'ebrahimi', 'correa', 'auto'}, optional + The method used to estimate the differential entropy from the sample. + Default is ``'auto'``. See Notes for more information. + + Returns + ------- + entropy : float + The calculated differential entropy. + + Notes + ----- + This function will converge to the true differential entropy in the limit + + .. math:: + n \to \infty, \quad m \to \infty, \quad \frac{m}{n} \to 0 + + The optimal choice of ``window_length`` for a given sample size depends on + the (unknown) distribution. Typically, the smoother the density of the + distribution, the larger the optimal value of ``window_length`` [1]_. + + The following options are available for the `method` parameter. + + * ``'vasicek'`` uses the estimator presented in [1]_. This is + one of the first and most influential estimators of differential entropy. + * ``'van es'`` uses the bias-corrected estimator presented in [3]_, which + is not only consistent but, under some conditions, asymptotically normal. + * ``'ebrahimi'`` uses an estimator presented in [4]_, which was shown + in simulation to have smaller bias and mean squared error than + the Vasicek estimator. + * ``'correa'`` uses the estimator presented in [5]_ based on local linear + regression. In a simulation study, it had consistently smaller mean + square error than the Vasiceck estimator, but it is more expensive to + compute. + * ``'auto'`` selects the method automatically (default). Currently, + this selects ``'van es'`` for very small samples (<10), ``'ebrahimi'`` + for moderate sample sizes (11-1000), and ``'vasicek'`` for larger + samples, but this behavior is subject to change in future versions. + + All estimators are implemented as described in [6]_. + + References + ---------- + .. [1] Vasicek, O. (1976). A test for normality based on sample entropy. + Journal of the Royal Statistical Society: + Series B (Methodological), 38(1), 54-59. + .. [2] Crzcgorzewski, P., & Wirczorkowski, R. (1999). Entropy-based + goodness-of-fit test for exponentiality. Communications in + Statistics-Theory and Methods, 28(5), 1183-1202. + .. [3] Van Es, B. (1992). Estimating functionals related to a density by a + class of statistics based on spacings. Scandinavian Journal of + Statistics, 61-72. + .. [4] Ebrahimi, N., Pflughoeft, K., & Soofi, E. S. (1994). Two measures + of sample entropy. Statistics & Probability Letters, 20(3), 225-234. + .. [5] Correa, J. C. (1995). A new estimator of entropy. Communications + in Statistics-Theory and Methods, 24(10), 2439-2449. + .. [6] Noughabi, H. A. (2015). Entropy Estimation Using Numerical Methods. + Annals of Data Science, 2(2), 231-241. 
+ https://link.springer.com/article/10.1007/s40745-015-0045-9 + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import differential_entropy, norm + + Entropy of a standard normal distribution: + + >>> rng = np.random.default_rng() + >>> values = rng.standard_normal(100) + >>> differential_entropy(values) + 1.3407817436640392 + + Compare with the true entropy: + + >>> float(norm.entropy()) + 1.4189385332046727 + + For several sample sizes between 5 and 1000, compare the accuracy of + the ``'vasicek'``, ``'van es'``, and ``'ebrahimi'`` methods. Specifically, + compare the root mean squared error (over 1000 trials) between the estimate + and the true differential entropy of the distribution. + + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> + >>> + >>> def rmse(res, expected): + ... '''Root mean squared error''' + ... return np.sqrt(np.mean((res - expected)**2)) + >>> + >>> + >>> a, b = np.log10(5), np.log10(1000) + >>> ns = np.round(np.logspace(a, b, 10)).astype(int) + >>> reps = 1000 # number of repetitions for each sample size + >>> expected = stats.expon.entropy() + >>> + >>> method_errors = {'vasicek': [], 'van es': [], 'ebrahimi': []} + >>> for method in method_errors: + ... for n in ns: + ... rvs = stats.expon.rvs(size=(reps, n), random_state=rng) + ... res = stats.differential_entropy(rvs, method=method, axis=-1) + ... error = rmse(res, expected) + ... method_errors[method].append(error) + >>> + >>> for method, errors in method_errors.items(): + ... plt.loglog(ns, errors, label=method) + >>> + >>> plt.legend() + >>> plt.xlabel('sample size') + >>> plt.ylabel('RMSE (1000 trials)') + >>> plt.title('Entropy Estimator Error (Exponential Distribution)') + + """ + values = np.asarray(values) + values = np.moveaxis(values, axis, -1) + n = values.shape[-1] # number of observations + + if window_length is None: + window_length = math.floor(math.sqrt(n) + 0.5) + + if not 2 <= 2 * window_length < n: + raise ValueError( + f"Window length ({window_length}) must be positive and less " + f"than half the sample size ({n}).", + ) + + if base is not None and base <= 0: + raise ValueError("`base` must be a positive number or `None`.") + + sorted_data = np.sort(values, axis=-1) + + methods = {"vasicek": _vasicek_entropy, + "van es": _van_es_entropy, + "correa": _correa_entropy, + "ebrahimi": _ebrahimi_entropy, + "auto": _vasicek_entropy} + method = method.lower() + if method not in methods: + message = f"`method` must be one of {set(methods)}" + raise ValueError(message) + + if method == "auto": + if n <= 10: + method = 'van es' + elif n <= 1000: + method = 'ebrahimi' + else: + method = 'vasicek' + + res = methods[method](sorted_data, window_length) + + if base is not None: + res /= np.log(base) + + return res + + +def _pad_along_last_axis(X, m): + """Pad the data for computing the rolling window difference.""" + # scales a bit better than method in _vasicek_like_entropy + shape = np.array(X.shape) + shape[-1] = m + Xl = np.broadcast_to(X[..., [0]], shape) # [0] vs 0 to maintain shape + Xr = np.broadcast_to(X[..., [-1]], shape) + return np.concatenate((Xl, X, Xr), axis=-1) + + +def _vasicek_entropy(X, m): + """Compute the Vasicek estimator as described in [6] Eq. 
1.3.""" + n = X.shape[-1] + X = _pad_along_last_axis(X, m) + differences = X[..., 2 * m:] - X[..., : -2 * m:] + logs = np.log(n/(2*m) * differences) + return np.mean(logs, axis=-1) + + +def _van_es_entropy(X, m): + """Compute the van Es estimator as described in [6].""" + # No equation number, but referred to as HVE_mn. + # Typo: there should be a log within the summation. + n = X.shape[-1] + difference = X[..., m:] - X[..., :-m] + term1 = 1/(n-m) * np.sum(np.log((n+1)/m * difference), axis=-1) + k = np.arange(m, n+1) + return term1 + np.sum(1/k) + np.log(m) - np.log(n+1) + + +def _ebrahimi_entropy(X, m): + """Compute the Ebrahimi estimator as described in [6].""" + # No equation number, but referred to as HE_mn + n = X.shape[-1] + X = _pad_along_last_axis(X, m) + + differences = X[..., 2 * m:] - X[..., : -2 * m:] + + i = np.arange(1, n+1).astype(float) + ci = np.ones_like(i)*2 + ci[i <= m] = 1 + (i[i <= m] - 1)/m + ci[i >= n - m + 1] = 1 + (n - i[i >= n-m+1])/m + + logs = np.log(n * differences / (ci * m)) + return np.mean(logs, axis=-1) + + +def _correa_entropy(X, m): + """Compute the Correa estimator as described in [6].""" + # No equation number, but referred to as HC_mn + n = X.shape[-1] + X = _pad_along_last_axis(X, m) + + i = np.arange(1, n+1) + dj = np.arange(-m, m+1)[:, None] + j = i + dj + j0 = j + m - 1 # 0-indexed version of j + + Xibar = np.mean(X[..., j0], axis=-2, keepdims=True) + difference = X[..., j0] - Xibar + num = np.sum(difference*dj, axis=-2) # dj is d-i + den = n*np.sum(difference**2, axis=-2) + return -np.mean(np.log(num/den), axis=-1) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_generate_pyx.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_generate_pyx.py new file mode 100644 index 0000000000000000000000000000000000000000..a9647b53ca97018e20136b927d6bf71e3251bfd7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_generate_pyx.py @@ -0,0 +1,27 @@ +import pathlib +import subprocess +import sys +import os +import argparse + + +def make_boost(outdir): + # Call code generator inside _boost directory + code_gen = pathlib.Path(__file__).parent / '_boost/include/code_gen.py' + subprocess.run([sys.executable, str(code_gen), '-o', outdir], + check=True) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("-o", "--outdir", type=str, + help="Path to the output directory") + args = parser.parse_args() + + if not args.outdir: + raise ValueError("A path to the output directory is required") + else: + # Meson build + srcdir_abs = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) + outdir_abs = pathlib.Path(os.getcwd()) / args.outdir + make_boost(outdir_abs) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_ksstats.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_ksstats.py new file mode 100644 index 0000000000000000000000000000000000000000..2bc60da7bba862a1b16f4b41c66e523f985ac415 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_ksstats.py @@ -0,0 +1,600 @@ +# Compute the two-sided one-sample Kolmogorov-Smirnov Prob(Dn <= d) where: +# D_n = sup_x{|F_n(x) - F(x)|}, +# F_n(x) is the empirical CDF for a sample of size n {x_i: i=1,...,n}, +# F(x) is the CDF of a probability distribution. +# +# Exact methods: +# Prob(D_n >= d) can be computed via a matrix algorithm of Durbin[1] +# or a recursion algorithm due to Pomeranz[2]. +# Marsaglia, Tsang & Wang[3] gave a computation-efficient way to perform +# the Durbin algorithm. 
+# D_n >= d <==> D_n+ >= d or D_n- >= d (the one-sided K-S statistics), hence +# Prob(D_n >= d) = 2*Prob(D_n+ >= d) - Prob(D_n+ >= d and D_n- >= d). +# For d > 0.5, the latter intersection probability is 0. +# +# Approximate methods: +# For d close to 0.5, ignoring that intersection term may still give a +# reasonable approximation. +# Li-Chien[4] and Korolyuk[5] gave an asymptotic formula extending +# Kolmogorov's initial asymptotic, suitable for large d. (See +# scipy.special.kolmogorov for that asymptotic) +# Pelz-Good[6] used the functional equation for Jacobi theta functions to +# transform the Li-Chien/Korolyuk formula produce a computational formula +# suitable for small d. +# +# Simard and L'Ecuyer[7] provided an algorithm to decide when to use each of +# the above approaches and it is that which is used here. +# +# Other approaches: +# Carvalho[8] optimizes Durbin's matrix algorithm for large values of d. +# Moscovich and Nadler[9] use FFTs to compute the convolutions. + +# References: +# [1] Durbin J (1968). +# "The Probability that the Sample Distribution Function Lies Between Two +# Parallel Straight Lines." +# Annals of Mathematical Statistics, 39, 398-411. +# [2] Pomeranz J (1974). +# "Exact Cumulative Distribution of the Kolmogorov-Smirnov Statistic for +# Small Samples (Algorithm 487)." +# Communications of the ACM, 17(12), 703-704. +# [3] Marsaglia G, Tsang WW, Wang J (2003). +# "Evaluating Kolmogorov's Distribution." +# Journal of Statistical Software, 8(18), 1-4. +# [4] LI-CHIEN, C. (1956). +# "On the exact distribution of the statistics of A. N. Kolmogorov and +# their asymptotic expansion." +# Acta Matematica Sinica, 6, 55-81. +# [5] KOROLYUK, V. S. (1960). +# "Asymptotic analysis of the distribution of the maximum deviation in +# the Bernoulli scheme." +# Theor. Probability Appl., 4, 339-366. +# [6] Pelz W, Good IJ (1976). +# "Approximating the Lower Tail-areas of the Kolmogorov-Smirnov One-sample +# Statistic." +# Journal of the Royal Statistical Society, Series B, 38(2), 152-156. +# [7] Simard, R., L'Ecuyer, P. (2011) +# "Computing the Two-Sided Kolmogorov-Smirnov Distribution", +# Journal of Statistical Software, Vol 39, 11, 1-18. +# [8] Carvalho, Luis (2015) +# "An Improved Evaluation of Kolmogorov's Distribution" +# Journal of Statistical Software, Code Snippets; Vol 65(3), 1-8. +# [9] Amit Moscovich, Boaz Nadler (2017) +# "Fast calculation of boundary crossing probabilities for Poisson +# processes", +# Statistics & Probability Letters, Vol 123, 177-182. + + +import numpy as np +import scipy.special +import scipy.special._ufuncs as scu +from scipy._lib._finite_differences import _derivative + +_E128 = 128 +_EP128 = np.ldexp(np.longdouble(1), _E128) +_EM128 = np.ldexp(np.longdouble(1), -_E128) + +_SQRT2PI = np.sqrt(2 * np.pi) +_LOG_2PI = np.log(2 * np.pi) +_MIN_LOG = -708 +_SQRT3 = np.sqrt(3) +_PI_SQUARED = np.pi ** 2 +_PI_FOUR = np.pi ** 4 +_PI_SIX = np.pi ** 6 + +# [Lifted from _loggamma.pxd.] If B_m are the Bernoulli numbers, +# then Stirling coeffs are B_{2j}/(2j)/(2j-1) for j=8,...1. +_STIRLING_COEFFS = [-2.955065359477124183e-2, 6.4102564102564102564e-3, + -1.9175269175269175269e-3, 8.4175084175084175084e-4, + -5.952380952380952381e-4, 7.9365079365079365079e-4, + -2.7777777777777777778e-3, 8.3333333333333333333e-2] + + +def _log_nfactorial_div_n_pow_n(n): + # Computes n! / n**n + # = (n-1)! / n**(n-1) + # Uses Stirling's approximation, but removes n*log(n) up-front to + # avoid subtractive cancellation. 
+ # = log(n)/2 - n + log(sqrt(2pi)) + sum B_{2j}/(2j)/(2j-1)/n**(2j-1) + rn = 1.0/n + return np.log(n)/2 - n + _LOG_2PI/2 + rn * np.polyval(_STIRLING_COEFFS, rn/n) + + +def _clip_prob(p): + """clips a probability to range 0<=p<=1.""" + return np.clip(p, 0.0, 1.0) + + +def _select_and_clip_prob(cdfprob, sfprob, cdf=True): + """Selects either the CDF or SF, and then clips to range 0<=p<=1.""" + p = np.where(cdf, cdfprob, sfprob) + return _clip_prob(p) + + +def _kolmogn_DMTW(n, d, cdf=True): + r"""Computes the Kolmogorov CDF: Pr(D_n <= d) using the MTW approach to + the Durbin matrix algorithm. + + Durbin (1968); Marsaglia, Tsang, Wang (2003). [1], [3]. + """ + # Write d = (k-h)/n, where k is positive integer and 0 <= h < 1 + # Generate initial matrix H of size m*m where m=(2k-1) + # Compute k-th row of (n!/n^n) * H^n, scaling intermediate results. + # Requires memory O(m^2) and computation O(m^2 log(n)). + # Most suitable for small m. + + if d >= 1.0: + return _select_and_clip_prob(1.0, 0.0, cdf) + nd = n * d + if nd <= 0.5: + return _select_and_clip_prob(0.0, 1.0, cdf) + k = int(np.ceil(nd)) + h = k - nd + m = 2 * k - 1 + + H = np.zeros([m, m]) + + # Initialize: v is first column (and last row) of H + # v[j] = (1-h^(j+1)/(j+1)! (except for v[-1]) + # w[j] = 1/(j)! + # q = k-th row of H (actually i!/n^i*H^i) + intm = np.arange(1, m + 1) + v = 1.0 - h ** intm + w = np.empty(m) + fac = 1.0 + for j in intm: + w[j - 1] = fac + fac /= j # This might underflow. Isn't a problem. + v[j - 1] *= fac + tt = max(2 * h - 1.0, 0)**m - 2*h**m + v[-1] = (1.0 + tt) * fac + + for i in range(1, m): + H[i - 1:, i] = w[:m - i + 1] + H[:, 0] = v + H[-1, :] = np.flip(v, axis=0) + + Hpwr = np.eye(np.shape(H)[0]) # Holds intermediate powers of H + nn = n + expnt = 0 # Scaling of Hpwr + Hexpnt = 0 # Scaling of H + while nn > 0: + if nn % 2: + Hpwr = np.matmul(Hpwr, H) + expnt += Hexpnt + H = np.matmul(H, H) + Hexpnt *= 2 + # Scale as needed. + if np.abs(H[k - 1, k - 1]) > _EP128: + H /= _EP128 + Hexpnt += _E128 + nn = nn // 2 + + p = Hpwr[k - 1, k - 1] + + # Multiply by n!/n^n + for i in range(1, n + 1): + p = i * p / n + if np.abs(p) < _EM128: + p *= _EP128 + expnt -= _E128 + + # unscale + if expnt != 0: + p = np.ldexp(p, expnt) + + return _select_and_clip_prob(p, 1.0-p, cdf) + + +def _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf): + """Compute the endpoints of the interval for row i.""" + if i == 0: + j1, j2 = -ll - ceilf - 1, ll + ceilf - 1 + else: + # i + 1 = 2*ip1div2 + ip1mod2 + ip1div2, ip1mod2 = divmod(i + 1, 2) + if ip1mod2 == 0: # i is odd + if ip1div2 == n + 1: + j1, j2 = n - ll - ceilf - 1, n + ll + ceilf - 1 + else: + j1, j2 = ip1div2 - 1 - ll - roundf - 1, ip1div2 + ll - 1 + ceilf - 1 + else: + j1, j2 = ip1div2 - 1 - ll - 1, ip1div2 + ll + roundf - 1 + + return max(j1 + 2, 0), min(j2, n) + + +def _kolmogn_Pomeranz(n, x, cdf=True): + r"""Computes Pr(D_n <= d) using the Pomeranz recursion algorithm. + + Pomeranz (1974) [2] + """ + + # V is n*(2n+2) matrix. + # Each row is convolution of the previous row and probabilities from a + # Poisson distribution. + # Desired CDF probability is n! V[n-1, 2n+1] (final entry in final row). + # Only two rows are needed at any given stage: + # - Call them V0 and V1. + # - Swap each iteration + # Only a few (contiguous) entries in each row can be non-zero. + # - Keep track of start and end (j1 and j2 below) + # - V0s and V1s track the start in the two rows + # Scale intermediate results as needed. 
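+    # Each of the 2n+1 recursion steps convolves at most npwrs = 2*(ll+1)
+    # non-zero entries (see below), so memory use and per-step work stay
+    # modest even for fairly large n.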
+ # Only a few different Poisson distributions can occur + t = n * x + ll = int(np.floor(t)) + f = 1.0 * (t - ll) # fractional part of t + g = min(f, 1.0 - f) + ceilf = (1 if f > 0 else 0) + roundf = (1 if f > 0.5 else 0) + npwrs = 2 * (ll + 1) # Maximum number of powers needed in convolutions + gpower = np.empty(npwrs) # gpower = (g/n)^m/m! + twogpower = np.empty(npwrs) # twogpower = (2g/n)^m/m! + onem2gpower = np.empty(npwrs) # onem2gpower = ((1-2g)/n)^m/m! + # gpower etc are *almost* Poisson probs, just missing normalizing factor. + + gpower[0] = 1.0 + twogpower[0] = 1.0 + onem2gpower[0] = 1.0 + expnt = 0 + g_over_n, two_g_over_n, one_minus_two_g_over_n = g/n, 2*g/n, (1 - 2*g)/n + for m in range(1, npwrs): + gpower[m] = gpower[m - 1] * g_over_n / m + twogpower[m] = twogpower[m - 1] * two_g_over_n / m + onem2gpower[m] = onem2gpower[m - 1] * one_minus_two_g_over_n / m + + V0 = np.zeros([npwrs]) + V1 = np.zeros([npwrs]) + V1[0] = 1 # first row + V0s, V1s = 0, 0 # start indices of the two rows + + j1, j2 = _pomeranz_compute_j1j2(0, n, ll, ceilf, roundf) + for i in range(1, 2 * n + 2): + # Preserve j1, V1, V1s, V0s from last iteration + k1 = j1 + V0, V1 = V1, V0 + V0s, V1s = V1s, V0s + V1.fill(0.0) + j1, j2 = _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf) + if i == 1 or i == 2 * n + 1: + pwrs = gpower + else: + pwrs = (twogpower if i % 2 else onem2gpower) + ln2 = j2 - k1 + 1 + if ln2 > 0: + conv = np.convolve(V0[k1 - V0s:k1 - V0s + ln2], pwrs[:ln2]) + conv_start = j1 - k1 # First index to use from conv + conv_len = j2 - j1 + 1 # Number of entries to use from conv + V1[:conv_len] = conv[conv_start:conv_start + conv_len] + # Scale to avoid underflow. + if 0 < np.max(V1) < _EM128: + V1 *= _EP128 + expnt -= _E128 + V1s = V0s + j1 - k1 + + # multiply by n! + ans = V1[n - V1s] + for m in range(1, n + 1): + if np.abs(ans) > _EP128: + ans *= _EM128 + expnt += _E128 + ans *= m + + # Undo any intermediate scaling + if expnt != 0: + ans = np.ldexp(ans, expnt) + ans = _select_and_clip_prob(ans, 1.0 - ans, cdf) + return ans + + +def _kolmogn_PelzGood(n, x, cdf=True): + """Computes the Pelz-Good approximation to Prob(Dn <= x) with 0<=x<=1. + + Start with Li-Chien, Korolyuk approximation: + Prob(Dn <= x) ~ K0(z) + K1(z)/sqrt(n) + K2(z)/n + K3(z)/n**1.5 + where z = x*sqrt(n). + Transform each K_(z) using Jacobi theta functions into a form suitable + for small z. + Pelz-Good (1976). [6] + """ + if x <= 0.0: + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + if x >= 1.0: + return _select_and_clip_prob(1.0, 0.0, cdf=cdf) + + z = np.sqrt(n) * x + zsquared, zthree, zfour, zsix = z**2, z**3, z**4, z**6 + + qlog = -_PI_SQUARED / 8 / zsquared + if qlog < _MIN_LOG: # z ~ 0.041743441416853426 + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + + q = np.exp(qlog) + + # Coefficients of terms in the sums for K1, K2 and K3 + k1a = -zsquared + k1b = _PI_SQUARED / 4 + + k2a = 6 * zsix + 2 * zfour + k2b = (2 * zfour - 5 * zsquared) * _PI_SQUARED / 4 + k2c = _PI_FOUR * (1 - 2 * zsquared) / 16 + + k3d = _PI_SIX * (5 - 30 * zsquared) / 64 + k3c = _PI_FOUR * (-60 * zsquared + 212 * zfour) / 16 + k3b = _PI_SQUARED * (135 * zfour - 96 * zsix) / 4 + k3a = -30 * zsix - 90 * z**8 + + K0to3 = np.zeros(4) + # Use a Horner scheme to evaluate sum c_i q^(i^2) + # Reduces to a sum over odd integers. 
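+    # (With m = 2*k - 1, consecutive exponents differ by
+    #  (2*k+1)**2 - (2*k-1)**2 = 8*k, so multiplying the accumulator by
+    #  q**(8*k) at step k, plus the final multiplication by q, gives each
+    #  coefficient its q**(m**2) factor.)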
+ maxk = int(np.ceil(16 * z / np.pi)) + for k in range(maxk, 0, -1): + m = 2 * k - 1 + msquared, mfour, msix = m**2, m**4, m**6 + qpower = np.power(q, 8 * k) + coeffs = np.array([1.0, + k1a + k1b*msquared, + k2a + k2b*msquared + k2c*mfour, + k3a + k3b*msquared + k3c*mfour + k3d*msix]) + K0to3 *= qpower + K0to3 += coeffs + K0to3 *= q + K0to3 *= _SQRT2PI + # z**10 > 0 as z > 0.04 + K0to3 /= np.array([z, 6 * zfour, 72 * z**7, 6480 * z**10]) + + # Now do the other sum over the other terms, all integers k + # K_2: (pi^2 k^2) q^(k^2), + # K_3: (3pi^2 k^2 z^2 - pi^4 k^4)*q^(k^2) + # Don't expect much subtractive cancellation so use direct calculation + q = np.exp(-_PI_SQUARED / 2 / zsquared) + ks = np.arange(maxk, 0, -1) + ksquared = ks ** 2 + sqrt3z = _SQRT3 * z + kspi = np.pi * ks + qpwers = q ** ksquared + k2extra = np.sum(ksquared * qpwers) + k2extra *= _PI_SQUARED * _SQRT2PI/(-36 * zthree) + K0to3[2] += k2extra + k3extra = np.sum((sqrt3z + kspi) * (sqrt3z - kspi) * ksquared * qpwers) + k3extra *= _PI_SQUARED * _SQRT2PI/(216 * zsix) + K0to3[3] += k3extra + powers_of_n = np.power(n * 1.0, np.arange(len(K0to3)) / 2.0) + K0to3 /= powers_of_n + + if not cdf: + K0to3 *= -1 + K0to3[0] += 1 + + Ksum = sum(K0to3) + return Ksum + + +def _kolmogn(n, x, cdf=True): + """Computes the CDF(or SF) for the two-sided Kolmogorov-Smirnov statistic. + + x must be of type float, n of type integer. + + Simard & L'Ecuyer (2011) [7]. + """ + if np.isnan(n): + return n # Keep the same type of nan + if int(n) != n or n <= 0: + return np.nan + if x >= 1.0: + return _select_and_clip_prob(1.0, 0.0, cdf=cdf) + if x <= 0.0: + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + t = n * x + if t <= 1.0: # Ruben-Gambino: 1/2n <= x <= 1/n + if t <= 0.5: + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + if n <= 140: + prob = np.prod(np.arange(1, n+1) * (1.0/n) * (2*t - 1)) + else: + prob = np.exp(_log_nfactorial_div_n_pow_n(n) + n * np.log(2*t-1)) + return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf) + if t >= n - 1: # Ruben-Gambino + prob = 2 * (1.0 - x)**n + return _select_and_clip_prob(1 - prob, prob, cdf=cdf) + if x >= 0.5: # Exact: 2 * smirnov + prob = 2 * scipy.special.smirnov(n, x) + return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf) + + nxsquared = t * x + if n <= 140: + if nxsquared <= 0.754693: + prob = _kolmogn_DMTW(n, x, cdf=True) + return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf) + if nxsquared <= 4: + prob = _kolmogn_Pomeranz(n, x, cdf=True) + return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf) + # Now use Miller approximation of 2*smirnov + prob = 2 * scipy.special.smirnov(n, x) + return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf) + + # Split CDF and SF as they have different cutoffs on nxsquared. + if not cdf: + if nxsquared >= 370.0: + return 0.0 + if nxsquared >= 2.2: + prob = 2 * scipy.special.smirnov(n, x) + return _clip_prob(prob) + # Fall through and compute the SF as 1.0-CDF + if nxsquared >= 18.0: + cdfprob = 1.0 + elif n <= 100000 and n * x**1.5 <= 1.4: + cdfprob = _kolmogn_DMTW(n, x, cdf=True) + else: + cdfprob = _kolmogn_PelzGood(n, x, cdf=True) + return _select_and_clip_prob(cdfprob, 1.0 - cdfprob, cdf=cdf) + + +def _kolmogn_p(n, x): + """Computes the PDF for the two-sided Kolmogorov-Smirnov statistic. + + x must be of type float, n of type integer. 
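+
+    Closed-form expressions are used where available (the Ruben-Gambino
+    tails and ``x >= 0.5``); otherwise the PDF is estimated with a
+    finite-difference derivative of `kolmogn`.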
+ """ + if np.isnan(n): + return n # Keep the same type of nan + if int(n) != n or n <= 0: + return np.nan + if x >= 1.0 or x <= 0: + return 0 + t = n * x + if t <= 1.0: + # Ruben-Gambino: n!/n^n * (2t-1)^n -> 2 n!/n^n * n^2 * (2t-1)^(n-1) + if t <= 0.5: + return 0.0 + if n <= 140: + prd = np.prod(np.arange(1, n) * (1.0 / n) * (2 * t - 1)) + else: + prd = np.exp(_log_nfactorial_div_n_pow_n(n) + (n-1) * np.log(2 * t - 1)) + return prd * 2 * n**2 + if t >= n - 1: + # Ruben-Gambino : 1-2(1-x)**n -> 2n*(1-x)**(n-1) + return 2 * (1.0 - x) ** (n-1) * n + if x >= 0.5: + return 2 * scipy.stats.ksone.pdf(x, n) + + # Just take a small delta. + # Ideally x +/- delta would stay within [i/n, (i+1)/n] for some integer a. + # as the CDF is a piecewise degree n polynomial. + # It has knots at 1/n, 2/n, ... (n-1)/n + # and is not a C-infinity function at the knots + delta = x / 2.0**16 + delta = min(delta, x - 1.0/n) + delta = min(delta, 0.5 - x) + + def _kk(_x): + return kolmogn(n, _x) + + return _derivative(_kk, x, dx=delta, order=5) + + +def _kolmogni(n, p, q): + """Computes the PPF/ISF of kolmogn. + + n of type integer, n>= 1 + p is the CDF, q the SF, p+q=1 + """ + if np.isnan(n): + return n # Keep the same type of nan + if int(n) != n or n <= 0: + return np.nan + if p <= 0: + return 1.0/n + if q <= 0: + return 1.0 + delta = np.exp((np.log(p) - scipy.special.loggamma(n+1))/n) + if delta <= 1.0/n: + return (delta + 1.0 / n) / 2 + x = -np.expm1(np.log(q/2.0)/n) + if x >= 1 - 1.0/n: + return x + x1 = scu._kolmogci(p)/np.sqrt(n) + x1 = min(x1, 1.0 - 1.0/n) + + def _f(x): + return _kolmogn(n, x) - p + + return scipy.optimize.brentq(_f, 1.0/n, x1, xtol=1e-14) + + +def kolmogn(n, x, cdf=True): + """Computes the CDF for the two-sided Kolmogorov-Smirnov distribution. + + The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x), + for a sample of size n drawn from a distribution with CDF F(t), where + :math:`D_n &= sup_t |F_n(t) - F(t)|`, and + :math:`F_n(t)` is the Empirical Cumulative Distribution Function of the sample. + + Parameters + ---------- + n : integer, array_like + the number of samples + x : float, array_like + The K-S statistic, float between 0 and 1 + cdf : bool, optional + whether to compute the CDF(default=true) or the SF. + + Returns + ------- + cdf : ndarray + CDF (or SF it cdf is False) at the specified locations. + + The return value has shape the result of numpy broadcasting n and x. + """ + it = np.nditer([n, x, cdf, None], + op_dtypes=[None, np.float64, np.bool_, np.float64]) + for _n, _x, _cdf, z in it: + if np.isnan(_n): + z[...] = _n + continue + if int(_n) != _n: + raise ValueError(f'n is not integral: {_n}') + z[...] = _kolmogn(int(_n), _x, cdf=_cdf) + result = it.operands[-1] + return result + + +def kolmognp(n, x): + """Computes the PDF for the two-sided Kolmogorov-Smirnov distribution. + + Parameters + ---------- + n : integer, array_like + the number of samples + x : float, array_like + The K-S statistic, float between 0 and 1 + + Returns + ------- + pdf : ndarray + The PDF at the specified locations + + The return value has shape the result of numpy broadcasting n and x. + """ + it = np.nditer([n, x, None]) + for _n, _x, z in it: + if np.isnan(_n): + z[...] = _n + continue + if int(_n) != _n: + raise ValueError(f'n is not integral: {_n}') + z[...] = _kolmogn_p(int(_n), _x) + result = it.operands[-1] + return result + + +def kolmogni(n, q, cdf=True): + """Computes the PPF(or ISF) for the two-sided Kolmogorov-Smirnov distribution. 
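+
+    Returns the statistic ``x`` at which the CDF equals ``q`` (or, when
+    ``cdf`` is False, at which the SF equals ``q``).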
+ + Parameters + ---------- + n : integer, array_like + the number of samples + q : float, array_like + Probabilities, float between 0 and 1 + cdf : bool, optional + whether to compute the PPF(default=true) or the ISF. + + Returns + ------- + ppf : ndarray + PPF (or ISF if cdf is False) at the specified locations + + The return value has shape the result of numpy broadcasting n and x. + """ + it = np.nditer([n, q, cdf, None]) + for _n, _q, _cdf, z in it: + if np.isnan(_n): + z[...] = _n + continue + if int(_n) != _n: + raise ValueError(f'n is not integral: {_n}') + _pcdf, _psf = (_q, 1-_q) if _cdf else (1-_q, _q) + z[...] = _kolmogni(int(_n), _pcdf, _psf) + result = it.operands[-1] + return result diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_multicomp.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_multicomp.py new file mode 100644 index 0000000000000000000000000000000000000000..c12ce65a91dbb0a6fed48e06127f8902ca71b9bf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_multicomp.py @@ -0,0 +1,459 @@ +from __future__ import annotations + +import warnings +from dataclasses import dataclass, field +from typing import TYPE_CHECKING + +import numpy as np + +from scipy import stats +from scipy.optimize import minimize_scalar +from scipy.stats._common import ConfidenceInterval +from scipy.stats._qmc import check_random_state +from scipy.stats._stats_py import _var + +if TYPE_CHECKING: + import numpy.typing as npt + from scipy._lib._util import DecimalNumber, SeedType + from typing import Literal, Sequence # noqa: UP035 + + +__all__ = [ + 'dunnett' +] + + +@dataclass +class DunnettResult: + """Result object returned by `scipy.stats.dunnett`. + + Attributes + ---------- + statistic : float ndarray + The computed statistic of the test for each comparison. The element + at index ``i`` is the statistic for the comparison between + groups ``i`` and the control. + pvalue : float ndarray + The computed p-value of the test for each comparison. The element + at index ``i`` is the p-value for the comparison between + group ``i`` and the control. + """ + statistic: np.ndarray + pvalue: np.ndarray + _alternative: Literal['two-sided', 'less', 'greater'] = field(repr=False) + _rho: np.ndarray = field(repr=False) + _df: int = field(repr=False) + _std: float = field(repr=False) + _mean_samples: np.ndarray = field(repr=False) + _mean_control: np.ndarray = field(repr=False) + _n_samples: np.ndarray = field(repr=False) + _n_control: int = field(repr=False) + _rng: SeedType = field(repr=False) + _ci: ConfidenceInterval | None = field(default=None, repr=False) + _ci_cl: DecimalNumber | None = field(default=None, repr=False) + + def __str__(self): + # Note: `__str__` prints the confidence intervals from the most + # recent call to `confidence_interval`. If it has not been called, + # it will be called with the default CL of .95. + if self._ci is None: + self.confidence_interval(confidence_level=.95) + s = ( + "Dunnett's test" + f" ({self._ci_cl*100:.1f}% Confidence Interval)\n" + "Comparison Statistic p-value Lower CI Upper CI\n" + ) + for i in range(self.pvalue.size): + s += (f" (Sample {i} - Control) {self.statistic[i]:>10.3f}" + f"{self.pvalue[i]:>10.3f}" + f"{self._ci.low[i]:>10.3f}" + f"{self._ci.high[i]:>10.3f}\n") + + return s + + def _allowance( + self, confidence_level: DecimalNumber = 0.95, tol: DecimalNumber = 1e-3 + ) -> float: + """Allowance. 
+ + It is the quantity to add/subtract from the observed difference + between the means of observed groups and the mean of the control + group. The result gives confidence limits. + + Parameters + ---------- + confidence_level : float, optional + Confidence level for the computed confidence interval. + Default is .95. + tol : float, optional + A tolerance for numerical optimization: the allowance will produce + a confidence within ``10*tol*(1 - confidence_level)`` of the + specified level, or a warning will be emitted. Tight tolerances + may be impractical due to noisy evaluation of the objective. + Default is 1e-3. + + Returns + ------- + allowance : float + Allowance around the mean. + """ + alpha = 1 - confidence_level + + def pvalue_from_stat(statistic): + statistic = np.array(statistic) + sf = _pvalue_dunnett( + rho=self._rho, df=self._df, + statistic=statistic, alternative=self._alternative, + rng=self._rng + ) + return abs(sf - alpha)/alpha + + # Evaluation of `pvalue_from_stat` is noisy due to the use of RQMC to + # evaluate `multivariate_t.cdf`. `minimize_scalar` is not designed + # to tolerate a noisy objective function and may fail to find the + # minimum accurately. We mitigate this possibility with the validation + # step below, but implementation of a noise-tolerant root finder or + # minimizer would be a welcome enhancement. See gh-18150. + res = minimize_scalar(pvalue_from_stat, method='brent', tol=tol) + critical_value = res.x + + # validation + # tol*10 because tol=1e-3 means we tolerate a 1% change at most + if res.success is False or res.fun >= tol*10: + warnings.warn( + "Computation of the confidence interval did not converge to " + "the desired level. The confidence level corresponding with " + f"the returned interval is approximately {alpha*(1+res.fun)}.", + stacklevel=3 + ) + + # From [1] p. 1101 between (1) and (3) + allowance = critical_value*self._std*np.sqrt( + 1/self._n_samples + 1/self._n_control + ) + return abs(allowance) + + def confidence_interval( + self, confidence_level: DecimalNumber = 0.95 + ) -> ConfidenceInterval: + """Compute the confidence interval for the specified confidence level. + + Parameters + ---------- + confidence_level : float, optional + Confidence level for the computed confidence interval. + Default is .95. + + Returns + ------- + ci : ``ConfidenceInterval`` object + The object has attributes ``low`` and ``high`` that hold the + lower and upper bounds of the confidence intervals for each + comparison. The high and low values are accessible for each + comparison at index ``i`` for each group ``i``. + + """ + # check to see if the supplied confidence level matches that of the + # previously computed CI. 
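+        # If it does, reuse the cached interval instead of repeating the
+        # randomized quasi-Monte Carlo integration inside `_allowance`.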
+ if (self._ci is not None) and (confidence_level == self._ci_cl): + return self._ci + + if not (0 < confidence_level < 1): + raise ValueError("Confidence level must be between 0 and 1.") + + allowance = self._allowance(confidence_level=confidence_level) + diff_means = self._mean_samples - self._mean_control + + low = diff_means-allowance + high = diff_means+allowance + + if self._alternative == 'greater': + high = [np.inf] * len(diff_means) + elif self._alternative == 'less': + low = [-np.inf] * len(diff_means) + + self._ci_cl = confidence_level + self._ci = ConfidenceInterval( + low=low, + high=high + ) + return self._ci + + +def dunnett( + *samples: npt.ArrayLike, # noqa: D417 + control: npt.ArrayLike, + alternative: Literal['two-sided', 'less', 'greater'] = "two-sided", + random_state: SeedType = None +) -> DunnettResult: + """Dunnett's test: multiple comparisons of means against a control group. + + This is an implementation of Dunnett's original, single-step test as + described in [1]_. + + Parameters + ---------- + sample1, sample2, ... : 1D array_like + The sample measurements for each experimental group. + control : 1D array_like + The sample measurements for the control group. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + + The null hypothesis is that the means of the distributions underlying + the samples and control are equal. The following alternative + hypotheses are available (default is 'two-sided'): + + * 'two-sided': the means of the distributions underlying the samples + and control are unequal. + * 'less': the means of the distributions underlying the samples + are less than the mean of the distribution underlying the control. + * 'greater': the means of the distributions underlying the + samples are greater than the mean of the distribution underlying + the control. + random_state : {None, int, `numpy.random.Generator`}, optional + If `random_state` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(random_state)``. + If `random_state` is already a ``Generator`` instance, then the + provided instance is used. + + The random number generator is used to control the randomized + Quasi-Monte Carlo integration of the multivariate-t distribution. + + Returns + ------- + res : `~scipy.stats._result_classes.DunnettResult` + An object containing attributes: + + statistic : float ndarray + The computed statistic of the test for each comparison. The element + at index ``i`` is the statistic for the comparison between + groups ``i`` and the control. + pvalue : float ndarray + The computed p-value of the test for each comparison. The element + at index ``i`` is the p-value for the comparison between + group ``i`` and the control. + + And the following method: + + confidence_interval(confidence_level=0.95) : + Compute the difference in means of the groups + with the control +- the allowance. + + See Also + -------- + tukey_hsd : performs pairwise comparison of means. + + Notes + ----- + Like the independent-sample t-test, Dunnett's test [1]_ is used to make + inferences about the means of distributions from which samples were drawn. + However, when multiple t-tests are performed at a fixed significance level, + the "family-wise error rate" - the probability of incorrectly rejecting the + null hypothesis in at least one test - will exceed the significance level. + Dunnett's test is designed to perform multiple comparisons while + controlling the family-wise error rate. 
+ + Dunnett's test compares the means of multiple experimental groups + against a single control group. Tukey's Honestly Significant Difference Test + is another multiple-comparison test that controls the family-wise error + rate, but `tukey_hsd` performs *all* pairwise comparisons between groups. + When pairwise comparisons between experimental groups are not needed, + Dunnett's test is preferable due to its higher power. + + + The use of this test relies on several assumptions. + + 1. The observations are independent within and among groups. + 2. The observations within each group are normally distributed. + 3. The distributions from which the samples are drawn have the same finite + variance. + + References + ---------- + .. [1] Charles W. Dunnett. "A Multiple Comparison Procedure for Comparing + Several Treatments with a Control." + Journal of the American Statistical Association, 50:272, 1096-1121, + :doi:`10.1080/01621459.1955.10501294`, 1955. + + Examples + -------- + In [1]_, the influence of drugs on blood count measurements on three groups + of animal is investigated. + + The following table summarizes the results of the experiment in which + two groups received different drugs, and one group acted as a control. + Blood counts (in millions of cells per cubic millimeter) were recorded:: + + >>> import numpy as np + >>> control = np.array([7.40, 8.50, 7.20, 8.24, 9.84, 8.32]) + >>> drug_a = np.array([9.76, 8.80, 7.68, 9.36]) + >>> drug_b = np.array([12.80, 9.68, 12.16, 9.20, 10.55]) + + We would like to see if the means between any of the groups are + significantly different. First, visually examine a box and whisker plot. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(1, 1) + >>> ax.boxplot([control, drug_a, drug_b]) + >>> ax.set_xticklabels(["Control", "Drug A", "Drug B"]) # doctest: +SKIP + >>> ax.set_ylabel("mean") # doctest: +SKIP + >>> plt.show() + + Note the overlapping interquartile ranges of the drug A group and control + group and the apparent separation between the drug B group and control + group. + + Next, we will use Dunnett's test to assess whether the difference + between group means is significant while controlling the family-wise error + rate: the probability of making any false discoveries. + Let the null hypothesis be that the experimental groups have the same + mean as the control and the alternative be that an experimental group does + not have the same mean as the control. We will consider a 5% family-wise + error rate to be acceptable, and therefore we choose 0.05 as the threshold + for significance. + + >>> from scipy.stats import dunnett + >>> res = dunnett(drug_a, drug_b, control=control) + >>> res.pvalue + array([0.62004941, 0.0059035 ]) # may vary + + The p-value corresponding with the comparison between group A and control + exceeds 0.05, so we do not reject the null hypothesis for that comparison. + However, the p-value corresponding with the comparison between group B + and control is less than 0.05, so we consider the experimental results + to be evidence against the null hypothesis in favor of the alternative: + group B has a different mean than the control group. 
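+
+    To get a sense of the effect sizes, we can also compute simultaneous
+    confidence intervals for the differences between each group mean and
+    the control mean. The exact bounds vary slightly from run to run
+    because the multivariate t distribution is integrated with randomized
+    quasi-Monte Carlo:
+
+    >>> ci = res.confidence_interval(confidence_level=0.95)
+    >>> ci.low < ci.high  # two finite, ordered bounds per comparison
+    array([ True,  True])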
+ + """ + samples_, control_, rng = _iv_dunnett( + samples=samples, control=control, + alternative=alternative, random_state=random_state + ) + + rho, df, n_group, n_samples, n_control = _params_dunnett( + samples=samples_, control=control_ + ) + + statistic, std, mean_control, mean_samples = _statistic_dunnett( + samples_, control_, df, n_samples, n_control + ) + + pvalue = _pvalue_dunnett( + rho=rho, df=df, statistic=statistic, alternative=alternative, rng=rng + ) + + return DunnettResult( + statistic=statistic, pvalue=pvalue, + _alternative=alternative, + _rho=rho, _df=df, _std=std, + _mean_samples=mean_samples, + _mean_control=mean_control, + _n_samples=n_samples, + _n_control=n_control, + _rng=rng + ) + + +def _iv_dunnett( + samples: Sequence[npt.ArrayLike], + control: npt.ArrayLike, + alternative: Literal['two-sided', 'less', 'greater'], + random_state: SeedType +) -> tuple[list[np.ndarray], np.ndarray, SeedType]: + """Input validation for Dunnett's test.""" + rng = check_random_state(random_state) + + if alternative not in {'two-sided', 'less', 'greater'}: + raise ValueError( + "alternative must be 'less', 'greater' or 'two-sided'" + ) + + ndim_msg = "Control and samples groups must be 1D arrays" + n_obs_msg = "Control and samples groups must have at least 1 observation" + + control = np.asarray(control) + samples_ = [np.asarray(sample) for sample in samples] + + # samples checks + samples_control: list[np.ndarray] = samples_ + [control] + for sample in samples_control: + if sample.ndim > 1: + raise ValueError(ndim_msg) + + if sample.size < 1: + raise ValueError(n_obs_msg) + + return samples_, control, rng + + +def _params_dunnett( + samples: list[np.ndarray], control: np.ndarray +) -> tuple[np.ndarray, int, int, np.ndarray, int]: + """Specific parameters for Dunnett's test. + + Degree of freedom is the number of observations minus the number of groups + including the control. + """ + n_samples = np.array([sample.size for sample in samples]) + + # From [1] p. 1100 d.f. = (sum N)-(p+1) + n_sample = n_samples.sum() + n_control = control.size + n = n_sample + n_control + n_groups = len(samples) + df = n - n_groups - 1 + + # From [1] p. 1103 rho_ij = 1/sqrt((N0/Ni+1)(N0/Nj+1)) + rho = n_control/n_samples + 1 + rho = 1/np.sqrt(rho[:, None] * rho[None, :]) + np.fill_diagonal(rho, 1) + + return rho, df, n_groups, n_samples, n_control + + +def _statistic_dunnett( + samples: list[np.ndarray], control: np.ndarray, df: int, + n_samples: np.ndarray, n_control: int +) -> tuple[np.ndarray, float, np.ndarray, np.ndarray]: + """Statistic of Dunnett's test. + + Computation based on the original single-step test from [1]. + """ + mean_control = np.mean(control) + mean_samples = np.array([np.mean(sample) for sample in samples]) + all_samples = [control] + samples + all_means = np.concatenate([[mean_control], mean_samples]) + + # Variance estimate s^2 from [1] Eq. 1 + s2 = np.sum([_var(sample, mean=mean)*sample.size + for sample, mean in zip(all_samples, all_means)]) / df + std = np.sqrt(s2) + + # z score inferred from [1] unlabeled equation after Eq. 1 + z = (mean_samples - mean_control) / np.sqrt(1/n_samples + 1/n_control) + + return z / std, std, mean_control, mean_samples + + +def _pvalue_dunnett( + rho: np.ndarray, df: int, statistic: np.ndarray, + alternative: Literal['two-sided', 'less', 'greater'], + rng: SeedType = None +) -> np.ndarray: + """pvalue from the multivariate t-distribution. + + Critical values come from the multivariate student-t distribution. 
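+    The CDF of ``stats.multivariate_t`` (shape matrix ``rho``, ``df`` degrees
+    of freedom) is evaluated with randomized quasi-Monte Carlo, so the
+    returned p-values vary slightly with ``rng``.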
+ """ + statistic = statistic.reshape(-1, 1) + + mvt = stats.multivariate_t(shape=rho, df=df, seed=rng) + if alternative == "two-sided": + statistic = abs(statistic) + pvalue = 1 - mvt.cdf(statistic, lower_limit=-statistic) + elif alternative == "greater": + pvalue = 1 - mvt.cdf(statistic, lower_limit=-np.inf) + else: + pvalue = 1 - mvt.cdf(np.inf, lower_limit=statistic) + + return np.atleast_1d(pvalue) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_multivariate.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_multivariate.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb59ae8c68ae0f0018f8cccfd79c7fadee742ec --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_multivariate.py @@ -0,0 +1,6981 @@ +# +# Author: Joris Vankerschaver 2013 +# +import math +import numpy as np +import scipy.linalg +from scipy._lib import doccer +from scipy.special import (gammaln, psi, multigammaln, xlogy, entr, betaln, + ive, loggamma) +from scipy._lib._util import check_random_state, _lazywhere +from scipy.linalg.blas import drot, get_blas_funcs +from ._continuous_distns import norm +from ._discrete_distns import binom +from . import _mvn, _covariance, _rcont +from ._qmvnt import _qmvt +from ._morestats import directional_stats +from scipy.optimize import root_scalar + +__all__ = ['multivariate_normal', + 'matrix_normal', + 'dirichlet', + 'dirichlet_multinomial', + 'wishart', + 'invwishart', + 'multinomial', + 'special_ortho_group', + 'ortho_group', + 'random_correlation', + 'unitary_group', + 'multivariate_t', + 'multivariate_hypergeom', + 'random_table', + 'uniform_direction', + 'vonmises_fisher'] + +_LOG_2PI = np.log(2 * np.pi) +_LOG_2 = np.log(2) +_LOG_PI = np.log(np.pi) + + +_doc_random_state = """\ +seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. +""" + + +def _squeeze_output(out): + """ + Remove single-dimensional entries from array and convert to scalar, + if necessary. + """ + out = out.squeeze() + if out.ndim == 0: + out = out[()] + return out + + +def _eigvalsh_to_eps(spectrum, cond=None, rcond=None): + """Determine which eigenvalues are "small" given the spectrum. + + This is for compatibility across various linear algebra functions + that should agree about whether or not a Hermitian matrix is numerically + singular and what is its numerical matrix rank. + This is designed to be compatible with scipy.linalg.pinvh. + + Parameters + ---------- + spectrum : 1d ndarray + Array of eigenvalues of a Hermitian matrix. + cond, rcond : float, optional + Cutoff for small eigenvalues. + Singular values smaller than rcond * largest_eigenvalue are + considered zero. + If None or -1, suitable machine precision is used. + + Returns + ------- + eps : float + Magnitude cutoff for numerical negligibility. + + """ + if rcond is not None: + cond = rcond + if cond in [None, -1]: + t = spectrum.dtype.char.lower() + factor = {'f': 1E3, 'd': 1E6} + cond = factor[t] * np.finfo(t).eps + eps = cond * np.max(abs(spectrum)) + return eps + + +def _pinv_1d(v, eps=1e-5): + """A helper function for computing the pseudoinverse. 
+ + Parameters + ---------- + v : iterable of numbers + This may be thought of as a vector of eigenvalues or singular values. + eps : float + Values with magnitude no greater than eps are considered negligible. + + Returns + ------- + v_pinv : 1d float ndarray + A vector of pseudo-inverted numbers. + + """ + return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float) + + +class _PSD: + """ + Compute coordinated functions of a symmetric positive semidefinite matrix. + + This class addresses two issues. Firstly it allows the pseudoinverse, + the logarithm of the pseudo-determinant, and the rank of the matrix + to be computed using one call to eigh instead of three. + Secondly it allows these functions to be computed in a way + that gives mutually compatible results. + All of the functions are computed with a common understanding as to + which of the eigenvalues are to be considered negligibly small. + The functions are designed to coordinate with scipy.linalg.pinvh() + but not necessarily with np.linalg.det() or with np.linalg.matrix_rank(). + + Parameters + ---------- + M : array_like + Symmetric positive semidefinite matrix (2-D). + cond, rcond : float, optional + Cutoff for small eigenvalues. + Singular values smaller than rcond * largest_eigenvalue are + considered zero. + If None or -1, suitable machine precision is used. + lower : bool, optional + Whether the pertinent array data is taken from the lower + or upper triangle of M. (Default: lower) + check_finite : bool, optional + Whether to check that the input matrices contain only finite + numbers. Disabling may give a performance gain, but may result + in problems (crashes, non-termination) if the inputs do contain + infinities or NaNs. + allow_singular : bool, optional + Whether to allow a singular matrix. (Default: True) + + Notes + ----- + The arguments are similar to those of scipy.linalg.pinvh(). + + """ + + def __init__(self, M, cond=None, rcond=None, lower=True, + check_finite=True, allow_singular=True): + self._M = np.asarray(M) + + # Compute the symmetric eigendecomposition. + # Note that eigh takes care of array conversion, chkfinite, + # and assertion that the matrix is square. + s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite) + + eps = _eigvalsh_to_eps(s, cond, rcond) + if np.min(s) < -eps: + msg = "The input matrix must be symmetric positive semidefinite." + raise ValueError(msg) + d = s[s > eps] + if len(d) < len(s) and not allow_singular: + msg = ("When `allow_singular is False`, the input matrix must be " + "symmetric positive definite.") + raise np.linalg.LinAlgError(msg) + s_pinv = _pinv_1d(s, eps) + U = np.multiply(u, np.sqrt(s_pinv)) + + # Save the eigenvector basis, and tolerance for testing support + self.eps = 1e3*eps + self.V = u[:, s <= eps] + + # Initialize the eagerly precomputed attributes. + self.rank = len(d) + self.U = U + self.log_pdet = np.sum(np.log(d)) + + # Initialize attributes to be lazily computed. + self._pinv = None + + def _support_mask(self, x): + """ + Check whether x lies in the support of the distribution. + """ + residual = np.linalg.norm(x @ self.V, axis=-1) + in_support = residual < self.eps + return in_support + + @property + def pinv(self): + if self._pinv is None: + self._pinv = np.dot(self.U, self.U.T) + return self._pinv + + +class multi_rv_generic: + """ + Class which encapsulates common functionality between all multivariate + distributions. 
+ """ + def __init__(self, seed=None): + super().__init__() + self._random_state = check_random_state(seed) + + @property + def random_state(self): + """ Get or set the Generator object for generating random variates. + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + """ + return self._random_state + + @random_state.setter + def random_state(self, seed): + self._random_state = check_random_state(seed) + + def _get_random_state(self, random_state): + if random_state is not None: + return check_random_state(random_state) + else: + return self._random_state + + +class multi_rv_frozen: + """ + Class which encapsulates common functionality between all frozen + multivariate distributions. + """ + @property + def random_state(self): + return self._dist._random_state + + @random_state.setter + def random_state(self, seed): + self._dist._random_state = check_random_state(seed) + + +_mvn_doc_default_callparams = """\ +mean : array_like, default: ``[0]`` + Mean of the distribution. +cov : array_like or `Covariance`, default: ``[1]`` + Symmetric positive (semi)definite covariance matrix of the distribution. +allow_singular : bool, default: ``False`` + Whether to allow a singular covariance matrix. This is ignored if `cov` is + a `Covariance` object. +""" + +_mvn_doc_callparams_note = """\ +Setting the parameter `mean` to `None` is equivalent to having `mean` +be the zero-vector. The parameter `cov` can be a scalar, in which case +the covariance matrix is the identity times that value, a vector of +diagonal entries for the covariance matrix, a two-dimensional array_like, +or a `Covariance` object. +""" + +_mvn_doc_frozen_callparams = "" + +_mvn_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +mvn_docdict_params = { + '_mvn_doc_default_callparams': _mvn_doc_default_callparams, + '_mvn_doc_callparams_note': _mvn_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +mvn_docdict_noparams = { + '_mvn_doc_default_callparams': _mvn_doc_frozen_callparams, + '_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multivariate_normal_gen(multi_rv_generic): + r"""A multivariate normal random variable. + + The `mean` keyword specifies the mean. The `cov` keyword specifies the + covariance matrix. + + Methods + ------- + pdf(x, mean=None, cov=1, allow_singular=False) + Probability density function. + logpdf(x, mean=None, cov=1, allow_singular=False) + Log of the probability density function. + cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5, lower_limit=None) + Cumulative distribution function. + logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5) + Log of the cumulative distribution function. + rvs(mean=None, cov=1, size=1, random_state=None) + Draw random samples from a multivariate normal distribution. + entropy(mean=None, cov=1) + Compute the differential entropy of the multivariate normal. + fit(x, fix_mean=None, fix_cov=None) + Fit a multivariate normal distribution to data. 
+ + Parameters + ---------- + %(_mvn_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_mvn_doc_callparams_note)s + + The covariance matrix `cov` may be an instance of a subclass of + `Covariance`, e.g. `scipy.stats.CovViaPrecision`. If so, `allow_singular` + is ignored. + + Otherwise, `cov` must be a symmetric positive semidefinite + matrix when `allow_singular` is True; it must be (strictly) positive + definite when `allow_singular` is False. + Symmetry is not checked; only the lower triangular portion is used. + The determinant and inverse of `cov` are computed + as the pseudo-determinant and pseudo-inverse, respectively, so + that `cov` does not need to have full rank. + + The probability density function for `multivariate_normal` is + + .. math:: + + f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}} + \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right), + + where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix, + :math:`k` the rank of :math:`\Sigma`. In case of singular :math:`\Sigma`, + SciPy extends this definition according to [1]_. + + .. versionadded:: 0.14.0 + + References + ---------- + .. [1] Multivariate Normal Distribution - Degenerate Case, Wikipedia, + https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Degenerate_case + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import multivariate_normal + + >>> x = np.linspace(0, 5, 10, endpoint=False) + >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y + array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129, + 0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349]) + >>> fig1 = plt.figure() + >>> ax = fig1.add_subplot(111) + >>> ax.plot(x, y) + >>> plt.show() + + Alternatively, the object may be called (as a function) to fix the mean + and covariance parameters, returning a "frozen" multivariate normal + random variable: + + >>> rv = multivariate_normal(mean=None, cov=1, allow_singular=False) + >>> # Frozen object with the same methods but holding the given + >>> # mean and covariance fixed. + + The input quantiles can be any shape of array, as long as the last + axis labels the components. This allows us for instance to + display the frozen pdf for a non-isotropic random variable in 2D as + follows: + + >>> x, y = np.mgrid[-1:1:.01, -1:1:.01] + >>> pos = np.dstack((x, y)) + >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]) + >>> fig2 = plt.figure() + >>> ax2 = fig2.add_subplot(111) + >>> ax2.contourf(x, y, rv.pdf(pos)) + + """ # noqa: E501 + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params) + + def __call__(self, mean=None, cov=1, allow_singular=False, seed=None): + """Create a frozen multivariate normal distribution. + + See `multivariate_normal_frozen` for more information. + """ + return multivariate_normal_frozen(mean, cov, + allow_singular=allow_singular, + seed=seed) + + def _process_parameters(self, mean, cov, allow_singular=True): + """ + Infer dimensionality from mean or covariance matrix, ensure that + mean and covariance are full vector resp. matrix. + """ + if isinstance(cov, _covariance.Covariance): + return self._process_parameters_Covariance(mean, cov) + else: + # Before `Covariance` classes were introduced, + # `multivariate_normal` accepted plain arrays as `cov` and used the + # following input validation. 
To avoid disturbing the behavior of + # `multivariate_normal` when plain arrays are used, we use the + # original input validation here. + dim, mean, cov = self._process_parameters_psd(None, mean, cov) + # After input validation, some methods then processed the arrays + # with a `_PSD` object and used that to perform computation. + # To avoid branching statements in each method depending on whether + # `cov` is an array or `Covariance` object, we always process the + # array with `_PSD`, and then use wrapper that satisfies the + # `Covariance` interface, `CovViaPSD`. + psd = _PSD(cov, allow_singular=allow_singular) + cov_object = _covariance.CovViaPSD(psd) + return dim, mean, cov_object + + def _process_parameters_Covariance(self, mean, cov): + dim = cov.shape[-1] + mean = np.array([0.]) if mean is None else mean + message = (f"`cov` represents a covariance matrix in {dim} dimensions," + f"and so `mean` must be broadcastable to shape {(dim,)}") + try: + mean = np.broadcast_to(mean, dim) + except ValueError as e: + raise ValueError(message) from e + return dim, mean, cov + + def _process_parameters_psd(self, dim, mean, cov): + # Try to infer dimensionality + if dim is None: + if mean is None: + if cov is None: + dim = 1 + else: + cov = np.asarray(cov, dtype=float) + if cov.ndim < 2: + dim = 1 + else: + dim = cov.shape[0] + else: + mean = np.asarray(mean, dtype=float) + dim = mean.size + else: + if not np.isscalar(dim): + raise ValueError("Dimension of random variable must be " + "a scalar.") + + # Check input sizes and return full arrays for mean and cov if + # necessary + if mean is None: + mean = np.zeros(dim) + mean = np.asarray(mean, dtype=float) + + if cov is None: + cov = 1.0 + cov = np.asarray(cov, dtype=float) + + if dim == 1: + mean = mean.reshape(1) + cov = cov.reshape(1, 1) + + if mean.ndim != 1 or mean.shape[0] != dim: + raise ValueError("Array 'mean' must be a vector of length %d." % + dim) + if cov.ndim == 0: + cov = cov * np.eye(dim) + elif cov.ndim == 1: + cov = np.diag(cov) + elif cov.ndim == 2 and cov.shape != (dim, dim): + rows, cols = cov.shape + if rows != cols: + msg = ("Array 'cov' must be square if it is two dimensional," + " but cov.shape = %s." % str(cov.shape)) + else: + msg = ("Dimension mismatch: array 'cov' is of shape %s," + " but 'mean' is a vector of length %d.") + msg = msg % (str(cov.shape), len(mean)) + raise ValueError(msg) + elif cov.ndim > 2: + raise ValueError("Array 'cov' must be at most two-dimensional," + " but cov.ndim = %d" % cov.ndim) + + return dim, mean, cov + + def _process_quantiles(self, x, dim): + """ + Adjust quantiles array so that last axis labels the components of + each data point. + """ + x = np.asarray(x, dtype=float) + + if x.ndim == 0: + x = x[np.newaxis] + elif x.ndim == 1: + if dim == 1: + x = x[:, np.newaxis] + else: + x = x[np.newaxis, :] + + return x + + def _logpdf(self, x, mean, cov_object): + """Log of the multivariate normal probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + mean : ndarray + Mean of the distribution + cov_object : Covariance + An object representing the Covariance matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. 
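+
+        In outline, the value returned below is
+        ``-0.5 * (rank * log(2*pi) + log_pdet + maha)``, where ``maha`` is
+        the squared norm of the whitened deviation
+        ``cov_object.whiten(x - mean)``, i.e. the Mahalanobis distance
+        computed with the (pseudo-)inverse of the covariance matrix.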
+ + """ + log_det_cov, rank = cov_object.log_pdet, cov_object.rank + dev = x - mean + if dev.ndim > 1: + log_det_cov = log_det_cov[..., np.newaxis] + rank = rank[..., np.newaxis] + maha = np.sum(np.square(cov_object.whiten(dev)), axis=-1) + return -0.5 * (rank * _LOG_2PI + log_det_cov + maha) + + def logpdf(self, x, mean=None, cov=1, allow_singular=False): + """Log of the multivariate normal probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + x = self._process_quantiles(x, dim) + out = self._logpdf(x, mean, cov_object) + if np.any(cov_object.rank < dim): + out_of_bounds = ~cov_object._support_mask(x-mean) + out[out_of_bounds] = -np.inf + return _squeeze_output(out) + + def pdf(self, x, mean=None, cov=1, allow_singular=False): + """Multivariate normal probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + x = self._process_quantiles(x, dim) + out = np.exp(self._logpdf(x, mean, cov_object)) + if np.any(cov_object.rank < dim): + out_of_bounds = ~cov_object._support_mask(x-mean) + out[out_of_bounds] = 0.0 + return _squeeze_output(out) + + def _cdf(self, x, mean, cov, maxpts, abseps, releps, lower_limit): + """Multivariate normal cumulative distribution function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the cumulative distribution function. + mean : ndarray + Mean of the distribution + cov : array_like + Covariance matrix of the distribution + maxpts : integer + The maximum number of points to use for integration + abseps : float + Absolute error tolerance + releps : float + Relative error tolerance + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'cdf' instead. + + + .. versionadded:: 1.0.0 + + """ + lower = (np.full(mean.shape, -np.inf) + if lower_limit is None else lower_limit) + # In 2d, _mvn.mvnun accepts input in which `lower` bound elements + # are greater than `x`. Not so in other dimensions. Fix this by + # ensuring that lower bounds are indeed lower when passed, then + # set signs of resulting CDF manually. 
+ b, a = np.broadcast_arrays(x, lower) + i_swap = b < a + signs = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative + a, b = a.copy(), b.copy() + a[i_swap], b[i_swap] = b[i_swap], a[i_swap] + n = x.shape[-1] + limits = np.concatenate((a, b), axis=-1) + + # mvnun expects 1-d arguments, so process points sequentially + def func1d(limits): + return _mvn.mvnun(limits[:n], limits[n:], mean, cov, + maxpts, abseps, releps)[0] + + out = np.apply_along_axis(func1d, -1, limits) * signs + return _squeeze_output(out) + + def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, + abseps=1e-5, releps=1e-5, *, lower_limit=None): + """Log of the multivariate normal cumulative distribution function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + maxpts : integer, optional + The maximum number of points to use for integration + (default `1000000*dim`) + abseps : float, optional + Absolute error tolerance (default 1e-5) + releps : float, optional + Relative error tolerance (default 1e-5) + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. + + Returns + ------- + cdf : ndarray or scalar + Log of the cumulative distribution function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + .. versionadded:: 1.0.0 + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + cov = cov_object.covariance + x = self._process_quantiles(x, dim) + if not maxpts: + maxpts = 1000000 * dim + cdf = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit) + # the log of a negative real is complex, and cdf can be negative + # if lower limit is greater than upper limit + cdf = cdf + 0j if np.any(cdf < 0) else cdf + out = np.log(cdf) + return out + + def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, + abseps=1e-5, releps=1e-5, *, lower_limit=None): + """Multivariate normal cumulative distribution function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_mvn_doc_default_callparams)s + maxpts : integer, optional + The maximum number of points to use for integration + (default `1000000*dim`) + abseps : float, optional + Absolute error tolerance (default 1e-5) + releps : float, optional + Relative error tolerance (default 1e-5) + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. + + Returns + ------- + cdf : ndarray or scalar + Cumulative distribution function evaluated at `x` + + Notes + ----- + %(_mvn_doc_callparams_note)s + + .. versionadded:: 1.0.0 + + """ + params = self._process_parameters(mean, cov, allow_singular) + dim, mean, cov_object = params + cov = cov_object.covariance + x = self._process_quantiles(x, dim) + if not maxpts: + maxpts = 1000000 * dim + out = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit) + return out + + def rvs(self, mean=None, cov=1, size=1, random_state=None): + """Draw random samples from a multivariate normal distribution. + + Parameters + ---------- + %(_mvn_doc_default_callparams)s + size : integer, optional + Number of samples to draw (default 1). 
+ %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `N`), where `N` is the + dimension of the random variable. + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + dim, mean, cov_object = self._process_parameters(mean, cov) + random_state = self._get_random_state(random_state) + + if isinstance(cov_object, _covariance.CovViaPSD): + cov = cov_object.covariance + out = random_state.multivariate_normal(mean, cov, size) + out = _squeeze_output(out) + else: + size = size or tuple() + if not np.iterable(size): + size = (size,) + shape = tuple(size) + (cov_object.shape[-1],) + x = random_state.normal(size=shape) + out = mean + cov_object.colorize(x) + return out + + def entropy(self, mean=None, cov=1): + """Compute the differential entropy of the multivariate normal. + + Parameters + ---------- + %(_mvn_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the multivariate normal distribution + + Notes + ----- + %(_mvn_doc_callparams_note)s + + """ + dim, mean, cov_object = self._process_parameters(mean, cov) + return 0.5 * (cov_object.rank * (_LOG_2PI + 1) + cov_object.log_pdet) + + def fit(self, x, fix_mean=None, fix_cov=None): + """Fit a multivariate normal distribution to data. + + Parameters + ---------- + x : ndarray (m, n) + Data the distribution is fitted to. Must have two axes. + The first axis of length `m` represents the number of vectors + the distribution is fitted to. The second axis of length `n` + determines the dimensionality of the fitted distribution. + fix_mean : ndarray(n, ) + Fixed mean vector. Must have length `n`. + fix_cov: ndarray (n, n) + Fixed covariance matrix. Must have shape `(n, n)`. + + Returns + ------- + mean : ndarray (n, ) + Maximum likelihood estimate of the mean vector + cov : ndarray (n, n) + Maximum likelihood estimate of the covariance matrix + + """ + # input validation for data to be fitted + x = np.asarray(x) + if x.ndim != 2: + raise ValueError("`x` must be two-dimensional.") + + n_vectors, dim = x.shape + + # parameter estimation + # reference: https://home.ttic.edu/~shubhendu/Slides/Estimation.pdf + if fix_mean is not None: + # input validation for `fix_mean` + fix_mean = np.atleast_1d(fix_mean) + if fix_mean.shape != (dim, ): + msg = ("`fix_mean` must be a one-dimensional array the same " + "length as the dimensionality of the vectors `x`.") + raise ValueError(msg) + mean = fix_mean + else: + mean = x.mean(axis=0) + + if fix_cov is not None: + # input validation for `fix_cov` + fix_cov = np.atleast_2d(fix_cov) + # validate shape + if fix_cov.shape != (dim, dim): + msg = ("`fix_cov` must be a two-dimensional square array " + "of same side length as the dimensionality of the " + "vectors `x`.") + raise ValueError(msg) + # validate positive semidefiniteness + # a trimmed down copy from _PSD + s, u = scipy.linalg.eigh(fix_cov, lower=True, check_finite=True) + eps = _eigvalsh_to_eps(s) + if np.min(s) < -eps: + msg = "`fix_cov` must be symmetric positive semidefinite." + raise ValueError(msg) + cov = fix_cov + else: + centered_data = x - mean + cov = centered_data.T @ centered_data / n_vectors + return mean, cov + + +multivariate_normal = multivariate_normal_gen() + + +class multivariate_normal_frozen(multi_rv_frozen): + def __init__(self, mean=None, cov=1, allow_singular=False, seed=None, + maxpts=None, abseps=1e-5, releps=1e-5): + """Create a frozen multivariate normal distribution. 
+ + Parameters + ---------- + mean : array_like, default: ``[0]`` + Mean of the distribution. + cov : array_like, default: ``[1]`` + Symmetric positive (semi)definite covariance matrix of the + distribution. + allow_singular : bool, default: ``False`` + Whether to allow a singular covariance matrix. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + maxpts : integer, optional + The maximum number of points to use for integration of the + cumulative distribution function (default `1000000*dim`) + abseps : float, optional + Absolute error tolerance for the cumulative distribution function + (default 1e-5) + releps : float, optional + Relative error tolerance for the cumulative distribution function + (default 1e-5) + + Examples + -------- + When called with the default parameters, this will create a 1D random + variable with mean 0 and covariance 1: + + >>> from scipy.stats import multivariate_normal + >>> r = multivariate_normal() + >>> r.mean + array([ 0.]) + >>> r.cov + array([[1.]]) + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = multivariate_normal_gen(seed) + self.dim, self.mean, self.cov_object = ( + self._dist._process_parameters(mean, cov, allow_singular)) + self.allow_singular = allow_singular or self.cov_object._allow_singular + if not maxpts: + maxpts = 1000000 * self.dim + self.maxpts = maxpts + self.abseps = abseps + self.releps = releps + + @property + def cov(self): + return self.cov_object.covariance + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + out = self._dist._logpdf(x, self.mean, self.cov_object) + if np.any(self.cov_object.rank < self.dim): + out_of_bounds = ~self.cov_object._support_mask(x-self.mean) + out[out_of_bounds] = -np.inf + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def logcdf(self, x, *, lower_limit=None): + cdf = self.cdf(x, lower_limit=lower_limit) + # the log of a negative real is complex, and cdf can be negative + # if lower limit is greater than upper limit + cdf = cdf + 0j if np.any(cdf < 0) else cdf + out = np.log(cdf) + return out + + def cdf(self, x, *, lower_limit=None): + x = self._dist._process_quantiles(x, self.dim) + out = self._dist._cdf(x, self.mean, self.cov_object.covariance, + self.maxpts, self.abseps, self.releps, + lower_limit) + return _squeeze_output(out) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.mean, self.cov_object, size, random_state) + + def entropy(self): + """Computes the differential entropy of the multivariate normal. 
+
+        Returns
+        -------
+        h : scalar
+            Entropy of the multivariate normal distribution
+
+        """
+        log_pdet = self.cov_object.log_pdet
+        rank = self.cov_object.rank
+        return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# multivariate_normal_gen and fill in default strings in class docstrings
+for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']:
+    method = multivariate_normal_gen.__dict__[name]
+    method_frozen = multivariate_normal_frozen.__dict__[name]
+    method_frozen.__doc__ = doccer.docformat(method.__doc__,
+                                             mvn_docdict_noparams)
+    method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
+
+_matnorm_doc_default_callparams = """\
+mean : array_like, optional
+    Mean of the distribution (default: `None`)
+rowcov : array_like, optional
+    Among-row covariance matrix of the distribution (default: `1`)
+colcov : array_like, optional
+    Among-column covariance matrix of the distribution (default: `1`)
+"""
+
+_matnorm_doc_callparams_note = """\
+If `mean` is set to `None` then a matrix of zeros is used for the mean.
+The dimensions of this matrix are inferred from the shape of `rowcov` and
+`colcov`, if these are provided, or set to `1` if ambiguous.
+
+`rowcov` and `colcov` can be two-dimensional array_likes specifying the
+covariance matrices directly. Alternatively, a one-dimensional array will
+be interpreted as the entries of a diagonal matrix, and a scalar or
+zero-dimensional array will be interpreted as this value times the
+identity matrix.
+"""
+
+_matnorm_doc_frozen_callparams = ""
+
+_matnorm_doc_frozen_callparams_note = """\
+See class definition for a detailed description of parameters."""
+
+matnorm_docdict_params = {
+    '_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
+    '_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+matnorm_docdict_noparams = {
+    '_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
+    '_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+
+class matrix_normal_gen(multi_rv_generic):
+    r"""A matrix normal random variable.
+
+    The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
+    among-row covariance matrix. The `colcov` keyword specifies the
+    among-column covariance matrix.
+
+    Methods
+    -------
+    pdf(X, mean=None, rowcov=1, colcov=1)
+        Probability density function.
+    logpdf(X, mean=None, rowcov=1, colcov=1)
+        Log of the probability density function.
+    rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)
+        Draw random samples.
+    entropy(rowcov=1, colcov=1)
+        Differential entropy.
+
+    Parameters
+    ----------
+    %(_matnorm_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Notes
+    -----
+    %(_matnorm_doc_callparams_note)s
+
+    The covariance matrices specified by `rowcov` and `colcov` must be
+    (symmetric) positive definite. If the samples in `X` are
+    :math:`m \times n`, then `rowcov` must be :math:`m \times m` and
+    `colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
+
+    The probability density function for `matrix_normal` is
+
+    .. math::
+
+        f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
+               \exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
+               (X-M)^T \right] \right),
+
+    where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
+    :math:`V` the among-column covariance matrix.
+ + The `allow_singular` behaviour of the `multivariate_normal` + distribution is not currently supported. Covariance matrices must be + full rank. + + The `matrix_normal` distribution is closely related to the + `multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)` + (the vector formed by concatenating the columns of :math:`X`) has a + multivariate normal distribution with mean :math:`\mathrm{Vec}(M)` + and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker + product). Sampling and pdf evaluation are + :math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but + :math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal, + making this equivalent form algorithmically inefficient. + + .. versionadded:: 0.17.0 + + Examples + -------- + + >>> import numpy as np + >>> from scipy.stats import matrix_normal + + >>> M = np.arange(6).reshape(3,2); M + array([[0, 1], + [2, 3], + [4, 5]]) + >>> U = np.diag([1,2,3]); U + array([[1, 0, 0], + [0, 2, 0], + [0, 0, 3]]) + >>> V = 0.3*np.identity(2); V + array([[ 0.3, 0. ], + [ 0. , 0.3]]) + >>> X = M + 0.1; X + array([[ 0.1, 1.1], + [ 2.1, 3.1], + [ 4.1, 5.1]]) + >>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V) + 0.023410202050005054 + + >>> # Equivalent multivariate normal + >>> from scipy.stats import multivariate_normal + >>> vectorised_X = X.T.flatten() + >>> equiv_mean = M.T.flatten() + >>> equiv_cov = np.kron(V,U) + >>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov) + 0.023410202050005054 + + Alternatively, the object may be called (as a function) to fix the mean + and covariance parameters, returning a "frozen" matrix normal + random variable: + + >>> rv = matrix_normal(mean=None, rowcov=1, colcov=1) + >>> # Frozen object with the same methods but holding the given + >>> # mean and covariance fixed. + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params) + + def __call__(self, mean=None, rowcov=1, colcov=1, seed=None): + """Create a frozen matrix normal distribution. + + See `matrix_normal_frozen` for more information. + + """ + return matrix_normal_frozen(mean, rowcov, colcov, seed=seed) + + def _process_parameters(self, mean, rowcov, colcov): + """ + Infer dimensionality from mean or covariance matrices. Handle + defaults. Ensure compatible dimensions. 
+ """ + + # Process mean + if mean is not None: + mean = np.asarray(mean, dtype=float) + meanshape = mean.shape + if len(meanshape) != 2: + raise ValueError("Array `mean` must be two dimensional.") + if np.any(meanshape == 0): + raise ValueError("Array `mean` has invalid shape.") + + # Process among-row covariance + rowcov = np.asarray(rowcov, dtype=float) + if rowcov.ndim == 0: + if mean is not None: + rowcov = rowcov * np.identity(meanshape[0]) + else: + rowcov = rowcov * np.identity(1) + elif rowcov.ndim == 1: + rowcov = np.diag(rowcov) + rowshape = rowcov.shape + if len(rowshape) != 2: + raise ValueError("`rowcov` must be a scalar or a 2D array.") + if rowshape[0] != rowshape[1]: + raise ValueError("Array `rowcov` must be square.") + if rowshape[0] == 0: + raise ValueError("Array `rowcov` has invalid shape.") + numrows = rowshape[0] + + # Process among-column covariance + colcov = np.asarray(colcov, dtype=float) + if colcov.ndim == 0: + if mean is not None: + colcov = colcov * np.identity(meanshape[1]) + else: + colcov = colcov * np.identity(1) + elif colcov.ndim == 1: + colcov = np.diag(colcov) + colshape = colcov.shape + if len(colshape) != 2: + raise ValueError("`colcov` must be a scalar or a 2D array.") + if colshape[0] != colshape[1]: + raise ValueError("Array `colcov` must be square.") + if colshape[0] == 0: + raise ValueError("Array `colcov` has invalid shape.") + numcols = colshape[0] + + # Ensure mean and covariances compatible + if mean is not None: + if meanshape[0] != numrows: + raise ValueError("Arrays `mean` and `rowcov` must have the " + "same number of rows.") + if meanshape[1] != numcols: + raise ValueError("Arrays `mean` and `colcov` must have the " + "same number of columns.") + else: + mean = np.zeros((numrows, numcols)) + + dims = (numrows, numcols) + + return dims, mean, rowcov, colcov + + def _process_quantiles(self, X, dims): + """ + Adjust quantiles array so that last two axes labels the components of + each data point. + """ + X = np.asarray(X, dtype=float) + if X.ndim == 2: + X = X[np.newaxis, :] + if X.shape[-2:] != dims: + raise ValueError("The shape of array `X` is not compatible " + "with the distribution parameters.") + return X + + def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov, + col_prec_rt, log_det_colcov): + """Log of the matrix normal probability density function. + + Parameters + ---------- + dims : tuple + Dimensions of the matrix variates + X : ndarray + Points at which to evaluate the log of the probability + density function + mean : ndarray + Mean of the distribution + row_prec_rt : ndarray + A decomposition such that np.dot(row_prec_rt, row_prec_rt.T) + is the inverse of the among-row covariance matrix + log_det_rowcov : float + Logarithm of the determinant of the among-row covariance matrix + col_prec_rt : ndarray + A decomposition such that np.dot(col_prec_rt, col_prec_rt.T) + is the inverse of the among-column covariance matrix + log_det_colcov : float + Logarithm of the determinant of the among-column covariance matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. 
+ + """ + numrows, numcols = dims + roll_dev = np.moveaxis(X-mean, -1, 0) + scale_dev = np.tensordot(col_prec_rt.T, + np.dot(roll_dev, row_prec_rt), 1) + maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0) + return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov + + numrows*log_det_colcov + maha) + + def logpdf(self, X, mean=None, rowcov=1, colcov=1): + """Log of the matrix normal probability density function. + + Parameters + ---------- + X : array_like + Quantiles, with the last two axes of `X` denoting the components. + %(_matnorm_doc_default_callparams)s + + Returns + ------- + logpdf : ndarray + Log of the probability density function evaluated at `X` + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, + colcov) + X = self._process_quantiles(X, dims) + rowpsd = _PSD(rowcov, allow_singular=False) + colpsd = _PSD(colcov, allow_singular=False) + out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U, + colpsd.log_pdet) + return _squeeze_output(out) + + def pdf(self, X, mean=None, rowcov=1, colcov=1): + """Matrix normal probability density function. + + Parameters + ---------- + X : array_like + Quantiles, with the last two axes of `X` denoting the components. + %(_matnorm_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `X` + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + return np.exp(self.logpdf(X, mean, rowcov, colcov)) + + def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None): + """Draw random samples from a matrix normal distribution. + + Parameters + ---------- + %(_matnorm_doc_default_callparams)s + size : integer, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `dims`), where `dims` is the + dimension of the random matrices. + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + size = int(size) + dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, + colcov) + rowchol = scipy.linalg.cholesky(rowcov, lower=True) + colchol = scipy.linalg.cholesky(colcov, lower=True) + random_state = self._get_random_state(random_state) + # We aren't generating standard normal variates with size=(size, + # dims[0], dims[1]) directly to ensure random variates remain backwards + # compatible. See https://github.com/scipy/scipy/pull/12312 for more + # details. + std_norm = random_state.standard_normal( + size=(dims[1], size, dims[0]) + ).transpose(1, 2, 0) + out = mean + np.einsum('jp,ipq,kq->ijk', + rowchol, std_norm, colchol, + optimize=True) + if size == 1: + out = out.reshape(mean.shape) + return out + + def entropy(self, rowcov=1, colcov=1): + """Log of the matrix normal probability density function. 
+ + Parameters + ---------- + rowcov : array_like, optional + Among-row covariance matrix of the distribution (default: `1`) + colcov : array_like, optional + Among-column covariance matrix of the distribution (default: `1`) + + Returns + ------- + entropy : float + Entropy of the distribution + + Notes + ----- + %(_matnorm_doc_callparams_note)s + + """ + dummy_mean = np.zeros((rowcov.shape[0], colcov.shape[0])) + dims, _, rowcov, colcov = self._process_parameters(dummy_mean, + rowcov, + colcov) + rowpsd = _PSD(rowcov, allow_singular=False) + colpsd = _PSD(colcov, allow_singular=False) + + return self._entropy(dims, rowpsd.log_pdet, colpsd.log_pdet) + + def _entropy(self, dims, row_cov_logdet, col_cov_logdet): + n, p = dims + return (0.5 * n * p * (1 + _LOG_2PI) + 0.5 * p * row_cov_logdet + + 0.5 * n * col_cov_logdet) + + +matrix_normal = matrix_normal_gen() + + +class matrix_normal_frozen(multi_rv_frozen): + """ + Create a frozen matrix normal distribution. + + Parameters + ---------- + %(_matnorm_doc_default_callparams)s + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is `None` the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import matrix_normal + + >>> distn = matrix_normal(mean=np.zeros((3,3))) + >>> X = distn.rvs(); X + array([[-0.02976962, 0.93339138, -0.09663178], + [ 0.67405524, 0.28250467, -0.93308929], + [-0.31144782, 0.74535536, 1.30412916]]) + >>> distn.pdf(X) + 2.5160642368346784e-05 + >>> distn.logpdf(X) + -10.590229595124615 + """ + + def __init__(self, mean=None, rowcov=1, colcov=1, seed=None): + self._dist = matrix_normal_gen(seed) + self.dims, self.mean, self.rowcov, self.colcov = \ + self._dist._process_parameters(mean, rowcov, colcov) + self.rowpsd = _PSD(self.rowcov, allow_singular=False) + self.colpsd = _PSD(self.colcov, allow_singular=False) + + def logpdf(self, X): + X = self._dist._process_quantiles(X, self.dims) + out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U, + self.rowpsd.log_pdet, self.colpsd.U, + self.colpsd.log_pdet) + return _squeeze_output(out) + + def pdf(self, X): + return np.exp(self.logpdf(X)) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.mean, self.rowcov, self.colcov, size, + random_state) + + def entropy(self): + return self._dist._entropy(self.dims, self.rowpsd.log_pdet, + self.colpsd.log_pdet) + + +# Set frozen generator docstrings from corresponding docstrings in +# matrix_normal_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'rvs', 'entropy']: + method = matrix_normal_gen.__dict__[name] + method_frozen = matrix_normal_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat(method.__doc__, + matnorm_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params) + +_dirichlet_doc_default_callparams = """\ +alpha : array_like + The concentration parameters. The number of entries determines the + dimensionality of the distribution. 
+""" +_dirichlet_doc_frozen_callparams = "" + +_dirichlet_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +dirichlet_docdict_params = { + '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams, + '_doc_random_state': _doc_random_state +} + +dirichlet_docdict_noparams = { + '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams, + '_doc_random_state': _doc_random_state +} + + +def _dirichlet_check_parameters(alpha): + alpha = np.asarray(alpha) + if np.min(alpha) <= 0: + raise ValueError("All parameters must be greater than 0") + elif alpha.ndim != 1: + raise ValueError("Parameter vector 'a' must be one dimensional, " + f"but a.shape = {alpha.shape}.") + return alpha + + +def _dirichlet_check_input(alpha, x): + x = np.asarray(x) + + if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]: + raise ValueError("Vector 'x' must have either the same number " + "of entries as, or one entry fewer than, " + f"parameter vector 'a', but alpha.shape = {alpha.shape} " + f"and x.shape = {x.shape}.") + + if x.shape[0] != alpha.shape[0]: + xk = np.array([1 - np.sum(x, 0)]) + if xk.ndim == 1: + x = np.append(x, xk) + elif xk.ndim == 2: + x = np.vstack((x, xk)) + else: + raise ValueError("The input must be one dimensional or a two " + "dimensional matrix containing the entries.") + + if np.min(x) < 0: + raise ValueError("Each entry in 'x' must be greater than or equal " + "to zero.") + + if np.max(x) > 1: + raise ValueError("Each entry in 'x' must be smaller or equal one.") + + # Check x_i > 0 or alpha_i > 1 + xeq0 = (x == 0) + alphalt1 = (alpha < 1) + if x.shape != alpha.shape: + alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape) + chk = np.logical_and(xeq0, alphalt1) + + if np.sum(chk): + raise ValueError("Each entry in 'x' must be greater than zero if its " + "alpha is less than one.") + + if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any(): + raise ValueError("The input vector 'x' must lie within the normal " + "simplex. but np.sum(x, 0) = %s." % np.sum(x, 0)) + + return x + + +def _lnB(alpha): + r"""Internal helper function to compute the log of the useful quotient. + + .. math:: + + B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)} + {\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)} + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + B : scalar + Helper quotient, internal use only + + """ + return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha)) + + +class dirichlet_gen(multi_rv_generic): + r"""A Dirichlet random variable. + + The ``alpha`` keyword specifies the concentration parameters of the + distribution. + + .. versionadded:: 0.15.0 + + Methods + ------- + pdf(x, alpha) + Probability density function. + logpdf(x, alpha) + Log of the probability density function. + rvs(alpha, size=1, random_state=None) + Draw random samples from a Dirichlet distribution. + mean(alpha) + The mean of the Dirichlet distribution + var(alpha) + The variance of the Dirichlet distribution + cov(alpha) + The covariance of the Dirichlet distribution + entropy(alpha) + Compute the differential entropy of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + Each :math:`\alpha` entry must be positive. The distribution has only + support on the simplex defined by + + .. math:: + \sum_{i=1}^{K} x_i = 1 + + where :math:`0 < x_i < 1`. 
+ + If the quantiles don't lie within the simplex, a ValueError is raised. + + The probability density function for `dirichlet` is + + .. math:: + + f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1} + + where + + .. math:: + + \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)} + {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)} + + and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the + concentration parameters and :math:`K` is the dimension of the space + where :math:`x` takes values. + + Note that the `dirichlet` interface is somewhat inconsistent. + The array returned by the rvs function is transposed + with respect to the format expected by the pdf and logpdf. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import dirichlet + + Generate a dirichlet random variable + + >>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles + >>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters + >>> dirichlet.pdf(quantiles, alpha) + 0.2843831684937255 + + The same PDF but following a log scale + + >>> dirichlet.logpdf(quantiles, alpha) + -1.2574327653159187 + + Once we specify the dirichlet distribution + we can then calculate quantities of interest + + >>> dirichlet.mean(alpha) # get the mean of the distribution + array([0.01960784, 0.24509804, 0.73529412]) + >>> dirichlet.var(alpha) # get variance + array([0.00089829, 0.00864603, 0.00909517]) + >>> dirichlet.entropy(alpha) # calculate the differential entropy + -4.3280162474082715 + + We can also return random samples from the distribution + + >>> dirichlet.rvs(alpha, size=1, random_state=1) + array([[0.00766178, 0.24670518, 0.74563305]]) + >>> dirichlet.rvs(alpha, size=2, random_state=2) + array([[0.01639427, 0.1292273 , 0.85437844], + [0.00156917, 0.19033695, 0.80809388]]) + + Alternatively, the object may be called (as a function) to fix + concentration parameters, returning a "frozen" Dirichlet + random variable: + + >>> rv = dirichlet(alpha) + >>> # Frozen object with the same methods but holding the given + >>> # concentration parameters fixed. + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params) + + def __call__(self, alpha, seed=None): + return dirichlet_frozen(alpha, seed=seed) + + def _logpdf(self, x, alpha): + """Log of the Dirichlet probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + %(_dirichlet_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + lnB = _lnB(alpha) + return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0) + + def logpdf(self, x, alpha): + """Log of the Dirichlet probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + Log of the probability density function evaluated at `x`. + + """ + alpha = _dirichlet_check_parameters(alpha) + x = _dirichlet_check_input(alpha, x) + + out = self._logpdf(x, alpha) + return _squeeze_output(out) + + def pdf(self, x, alpha): + """The Dirichlet probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. 
+ %(_dirichlet_doc_default_callparams)s + + Returns + ------- + pdf : ndarray or scalar + The probability density function evaluated at `x`. + + """ + alpha = _dirichlet_check_parameters(alpha) + x = _dirichlet_check_input(alpha, x) + + out = np.exp(self._logpdf(x, alpha)) + return _squeeze_output(out) + + def mean(self, alpha): + """Mean of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + mu : ndarray or scalar + Mean of the Dirichlet distribution. + + """ + alpha = _dirichlet_check_parameters(alpha) + + out = alpha / (np.sum(alpha)) + return _squeeze_output(out) + + def var(self, alpha): + """Variance of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + v : ndarray or scalar + Variance of the Dirichlet distribution. + + """ + + alpha = _dirichlet_check_parameters(alpha) + + alpha0 = np.sum(alpha) + out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1)) + return _squeeze_output(out) + + def cov(self, alpha): + """Covariance matrix of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + cov : ndarray + The covariance matrix of the distribution. + """ + + alpha = _dirichlet_check_parameters(alpha) + alpha0 = np.sum(alpha) + a = alpha / alpha0 + + cov = (np.diag(a) - np.outer(a, a)) / (alpha0 + 1) + return _squeeze_output(cov) + + def entropy(self, alpha): + """ + Differential entropy of the Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Dirichlet distribution + + """ + + alpha = _dirichlet_check_parameters(alpha) + + alpha0 = np.sum(alpha) + lnB = _lnB(alpha) + K = alpha.shape[0] + + out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum( + (alpha - 1) * scipy.special.psi(alpha)) + return _squeeze_output(out) + + def rvs(self, alpha, size=1, random_state=None): + """ + Draw random samples from a Dirichlet distribution. + + Parameters + ---------- + %(_dirichlet_doc_default_callparams)s + size : int, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `N`), where `N` is the + dimension of the random variable. 
+ + """ + alpha = _dirichlet_check_parameters(alpha) + random_state = self._get_random_state(random_state) + return random_state.dirichlet(alpha, size=size) + + +dirichlet = dirichlet_gen() + + +class dirichlet_frozen(multi_rv_frozen): + def __init__(self, alpha, seed=None): + self.alpha = _dirichlet_check_parameters(alpha) + self._dist = dirichlet_gen(seed) + + def logpdf(self, x): + return self._dist.logpdf(x, self.alpha) + + def pdf(self, x): + return self._dist.pdf(x, self.alpha) + + def mean(self): + return self._dist.mean(self.alpha) + + def var(self): + return self._dist.var(self.alpha) + + def cov(self): + return self._dist.cov(self.alpha) + + def entropy(self): + return self._dist.entropy(self.alpha) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.alpha, size, random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_normal_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'cov', 'entropy']: + method = dirichlet_gen.__dict__[name] + method_frozen = dirichlet_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, dirichlet_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params) + + +_wishart_doc_default_callparams = """\ +df : int + Degrees of freedom, must be greater than or equal to dimension of the + scale matrix +scale : array_like + Symmetric positive definite scale matrix of the distribution +""" + +_wishart_doc_callparams_note = "" + +_wishart_doc_frozen_callparams = "" + +_wishart_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +wishart_docdict_params = { + '_doc_default_callparams': _wishart_doc_default_callparams, + '_doc_callparams_note': _wishart_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +wishart_docdict_noparams = { + '_doc_default_callparams': _wishart_doc_frozen_callparams, + '_doc_callparams_note': _wishart_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class wishart_gen(multi_rv_generic): + r"""A Wishart random variable. + + The `df` keyword specifies the degrees of freedom. The `scale` keyword + specifies the scale matrix, which must be symmetric and positive definite. + In this context, the scale matrix is often interpreted in terms of a + multivariate normal precision matrix (the inverse of the covariance + matrix). These arguments must satisfy the relationship + ``df > scale.ndim - 1``, but see notes on using the `rvs` method with + ``df < scale.ndim``. + + Methods + ------- + pdf(x, df, scale) + Probability density function. + logpdf(x, df, scale) + Log of the probability density function. + rvs(df, scale, size=1, random_state=None) + Draw random samples from a Wishart distribution. + entropy() + Compute the differential entropy of the Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Raises + ------ + scipy.linalg.LinAlgError + If the scale matrix `scale` is not positive definite. + + See Also + -------- + invwishart, chi2 + + Notes + ----- + %(_doc_callparams_note)s + + The scale matrix `scale` must be a symmetric positive definite + matrix. Singular matrices, including the symmetric positive semi-definite + case, are not supported. Symmetry is not checked; only the lower triangular + portion is used. + + The Wishart distribution is often denoted + + .. 
math:: + + W_p(\nu, \Sigma) + + where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the + :math:`p \times p` scale matrix. + + The probability density function for `wishart` has support over positive + definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then + its PDF is given by: + + .. math:: + + f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} } + |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )} + \exp\left( -tr(\Sigma^{-1} S) / 2 \right) + + If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then + :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart). + + If the scale matrix is 1-dimensional and equal to one, then the Wishart + distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)` + distribution. + + The algorithm [2]_ implemented by the `rvs` method may + produce numerically singular matrices with :math:`p - 1 < \nu < p`; the + user may wish to check for this condition and generate replacement samples + as necessary. + + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", + Wiley, 1983. + .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate + Generator", Applied Statistics, vol. 21, pp. 341-345, 1972. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import wishart, chi2 + >>> x = np.linspace(1e-5, 8, 100) + >>> w = wishart.pdf(x, df=3, scale=1); w[:5] + array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) + >>> c = chi2.pdf(x, 3); c[:5] + array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) + >>> plt.plot(x, w) + >>> plt.show() + + The input quantiles can be any shape of array, as long as the last + axis labels the components. + + Alternatively, the object may be called (as a function) to fix the degrees + of freedom and scale parameters, returning a "frozen" Wishart random + variable: + + >>> rv = wishart(df=1, scale=1) + >>> # Frozen object with the same methods but holding the given + >>> # degrees of freedom and scale fixed. + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) + + def __call__(self, df=None, scale=None, seed=None): + """Create a frozen Wishart distribution. + + See `wishart_frozen` for more information. + """ + return wishart_frozen(df, scale, seed) + + def _process_parameters(self, df, scale): + if scale is None: + scale = 1.0 + scale = np.asarray(scale, dtype=float) + + if scale.ndim == 0: + scale = scale[np.newaxis, np.newaxis] + elif scale.ndim == 1: + scale = np.diag(scale) + elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]: + raise ValueError("Array 'scale' must be square if it is two" + " dimensional, but scale.scale = %s." + % str(scale.shape)) + elif scale.ndim > 2: + raise ValueError("Array 'scale' must be at most two-dimensional," + " but scale.ndim = %d" % scale.ndim) + + dim = scale.shape[0] + + if df is None: + df = dim + elif not np.isscalar(df): + raise ValueError("Degrees of freedom must be a scalar.") + elif df <= dim - 1: + raise ValueError("Degrees of freedom must be greater than the " + "dimension of scale matrix minus 1.") + + return dim, df, scale + + def _process_quantiles(self, x, dim): + """ + Adjust quantiles array so that last axis labels the components of + each data point. 
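+
+        The returned array always has shape ``(dim, dim, n)``, with the
+        quantile matrices stacked along the last axis, which is the layout
+        expected by `_logpdf`.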
+ """ + x = np.asarray(x, dtype=float) + + if x.ndim == 0: + x = x * np.eye(dim)[:, :, np.newaxis] + if x.ndim == 1: + if dim == 1: + x = x[np.newaxis, np.newaxis, :] + else: + x = np.diag(x)[:, :, np.newaxis] + elif x.ndim == 2: + if not x.shape[0] == x.shape[1]: + raise ValueError("Quantiles must be square if they are two" + " dimensional, but x.shape = %s." + % str(x.shape)) + x = x[:, :, np.newaxis] + elif x.ndim == 3: + if not x.shape[0] == x.shape[1]: + raise ValueError("Quantiles must be square in the first two" + " dimensions if they are three dimensional" + ", but x.shape = %s." % str(x.shape)) + elif x.ndim > 3: + raise ValueError("Quantiles must be at most two-dimensional with" + " an additional dimension for multiple" + "components, but x.ndim = %d" % x.ndim) + + # Now we have 3-dim array; should have shape [dim, dim, *] + if not x.shape[0:2] == (dim, dim): + raise ValueError('Quantiles have incompatible dimensions: should' + f' be {(dim, dim)}, got {x.shape[0:2]}.') + + return x + + def _process_size(self, size): + size = np.asarray(size) + + if size.ndim == 0: + size = size[np.newaxis] + elif size.ndim > 1: + raise ValueError('Size must be an integer or tuple of integers;' + ' thus must have dimension <= 1.' + ' Got size.ndim = %s' % str(tuple(size))) + n = size.prod() + shape = tuple(size) + + return n, shape + + def _logpdf(self, x, dim, df, scale, log_det_scale, C): + """Log of the Wishart probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + scale : ndarray + Scale matrix + log_det_scale : float + Logarithm of the determinant of the scale matrix + C : ndarray + Cholesky factorization of the scale matrix, lower triagular. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + # log determinant of x + # Note: x has components along the last axis, so that x.T has + # components alone the 0-th axis. Then since det(A) = det(A'), this + # gives us a 1-dim vector of determinants + + # Retrieve tr(scale^{-1} x) + log_det_x = np.empty(x.shape[-1]) + scale_inv_x = np.empty(x.shape) + tr_scale_inv_x = np.empty(x.shape[-1]) + for i in range(x.shape[-1]): + _, log_det_x[i] = self._cholesky_logdet(x[:, :, i]) + scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i]) + tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace() + + # Log PDF + out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) - + (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale + + multigammaln(0.5*df, dim))) + + return out + + def logpdf(self, x, df, scale): + """Log of the Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + x = self._process_quantiles(x, dim) + + # Cholesky decomposition of scale, get log(det(scale)) + C, log_det_scale = self._cholesky_logdet(scale) + + out = self._logpdf(x, dim, df, scale, log_det_scale, C) + return _squeeze_output(out) + + def pdf(self, x, df, scale): + """Wishart probability density function. 
+ + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + return np.exp(self.logpdf(x, df, scale)) + + def _mean(self, dim, df, scale): + """Mean of the Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mean' instead. + + """ + return df * scale + + def mean(self, df, scale): + """Mean of the Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float + The mean of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mean(dim, df, scale) + return _squeeze_output(out) + + def _mode(self, dim, df, scale): + """Mode of the Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mode' instead. + + """ + if df >= dim + 1: + out = (df-dim-1) * scale + else: + out = None + return out + + def mode(self, df, scale): + """Mode of the Wishart distribution + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mode : float or None + The Mode of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mode(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _var(self, dim, df, scale): + """Variance of the Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'var' instead. + + """ + var = scale**2 + diag = scale.diagonal() # 1 x dim array + var += np.outer(diag, diag) + var *= df + return var + + def var(self, df, scale): + """Variance of the Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + var : float + The variance of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._var(dim, df, scale) + return _squeeze_output(out) + + def _standard_rvs(self, n, shape, dim, df, random_state): + """ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. 
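+
+        The construction below follows the Bartlett decomposition: ``A`` is
+        lower triangular, with the square root of a chi-square variate with
+        ``df - i`` degrees of freedom at diagonal position ``i`` (0-based)
+        and independent standard normal variates below the diagonal, so that
+        ``A @ A.T`` is Wishart distributed with identity scale matrix.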
+ + """ + # Random normal variates for off-diagonal elements + n_tril = dim * (dim-1) // 2 + covariances = random_state.normal( + size=n*n_tril).reshape(shape+(n_tril,)) + + # Random chi-square variates for diagonal elements + variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5 + for i in range(dim)]].reshape((dim,) + + shape[::-1]).T) + + # Create the A matri(ces) - lower triangular + A = np.zeros(shape + (dim, dim)) + + # Input the covariances + size_idx = tuple([slice(None, None, None)]*len(shape)) + tril_idx = np.tril_indices(dim, k=-1) + A[size_idx + tril_idx] = covariances + + # Input the variances + diag_idx = np.diag_indices(dim) + A[size_idx + diag_idx] = variances + + return A + + def _rvs(self, n, shape, dim, df, C, random_state): + """Draw random samples from a Wishart distribution. + + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + C : ndarray + Cholesky factorization of the scale matrix, lower triangular. + %(_doc_random_state)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + random_state = self._get_random_state(random_state) + # Calculate the matrices A, which are actually lower triangular + # Cholesky factorizations of a matrix B such that B ~ W(df, I) + A = self._standard_rvs(n, shape, dim, df, random_state) + + # Calculate SA = C A A' C', where SA ~ W(df, scale) + # Note: this is the product of a (lower) (lower) (lower)' (lower)' + # or, denoting B = AA', it is C B C' where C is the lower + # triangular Cholesky factorization of the scale matrix. + # this appears to conflict with the instructions in [1]_, which + # suggest that it should be D' B D where D is the lower + # triangular factorization of the scale matrix. However, it is + # meant to refer to the Bartlett (1933) representation of a + # Wishart random variate as L A A' L' where L is lower triangular + # so it appears that understanding D' to be upper triangular + # is either a typo in or misreading of [1]_. + for index in np.ndindex(shape): + CA = np.dot(C, A[index]) + A[index] = np.dot(CA, CA.T) + + return A + + def rvs(self, df, scale, size=1, random_state=None): + """Draw random samples from a Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`) + (``dim``, ``dim``), where + ``dim`` is the dimension of the scale matrix. + + Notes + ----- + %(_doc_callparams_note)s + + """ + n, shape = self._process_size(size) + dim, df, scale = self._process_parameters(df, scale) + + # Cholesky decomposition of scale + C = scipy.linalg.cholesky(scale, lower=True) + + out = self._rvs(n, shape, dim, df, C, random_state) + + return _squeeze_output(out) + + def _entropy(self, dim, df, log_det_scale): + """Compute the differential entropy of the Wishart. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + log_det_scale : float + Logarithm of the determinant of the scale matrix + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'entropy' instead. 
+ + """ + return ( + 0.5 * (dim+1) * log_det_scale + + 0.5 * dim * (dim+1) * _LOG_2 + + multigammaln(0.5*df, dim) - + 0.5 * (df - dim - 1) * np.sum( + [psi(0.5*(df + 1 - (i+1))) for i in range(dim)] + ) + + 0.5 * df * dim + ) + + def entropy(self, df, scale): + """Compute the differential entropy of the Wishart. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Wishart distribution + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + _, log_det_scale = self._cholesky_logdet(scale) + return self._entropy(dim, df, log_det_scale) + + def _cholesky_logdet(self, scale): + """Compute Cholesky decomposition and determine (log(det(scale)). + + Parameters + ---------- + scale : ndarray + Scale matrix. + + Returns + ------- + c_decomp : ndarray + The Cholesky decomposition of `scale`. + logdet : scalar + The log of the determinant of `scale`. + + Notes + ----- + This computation of ``logdet`` is equivalent to + ``np.linalg.slogdet(scale)``. It is ~2x faster though. + + """ + c_decomp = scipy.linalg.cholesky(scale, lower=True) + logdet = 2 * np.sum(np.log(c_decomp.diagonal())) + return c_decomp, logdet + + +wishart = wishart_gen() + + +class wishart_frozen(multi_rv_frozen): + """Create a frozen Wishart distribution. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution + scale : array_like + Scale matrix of the distribution + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + """ + def __init__(self, df, scale, seed=None): + self._dist = wishart_gen(seed) + self.dim, self.df, self.scale = self._dist._process_parameters( + df, scale) + self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + + out = self._dist._logpdf(x, self.dim, self.df, self.scale, + self.log_det_scale, self.C) + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def mean(self): + out = self._dist._mean(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def mode(self): + out = self._dist._mode(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def var(self): + out = self._dist._var(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def rvs(self, size=1, random_state=None): + n, shape = self._dist._process_size(size) + out = self._dist._rvs(n, shape, self.dim, self.df, + self.C, random_state) + return _squeeze_output(out) + + def entropy(self): + return self._dist._entropy(self.dim, self.df, self.log_det_scale) + + +# Set frozen generator docstrings from corresponding docstrings in +# Wishart and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']: + method = wishart_gen.__dict__[name] + method_frozen = wishart_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, wishart_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) + + +class invwishart_gen(wishart_gen): + r"""An inverse Wishart random variable. + + The `df` keyword specifies the degrees of freedom. 
The `scale` keyword + specifies the scale matrix, which must be symmetric and positive definite. + In this context, the scale matrix is often interpreted in terms of a + multivariate normal covariance matrix. + + Methods + ------- + pdf(x, df, scale) + Probability density function. + logpdf(x, df, scale) + Log of the probability density function. + rvs(df, scale, size=1, random_state=None) + Draw random samples from an inverse Wishart distribution. + entropy(df, scale) + Differential entropy of the distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Raises + ------ + scipy.linalg.LinAlgError + If the scale matrix `scale` is not positive definite. + + See Also + -------- + wishart + + Notes + ----- + %(_doc_callparams_note)s + + The scale matrix `scale` must be a symmetric positive definite + matrix. Singular matrices, including the symmetric positive semi-definite + case, are not supported. Symmetry is not checked; only the lower triangular + portion is used. + + The inverse Wishart distribution is often denoted + + .. math:: + + W_p^{-1}(\nu, \Psi) + + where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the + :math:`p \times p` scale matrix. + + The probability density function for `invwishart` has support over positive + definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`, + then its PDF is given by: + + .. math:: + + f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} } + |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)} + \exp\left( -tr(\Sigma S^{-1}) / 2 \right) + + If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then + :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart). + + If the scale matrix is 1-dimensional and equal to one, then the inverse + Wishart distribution :math:`W_1(\nu, 1)` collapses to the + inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}` + and scale = :math:`\frac{1}{2}`. + + Instead of inverting a randomly generated Wishart matrix as described in [2], + here the algorithm in [4] is used to directly generate a random inverse-Wishart + matrix without inversion. + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", + Wiley, 1983. + .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications + in Statistics - Simulation and Computation, vol. 14.2, pp.511-514, + 1985. + .. [3] Gupta, M. and Srivastava, S. "Parametric Bayesian Estimation of + Differential Entropy and Relative Entropy". Entropy 12, 818 - 843. + 2010. + .. [4] S.D. Axen, "Efficiently generating inverse-Wishart matrices and + their Cholesky factors", :arXiv:`2310.15884v1`. 2023. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import invwishart, invgamma + >>> x = np.linspace(0.01, 1, 100) + >>> iw = invwishart.pdf(x, df=6, scale=1) + >>> iw[:3] + array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) + >>> ig = invgamma.pdf(x, 6/2., scale=1./2) + >>> ig[:3] + array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) + >>> plt.plot(x, iw) + >>> plt.show() + + The input quantiles can be any shape of array, as long as the last + axis labels the components. 
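+
+    As a further illustrative check (the seed, scale matrix and tolerance
+    below are arbitrary choices), the sample mean of random draws approaches
+    the analytic mean ``scale / (df - dim - 1)`` whenever ``df > dim + 1``:
+
+    >>> rng = np.random.default_rng(12345)
+    >>> S = np.array([[2.0, 0.3], [0.3, 1.0]])
+    >>> draws = invwishart.rvs(df=10, scale=S, size=2000, random_state=rng)
+    >>> np.allclose(draws.mean(axis=0), S / (10 - 2 - 1), atol=0.1)
+    True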
+ + Alternatively, the object may be called (as a function) to fix the degrees + of freedom and scale parameters, returning a "frozen" inverse Wishart + random variable: + + >>> rv = invwishart(df=1, scale=1) + >>> # Frozen object with the same methods but holding the given + >>> # degrees of freedom and scale fixed. + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) + + def __call__(self, df=None, scale=None, seed=None): + """Create a frozen inverse Wishart distribution. + + See `invwishart_frozen` for more information. + + """ + return invwishart_frozen(df, scale, seed) + + def _logpdf(self, x, dim, df, log_det_scale, C): + """Log of the inverse Wishart probability density function. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability + density function. + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + log_det_scale : float + Logarithm of the determinant of the scale matrix + C : ndarray + Cholesky factorization of the scale matrix, lower triagular. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + # Retrieve tr(scale x^{-1}) + log_det_x = np.empty(x.shape[-1]) + tr_scale_x_inv = np.empty(x.shape[-1]) + trsm = get_blas_funcs(('trsm'), (x,)) + if dim > 1: + for i in range(x.shape[-1]): + Cx, log_det_x[i] = self._cholesky_logdet(x[:, :, i]) + A = trsm(1., Cx, C, side=0, lower=True) + tr_scale_x_inv[i] = np.linalg.norm(A)**2 + else: + log_det_x[:] = np.log(x[0, 0]) + tr_scale_x_inv[:] = C[0, 0]**2 / x[0, 0] + + # Log PDF + out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) - + (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) - + multigammaln(0.5*df, dim)) + + return out + + def logpdf(self, x, df, scale): + """Log of the inverse Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Log of the probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + dim, df, scale = self._process_parameters(df, scale) + x = self._process_quantiles(x, dim) + C, log_det_scale = self._cholesky_logdet(scale) + out = self._logpdf(x, dim, df, log_det_scale, C) + return _squeeze_output(out) + + def pdf(self, x, df, scale): + """Inverse Wishart probability density function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + Each quantile must be a symmetric positive definite matrix. + %(_doc_default_callparams)s + + Returns + ------- + pdf : ndarray + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + + """ + return np.exp(self.logpdf(x, df, scale)) + + def _mean(self, dim, df, scale): + """Mean of the inverse Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mean' instead. + + """ + if df > dim + 1: + out = scale / (df - dim - 1) + else: + out = None + return out + + def mean(self, df, scale): + """Mean of the inverse Wishart distribution. 
+ + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix plus one. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float or None + The mean of the distribution + + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mean(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _mode(self, dim, df, scale): + """Mode of the inverse Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'mode' instead. + + """ + return scale / (df + dim + 1) + + def mode(self, df, scale): + """Mode of the inverse Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mode : float + The Mode of the distribution + + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._mode(dim, df, scale) + return _squeeze_output(out) + + def _var(self, dim, df, scale): + """Variance of the inverse Wishart distribution. + + Parameters + ---------- + dim : int + Dimension of the scale matrix + %(_doc_default_callparams)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'var' instead. + + """ + if df > dim + 3: + var = (df - dim + 1) * scale**2 + diag = scale.diagonal() # 1 x dim array + var += (df - dim - 1) * np.outer(diag, diag) + var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3) + else: + var = None + return var + + def var(self, df, scale): + """Variance of the inverse Wishart distribution. + + Only valid if the degrees of freedom are greater than the dimension of + the scale matrix plus three. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + var : float + The variance of the distribution + """ + dim, df, scale = self._process_parameters(df, scale) + out = self._var(dim, df, scale) + return _squeeze_output(out) if out is not None else out + + def _inv_standard_rvs(self, n, shape, dim, df, random_state): + """ + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Returns + ------- + A : ndarray + Random variates of shape (`shape`) + (``dim``, ``dim``). + Each slice `A[..., :, :]` is lower-triangular, and its + inverse is the lower Cholesky factor of a draw from + `invwishart(df, np.eye(dim))`. + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. 
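+
+        ``_rvs`` combines such an ``A`` with the lower Cholesky factor ``C``
+        of the scale matrix as ``C @ inv(A) @ inv(A).T @ C.T`` (computed with
+        triangular BLAS routines), which is a draw from
+        ``invwishart(df, scale)``.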
+ + """ + A = np.zeros(shape + (dim, dim)) + + # Random normal variates for off-diagonal elements + tri_rows, tri_cols = np.tril_indices(dim, k=-1) + n_tril = dim * (dim-1) // 2 + A[..., tri_rows, tri_cols] = random_state.normal( + size=(*shape, n_tril), + ) + + # Random chi variates for diagonal elements + rows = np.arange(dim) + chi_dfs = (df - dim + 1) + rows + A[..., rows, rows] = random_state.chisquare( + df=chi_dfs, size=(*shape, dim), + )**0.5 + + return A + + def _rvs(self, n, shape, dim, df, C, random_state): + """Draw random samples from an inverse Wishart distribution. + + Parameters + ---------- + n : integer + Number of variates to generate + shape : iterable + Shape of the variates to generate + dim : int + Dimension of the scale matrix + df : int + Degrees of freedom + C : ndarray + Cholesky factorization of the scale matrix, lower triagular. + %(_doc_random_state)s + + Notes + ----- + As this function does no argument checking, it should not be + called directly; use 'rvs' instead. + + """ + random_state = self._get_random_state(random_state) + # Get random draws A such that inv(A) ~ iW(df, I) + A = self._inv_standard_rvs(n, shape, dim, df, random_state) + + # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale) + trsm = get_blas_funcs(('trsm'), (A,)) + trmm = get_blas_funcs(('trmm'), (A,)) + + for index in np.ndindex(A.shape[:-2]): + if dim > 1: + # Calculate CA + # Get CA = C A^{-1} via triangular solver + CA = trsm(1., A[index], C, side=1, lower=True) + # get SA + A[index] = trmm(1., CA, CA, side=1, lower=True, trans_a=True) + else: + A[index][0, 0] = (C[0, 0] / A[index][0, 0])**2 + + return A + + def rvs(self, df, scale, size=1, random_state=None): + """Draw random samples from an inverse Wishart distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`) + (``dim``, ``dim``), where + ``dim`` is the dimension of the scale matrix. + + Notes + ----- + %(_doc_callparams_note)s + + """ + n, shape = self._process_size(size) + dim, df, scale = self._process_parameters(df, scale) + + # Cholesky decomposition of scale + C = scipy.linalg.cholesky(scale, lower=True) + + out = self._rvs(n, shape, dim, df, C, random_state) + + return _squeeze_output(out) + + def _entropy(self, dim, df, log_det_scale): + # reference: eq. (17) from ref. 3 + psi_eval_points = [0.5 * (df - dim + i) for i in range(1, dim + 1)] + psi_eval_points = np.asarray(psi_eval_points) + return multigammaln(0.5 * df, dim) + 0.5 * dim * df + \ + 0.5 * (dim + 1) * (log_det_scale - _LOG_2) - \ + 0.5 * (df + dim + 1) * \ + psi(psi_eval_points, out=psi_eval_points).sum() + + def entropy(self, df, scale): + dim, df, scale = self._process_parameters(df, scale) + _, log_det_scale = self._cholesky_logdet(scale) + return self._entropy(dim, df, log_det_scale) + + +invwishart = invwishart_gen() + + +class invwishart_frozen(multi_rv_frozen): + def __init__(self, df, scale, seed=None): + """Create a frozen inverse Wishart distribution. + + Parameters + ---------- + df : array_like + Degrees of freedom of the distribution + scale : array_like + Scale matrix of the distribution + seed : {None, int, `numpy.random.Generator`}, optional + If `seed` is None the `numpy.random.Generator` singleton is used. + If `seed` is an int, a new ``Generator`` instance is used, + seeded with `seed`. 
+ If `seed` is already a ``Generator`` instance then that instance is + used. + + """ + self._dist = invwishart_gen(seed) + self.dim, self.df, self.scale = self._dist._process_parameters( + df, scale + ) + + # Get the determinant via Cholesky factorization + self.C = scipy.linalg.cholesky(self.scale, lower=True) + self.log_det_scale = 2 * np.sum(np.log(self.C.diagonal())) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + out = self._dist._logpdf(x, self.dim, self.df, + self.log_det_scale, self.C) + return _squeeze_output(out) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def mean(self): + out = self._dist._mean(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def mode(self): + out = self._dist._mode(self.dim, self.df, self.scale) + return _squeeze_output(out) + + def var(self): + out = self._dist._var(self.dim, self.df, self.scale) + return _squeeze_output(out) if out is not None else out + + def rvs(self, size=1, random_state=None): + n, shape = self._dist._process_size(size) + + out = self._dist._rvs(n, shape, self.dim, self.df, + self.C, random_state) + + return _squeeze_output(out) + + def entropy(self): + return self._dist._entropy(self.dim, self.df, self.log_det_scale) + + +# Set frozen generator docstrings from corresponding docstrings in +# inverse Wishart and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']: + method = invwishart_gen.__dict__[name] + method_frozen = wishart_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, wishart_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) + +_multinomial_doc_default_callparams = """\ +n : int + Number of trials +p : array_like + Probability of a trial falling into each category; should sum to 1 +""" + +_multinomial_doc_callparams_note = """\ +`n` should be a nonnegative integer. Each element of `p` should be in the +interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to +1, the last element of the `p` array is not used and is replaced with the +remaining probability left over from the earlier elements. +""" + +_multinomial_doc_frozen_callparams = "" + +_multinomial_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +multinomial_docdict_params = { + '_doc_default_callparams': _multinomial_doc_default_callparams, + '_doc_callparams_note': _multinomial_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +multinomial_docdict_noparams = { + '_doc_default_callparams': _multinomial_doc_frozen_callparams, + '_doc_callparams_note': _multinomial_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multinomial_gen(multi_rv_generic): + r"""A multinomial random variable. + + Methods + ------- + pmf(x, n, p) + Probability mass function. + logpmf(x, n, p) + Log of the probability mass function. + rvs(n, p, size=1, random_state=None) + Draw random samples from a multinomial distribution. + entropy(n, p) + Compute the entropy of the multinomial distribution. + cov(n, p) + Compute the covariance matrix of the multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_doc_callparams_note)s + + The probability mass function for `multinomial` is + + .. math:: + + f(x) = \frac{n!}{x_1! 
\cdots x_k!} p_1^{x_1} \cdots p_k^{x_k}, + + supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a + nonnegative integer and their sum is :math:`n`. + + .. versionadded:: 0.19.0 + + Examples + -------- + + >>> from scipy.stats import multinomial + >>> rv = multinomial(8, [0.3, 0.2, 0.5]) + >>> rv.pmf([1, 3, 4]) + 0.042000000000000072 + + The multinomial distribution for :math:`k=2` is identical to the + corresponding binomial distribution (tiny numerical differences + notwithstanding): + + >>> from scipy.stats import binom + >>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6]) + 0.29030399999999973 + >>> binom.pmf(3, 7, 0.4) + 0.29030400000000012 + + The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support + broadcasting, under the convention that the vector parameters (``x`` and + ``p``) are interpreted as if each row along the last axis is a single + object. For instance: + + >>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7]) + array([0.2268945, 0.25412184]) + + Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``, + but following the rules mentioned above they behave as if the rows + ``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single + object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and + ``p.shape = ()``. To obtain the individual elements without broadcasting, + we would do this: + + >>> multinomial.pmf([3, 4], n=7, p=[.3, .7]) + 0.2268945 + >>> multinomial.pmf([3, 5], 8, p=[.3, .7]) + 0.25412184 + + This broadcasting also works for ``cov``, where the output objects are + square matrices of size ``p.shape[-1]``. For example: + + >>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]]) + array([[[ 0.84, -0.84], + [-0.84, 0.84]], + [[ 1.2 , -1.2 ], + [-1.2 , 1.2 ]]]) + + In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and + following the rules above, these broadcast as if ``p.shape == (2,)``. + Thus the result should also be of shape ``(2,)``, but since each output is + a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``, + where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and + ``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``. + + Alternatively, the object may be called (as a function) to fix the `n` and + `p` parameters, returning a "frozen" multinomial random variable: + + >>> rv = multinomial(n=7, p=[.3, .7]) + >>> # Frozen object with the same methods but holding the given + >>> # degrees of freedom and scale fixed. + + See also + -------- + scipy.stats.binom : The binomial distribution. + numpy.random.Generator.multinomial : Sampling from the multinomial distribution. + scipy.stats.multivariate_hypergeom : + The multivariate hypergeometric distribution. + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = \ + doccer.docformat(self.__doc__, multinomial_docdict_params) + + def __call__(self, n, p, seed=None): + """Create a frozen multinomial distribution. + + See `multinomial_frozen` for more information. + """ + return multinomial_frozen(n, p, seed) + + def _process_parameters(self, n, p, eps=1e-15): + """Returns: n_, p_, npcond. + + n_ and p_ are arrays of the correct shape; npcond is a boolean array + flagging values out of the domain. + """ + p = np.array(p, dtype=np.float64, copy=True) + p_adjusted = 1. 
- p[..., :-1].sum(axis=-1) + i_adjusted = np.abs(p_adjusted) > eps + p[i_adjusted, -1] = p_adjusted[i_adjusted] + + # true for bad p + pcond = np.any(p < 0, axis=-1) + pcond |= np.any(p > 1, axis=-1) + + n = np.array(n, dtype=int, copy=True) + + # true for bad n + ncond = n < 0 + + return n, p, ncond | pcond + + def _process_quantiles(self, x, n, p): + """Returns: x_, xcond. + + x_ is an int array; xcond is a boolean array flagging values out of the + domain. + """ + xx = np.asarray(x, dtype=int) + + if xx.ndim == 0: + raise ValueError("x must be an array.") + + if xx.size != 0 and not xx.shape[-1] == p.shape[-1]: + raise ValueError("Size of each quantile should be size of p: " + "received %d, but expected %d." % + (xx.shape[-1], p.shape[-1])) + + # true for x out of the domain + cond = np.any(xx != x, axis=-1) + cond |= np.any(xx < 0, axis=-1) + cond = cond | (np.sum(xx, axis=-1) != n) + + return xx, cond + + def _checkresult(self, result, cond, bad_value): + result = np.asarray(result) + + if cond.ndim != 0: + result[cond] = bad_value + elif cond: + if result.ndim == 0: + return bad_value + result[...] = bad_value + return result + + def _logpmf(self, x, n, p): + return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1) + + def logpmf(self, x, n, p): + """Log of the Multinomial probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + + Returns + ------- + logpmf : ndarray or scalar + Log of the probability mass function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + x, xcond = self._process_quantiles(x, n, p) + + result = self._logpmf(x, n, p) + + # replace values for which x was out of the domain; broadcast + # xcond to the right shape + xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_) + result = self._checkresult(result, xcond_, -np.inf) + + # replace values bad for n or p; broadcast npcond to the right shape + npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_) + return self._checkresult(result, npcond_, np.nan) + + def pmf(self, x, n, p): + """Multinomial probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + + Returns + ------- + pmf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + return np.exp(self.logpmf(x, n, p)) + + def mean(self, n, p): + """Mean of the Multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : float + The mean of the distribution + """ + n, p, npcond = self._process_parameters(n, p) + result = n[..., np.newaxis]*p + return self._checkresult(result, npcond, np.nan) + + def cov(self, n, p): + """Covariance matrix of the multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + cov : ndarray + The covariance matrix of the distribution + """ + n, p, npcond = self._process_parameters(n, p) + + nn = n[..., np.newaxis, np.newaxis] + result = nn * np.einsum('...j,...k->...jk', -p, p) + + # change the diagonal + for i in range(p.shape[-1]): + result[..., i, i] += n*p[..., i] + + return self._checkresult(result, npcond, np.nan) + + def entropy(self, n, p): + r"""Compute the entropy of the multinomial distribution. + + The entropy is computed using this expression: + + .. 
math:: + + f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i + + \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x! + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + h : scalar + Entropy of the Multinomial distribution + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + + x = np.r_[1:np.max(n)+1] + + term1 = n*np.sum(entr(p), axis=-1) + term1 -= gammaln(n+1) + + n = n[..., np.newaxis] + new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1 + x.shape += (1,)*new_axes_needed + + term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1), + axis=(-1, -1-new_axes_needed)) + + return self._checkresult(term1 + term2, npcond, np.nan) + + def rvs(self, n, p, size=None, random_state=None): + """Draw random samples from a Multinomial distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of shape (`size`, `len(p)`) + + Notes + ----- + %(_doc_callparams_note)s + """ + n, p, npcond = self._process_parameters(n, p) + random_state = self._get_random_state(random_state) + return random_state.multinomial(n, p, size) + + +multinomial = multinomial_gen() + + +class multinomial_frozen(multi_rv_frozen): + r"""Create a frozen Multinomial distribution. + + Parameters + ---------- + n : int + number of trials + p: array_like + probability of a trial falling into each category; should sum to 1 + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + """ + def __init__(self, n, p, seed=None): + self._dist = multinomial_gen(seed) + self.n, self.p, self.npcond = self._dist._process_parameters(n, p) + + # monkey patch self._dist + def _process_parameters(n, p): + return self.n, self.p, self.npcond + + self._dist._process_parameters = _process_parameters + + def logpmf(self, x): + return self._dist.logpmf(x, self.n, self.p) + + def pmf(self, x): + return self._dist.pmf(x, self.n, self.p) + + def mean(self): + return self._dist.mean(self.n, self.p) + + def cov(self): + return self._dist.cov(self.n, self.p) + + def entropy(self): + return self._dist.entropy(self.n, self.p) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.n, self.p, size, random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multinomial and fill in default strings in class docstrings +for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']: + method = multinomial_gen.__dict__[name] + method_frozen = multinomial_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, multinomial_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, + multinomial_docdict_params) + + +class special_ortho_group_gen(multi_rv_generic): + r"""A Special Orthogonal matrix (SO(N)) random variable. + + Return a random rotation matrix, drawn from the Haar distribution + (the only uniform distribution on SO(N)) with a determinant of +1. + + The `dim` keyword specifies the dimension N. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random samples from SO(N). 
+ + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Notes + ----- + This class is wrapping the random_rot code from the MDP Toolkit, + https://github.com/mdp-toolkit/mdp-toolkit + + Return a random rotation matrix, drawn from the Haar distribution + (the only uniform distribution on SO(N)). + The algorithm is described in the paper + Stewart, G.W., "The efficient generation of random orthogonal + matrices with an application to condition estimators", SIAM Journal + on Numerical Analysis, 17(3), pp. 403-409, 1980. + For more information see + https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization + + See also the similar `ortho_group`. For a random rotation in three + dimensions, see `scipy.spatial.transform.Rotation.random`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import special_ortho_group + >>> x = special_ortho_group.rvs(3) + + >>> np.dot(x, x.T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + >>> import scipy.linalg + >>> scipy.linalg.det(x) + 1.0 + + This generates one random matrix from SO(3). It is orthogonal and + has a determinant of 1. + + Alternatively, the object may be called (as a function) to fix the `dim` + parameter, returning a "frozen" special_ortho_group random variable: + + >>> rv = special_ortho_group(5) + >>> # Frozen object with the same methods but holding the + >>> # dimension parameter fixed. + + See Also + -------- + ortho_group, scipy.spatial.transform.Rotation.random + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen SO(N) distribution. + + See `special_ortho_group_frozen` for more information. + """ + return special_ortho_group_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("""Dimension of rotation must be specified, + and must be a scalar greater than 1.""") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """Draw random samples from SO(N). + + Parameters + ---------- + dim : integer + Dimension of rotation space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + size = (size,) if size > 1 else () + + dim = self._process_parameters(dim) + + # H represents a (dim, dim) matrix, while D represents the diagonal of + # a (dim, dim) diagonal matrix. The algorithm that follows is + # broadcasted on the leading shape in `size` to vectorize along + # samples. 
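+        #
+        # In outline (following Stewart, 1980): for n = 0, ..., dim-2 a
+        # Gaussian vector x of length dim-n defines a Householder reflection
+        # that is applied to the trailing columns of H, while D records the
+        # signs needed so that the final matrix has determinant +1.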
+ H = np.empty(size + (dim, dim)) + H[..., :, :] = np.eye(dim) + D = np.empty(size + (dim,)) + + for n in range(dim-1): + + # x is a vector with length dim-n, xrow and xcol are views of it as + # a row vector and column vector respectively. It's important they + # are views and not copies because we are going to modify x + # in-place. + x = random_state.normal(size=size + (dim-n,)) + xrow = x[..., None, :] + xcol = x[..., :, None] + + # This is the squared norm of x, without vectorization it would be + # dot(x, x), to have proper broadcasting we use matmul and squeeze + # out (convert to scalar) the resulting 1x1 matrix + norm2 = np.matmul(xrow, xcol).squeeze((-2, -1)) + + x0 = x[..., 0].copy() + D[..., n] = np.where(x0 != 0, np.sign(x0), 1) + x[..., 0] += D[..., n]*np.sqrt(norm2) + + # In renormalizing x we have to append an additional axis with + # [..., None] to broadcast the scalar against the vector x + x /= np.sqrt((norm2 - x0**2 + x[..., 0]**2) / 2.)[..., None] + + # Householder transformation, without vectorization the RHS can be + # written as outer(H @ x, x) (apart from the slicing) + H[..., :, n:] -= np.matmul(H[..., :, n:], xcol) * xrow + + D[..., -1] = (-1)**(dim-1)*D[..., :-1].prod(axis=-1) + + # Without vectorization this could be written as H = diag(D) @ H, + # left-multiplication by a diagonal matrix amounts to multiplying each + # row of H by an element of the diagonal, so we add a dummy axis for + # the column index + H *= D[..., :, None] + return H + + +special_ortho_group = special_ortho_group_gen() + + +class special_ortho_group_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen SO(N) distribution. + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Examples + -------- + >>> from scipy.stats import special_ortho_group + >>> g = special_ortho_group(5) + >>> x = g.rvs() + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = special_ortho_group_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +class ortho_group_gen(multi_rv_generic): + r"""An Orthogonal matrix (O(N)) random variable. + + Return a random orthogonal matrix, drawn from the O(N) Haar + distribution (the only uniform distribution on O(N)). + + The `dim` keyword specifies the dimension N. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random samples from O(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Notes + ----- + This class is closely related to `special_ortho_group`. + + Some care is taken to avoid numerical error, as per the paper by Mezzadri. + + References + ---------- + .. [1] F. 
Mezzadri, "How to generate random matrices from the classical + compact groups", :arXiv:`math-ph/0609050v2`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import ortho_group + >>> x = ortho_group.rvs(3) + + >>> np.dot(x, x.T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + >>> import scipy.linalg + >>> np.fabs(scipy.linalg.det(x)) + 1.0 + + This generates one random matrix from O(3). It is orthogonal and + has a determinant of +1 or -1. + + Alternatively, the object may be called (as a function) to fix the `dim` + parameter, returning a "frozen" ortho_group random variable: + + >>> rv = ortho_group(5) + >>> # Frozen object with the same methods but holding the + >>> # dimension parameter fixed. + + See Also + -------- + special_ortho_group + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen O(N) distribution. + + See `ortho_group_frozen` for more information. + """ + return ortho_group_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("Dimension of rotation must be specified," + "and must be a scalar greater than 1.") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """Draw random samples from O(N). + + Parameters + ---------- + dim : integer + Dimension of rotation space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + + dim = self._process_parameters(dim) + + size = (size,) if size > 1 else () + z = random_state.normal(size=size + (dim, dim)) + q, r = np.linalg.qr(z) + # The last two dimensions are the rows and columns of R matrices. + # Extract the diagonals. Note that this eliminates a dimension. + d = r.diagonal(offset=0, axis1=-2, axis2=-1) + # Add back a dimension for proper broadcasting: we're dividing + # each row of each R matrix by the diagonal of the R matrix. + q *= (d/abs(d))[..., np.newaxis, :] # to broadcast properly + return q + + +ortho_group = ortho_group_gen() + + +class ortho_group_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen O(N) distribution. + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Examples + -------- + >>> from scipy.stats import ortho_group + >>> g = ortho_group(5) + >>> x = g.rvs() + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = ortho_group_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +class random_correlation_gen(multi_rv_generic): + r"""A random correlation matrix. + + Return a random correlation matrix, given a vector of eigenvalues. 
+ + The `eigs` keyword specifies the eigenvalues of the correlation matrix, + and implies the dimension. + + Methods + ------- + rvs(eigs=None, random_state=None) + Draw random correlation matrices, all with eigenvalues eigs. + + Parameters + ---------- + eigs : 1d ndarray + Eigenvalues of correlation matrix + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + tol : float, optional + Tolerance for input parameter checks + diag_tol : float, optional + Tolerance for deviation of the diagonal of the resulting + matrix. Default: 1e-7 + + Raises + ------ + RuntimeError + Floating point error prevented generating a valid correlation + matrix. + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim), + each having eigenvalues eigs. + + Notes + ----- + + Generates a random correlation matrix following a numerically stable + algorithm spelled out by Davies & Higham. This algorithm uses a single O(N) + similarity transformation to construct a symmetric positive semi-definite + matrix, and applies a series of Givens rotations to scale it to have ones + on the diagonal. + + References + ---------- + + .. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation + of correlation matrices and their factors", BIT 2000, Vol. 40, + No. 4, pp. 640 651 + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import random_correlation + >>> rng = np.random.default_rng() + >>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng) + >>> x + array([[ 1. , -0.02423399, 0.03130519, 0.4946965 ], + [-0.02423399, 1. , 0.20334736, 0.04039817], + [ 0.03130519, 0.20334736, 1. , 0.02694275], + [ 0.4946965 , 0.04039817, 0.02694275, 1. ]]) + >>> import scipy.linalg + >>> e, v = scipy.linalg.eigh(x) + >>> e + array([ 0.5, 0.8, 1.2, 1.5]) + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7): + """Create a frozen random correlation matrix. + + See `random_correlation_frozen` for more information. + """ + return random_correlation_frozen(eigs, seed=seed, tol=tol, + diag_tol=diag_tol) + + def _process_parameters(self, eigs, tol): + eigs = np.asarray(eigs, dtype=float) + dim = eigs.size + + if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1: + raise ValueError("Array 'eigs' must be a vector of length " + "greater than 1.") + + if np.fabs(np.sum(eigs) - dim) > tol: + raise ValueError("Sum of eigenvalues must equal dimensionality.") + + for x in eigs: + if x < -tol: + raise ValueError("All eigenvalues must be non-negative.") + + return dim, eigs + + def _givens_to_1(self, aii, ajj, aij): + """Computes a 2x2 Givens matrix to put 1's on the diagonal. + + The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ]. + + The output matrix g is a 2x2 anti-symmetric matrix of the form + [ c s ; -s c ]; the elements c and s are returned. + + Applying the output matrix to the input matrix (as b=g.T M g) + results in a matrix with bii=1, provided tr(M) - det(M) >= 1 + and floating point issues do not occur. Otherwise, some other + valid rotation is returned. When tr(M)==2, also bjj=1. 
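+
+        Concretely, with g = [ c s ; -s c ] and b = g.T M g the new (0, 0)
+        entry is b00 = c**2*aii - 2*c*s*aij + s**2*ajj. Writing s = c*t with
+        c**2 = 1/(1 + t**2), the requirement b00 == 1 reduces to the
+        quadratic (ajj - 1)*t**2 - 2*aij*t + (aii - 1) == 0; the code below
+        selects the root that avoids cancellation.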
+ + """ + aiid = aii - 1. + ajjd = ajj - 1. + + if ajjd == 0: + # ajj==1, so swap aii and ajj to avoid division by zero + return 0., 1. + + dd = math.sqrt(max(aij**2 - aiid*ajjd, 0)) + + # The choice of t should be chosen to avoid cancellation [1] + t = (aij + math.copysign(dd, aij)) / ajjd + c = 1. / math.sqrt(1. + t*t) + if c == 0: + # Underflow + s = 1.0 + else: + s = c*t + return c, s + + def _to_corr(self, m): + """ + Given a psd matrix m, rotate to put one's on the diagonal, turning it + into a correlation matrix. This also requires the trace equal the + dimensionality. Note: modifies input matrix + """ + # Check requirements for in-place Givens + if not (m.flags.c_contiguous and m.dtype == np.float64 and + m.shape[0] == m.shape[1]): + raise ValueError() + + d = m.shape[0] + for i in range(d-1): + if m[i, i] == 1: + continue + elif m[i, i] > 1: + for j in range(i+1, d): + if m[j, j] < 1: + break + else: + for j in range(i+1, d): + if m[j, j] > 1: + break + + c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j]) + + # Use BLAS to apply Givens rotations in-place. Equivalent to: + # g = np.eye(d) + # g[i, i] = g[j,j] = c + # g[j, i] = -s; g[i, j] = s + # m = np.dot(g.T, np.dot(m, g)) + mv = m.ravel() + drot(mv, mv, c, -s, n=d, + offx=i*d, incx=1, offy=j*d, incy=1, + overwrite_x=True, overwrite_y=True) + drot(mv, mv, c, -s, n=d, + offx=i, incx=d, offy=j, incy=d, + overwrite_x=True, overwrite_y=True) + + return m + + def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7): + """Draw random correlation matrices. + + Parameters + ---------- + eigs : 1d ndarray + Eigenvalues of correlation matrix + tol : float, optional + Tolerance for input parameter checks + diag_tol : float, optional + Tolerance for deviation of the diagonal of the resulting + matrix. Default: 1e-7 + + Raises + ------ + RuntimeError + Floating point error prevented generating a valid correlation + matrix. + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim), + each having eigenvalues eigs. + + """ + dim, eigs = self._process_parameters(eigs, tol=tol) + + random_state = self._get_random_state(random_state) + + m = ortho_group.rvs(dim, random_state=random_state) + m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m + m = self._to_corr(m) # Carefully rotate to unit diagonal + + # Check diagonal + if abs(m.diagonal() - 1).max() > diag_tol: + raise RuntimeError("Failed to generate a valid correlation matrix") + + return m + + +random_correlation = random_correlation_gen() + + +class random_correlation_frozen(multi_rv_frozen): + def __init__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7): + """Create a frozen random correlation matrix distribution. + + Parameters + ---------- + eigs : 1d ndarray + Eigenvalues of correlation matrix + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + tol : float, optional + Tolerance for input parameter checks + diag_tol : float, optional + Tolerance for deviation of the diagonal of the resulting + matrix. Default: 1e-7 + + Raises + ------ + RuntimeError + Floating point error prevented generating a valid correlation + matrix. 
+ + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim), + each having eigenvalues eigs. + """ # numpy/numpydoc#87 # noqa: E501 + + self._dist = random_correlation_gen(seed) + self.tol = tol + self.diag_tol = diag_tol + _, self.eigs = self._dist._process_parameters(eigs, tol=self.tol) + + def rvs(self, random_state=None): + return self._dist.rvs(self.eigs, random_state=random_state, + tol=self.tol, diag_tol=self.diag_tol) + + +class unitary_group_gen(multi_rv_generic): + r"""A matrix-valued U(N) random variable. + + Return a random unitary matrix. + + The `dim` keyword specifies the dimension N. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random samples from U(N). + + Parameters + ---------- + dim : scalar + Dimension of matrices, must be greater than 1. + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Notes + ----- + This class is similar to `ortho_group`. + + References + ---------- + .. [1] F. Mezzadri, "How to generate random matrices from the classical + compact groups", :arXiv:`math-ph/0609050v2`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import unitary_group + >>> x = unitary_group.rvs(3) + + >>> np.dot(x, x.conj().T) + array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], + [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], + [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) + + This generates one random matrix from U(3). The dot product confirms that + it is unitary up to machine precision. + + Alternatively, the object may be called (as a function) to fix the `dim` + parameter, return a "frozen" unitary_group random variable: + + >>> rv = unitary_group(5) + + See Also + -------- + ortho_group + + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen (U(N)) n-dimensional unitary matrix distribution. + + See `unitary_group_frozen` for more information. + """ + return unitary_group_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): + raise ValueError("Dimension of rotation must be specified," + "and must be a scalar greater than 1.") + + return dim + + def rvs(self, dim, size=1, random_state=None): + """Draw random samples from U(N). + + Parameters + ---------- + dim : integer + Dimension of space (N). + size : integer, optional + Number of samples to draw (default 1). + + Returns + ------- + rvs : ndarray or scalar + Random size N-dimensional matrices, dimension (size, dim, dim) + + """ + random_state = self._get_random_state(random_state) + + size = int(size) + + dim = self._process_parameters(dim) + + size = (size,) if size > 1 else () + z = 1/math.sqrt(2)*(random_state.normal(size=size + (dim, dim)) + + 1j*random_state.normal(size=size + (dim, dim))) + q, r = np.linalg.qr(z) + # The last two dimensions are the rows and columns of R matrices. + # Extract the diagonals. Note that this eliminates a dimension. 
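+        # The diagonal d of R is used to rescale the columns of Q by the
+        # unit-modulus phases d/|d|; this fixes the non-uniqueness of the QR
+        # factorization and is what makes the result Haar-distributed
+        # (see Mezzadri, ref. [1]).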
+ d = r.diagonal(offset=0, axis1=-2, axis2=-1) + # Add back a dimension for proper broadcasting: we're dividing + # each row of each R matrix by the diagonal of the R matrix. + q *= (d/abs(d))[..., np.newaxis, :] # to broadcast properly + return q + + +unitary_group = unitary_group_gen() + + +class unitary_group_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen (U(N)) n-dimensional unitary matrix distribution. + + Parameters + ---------- + dim : scalar + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Examples + -------- + >>> from scipy.stats import unitary_group + >>> x = unitary_group(3) + >>> x.rvs() + + """ # numpy/numpydoc#87 # noqa: E501 + self._dist = unitary_group_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +_mvt_doc_default_callparams = """\ +loc : array_like, optional + Location of the distribution. (default ``0``) +shape : array_like, optional + Positive semidefinite matrix of the distribution. (default ``1``) +df : float, optional + Degrees of freedom of the distribution; must be greater than zero. + If ``np.inf`` then results are multivariate normal. The default is ``1``. +allow_singular : bool, optional + Whether to allow a singular matrix. (default ``False``) +""" + +_mvt_doc_callparams_note = """\ +Setting the parameter `loc` to ``None`` is equivalent to having `loc` +be the zero-vector. The parameter `shape` can be a scalar, in which case +the shape matrix is the identity times that value, a vector of +diagonal entries for the shape matrix, or a two-dimensional array_like. +""" + +_mvt_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +mvt_docdict_params = { + '_mvt_doc_default_callparams': _mvt_doc_default_callparams, + '_mvt_doc_callparams_note': _mvt_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +mvt_docdict_noparams = { + '_mvt_doc_default_callparams': "", + '_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multivariate_t_gen(multi_rv_generic): + r"""A multivariate t-distributed random variable. + + The `loc` parameter specifies the location. The `shape` parameter specifies + the positive semidefinite shape matrix. The `df` parameter specifies the + degrees of freedom. + + In addition to calling the methods below, the object itself may be called + as a function to fix the location, shape matrix, and degrees of freedom + parameters, returning a "frozen" multivariate t-distribution random. + + Methods + ------- + pdf(x, loc=None, shape=1, df=1, allow_singular=False) + Probability density function. + logpdf(x, loc=None, shape=1, df=1, allow_singular=False) + Log of the probability density function. + cdf(x, loc=None, shape=1, df=1, allow_singular=False, *, + maxpts=None, lower_limit=None, random_state=None) + Cumulative distribution function. + rvs(loc=None, shape=1, df=1, size=1, random_state=None) + Draw random samples from a multivariate t-distribution. + entropy(loc=None, shape=1, df=1) + Differential entropy of a multivariate t-distribution. 
+ + Parameters + ---------- + %(_mvt_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_mvt_doc_callparams_note)s + The matrix `shape` must be a (symmetric) positive semidefinite matrix. The + determinant and inverse of `shape` are computed as the pseudo-determinant + and pseudo-inverse, respectively, so that `shape` does not need to have + full rank. + + The probability density function for `multivariate_t` is + + .. math:: + + f(x) = \frac{\Gamma((\nu + p)/2)}{\Gamma(\nu/2)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}} + \left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top} + \boldsymbol{\Sigma}^{-1} + (\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2}, + + where :math:`p` is the dimension of :math:`\mathbf{x}`, + :math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location, + :math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape + matrix, and :math:`\nu` is the degrees of freedom. + + .. versionadded:: 1.6.0 + + References + ---------- + .. [1] Arellano-Valle et al. "Shannon Entropy and Mutual Information for + Multivariate Skew-Elliptical Distributions". Scandinavian Journal + of Statistics. Vol. 40, issue 1. + + Examples + -------- + The object may be called (as a function) to fix the `loc`, `shape`, + `df`, and `allow_singular` parameters, returning a "frozen" + multivariate_t random variable: + + >>> import numpy as np + >>> from scipy.stats import multivariate_t + >>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2) + >>> # Frozen object with the same methods but holding the given location, + >>> # scale, and degrees of freedom fixed. + + Create a contour plot of the PDF. + + >>> import matplotlib.pyplot as plt + >>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01] + >>> pos = np.dstack((x, y)) + >>> fig, ax = plt.subplots(1, 1) + >>> ax.set_aspect('equal') + >>> plt.contourf(x, y, rv.pdf(pos)) + + """ + + def __init__(self, seed=None): + """Initialize a multivariate t-distributed random variable. + + Parameters + ---------- + seed : Random state. + + """ + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params) + self._random_state = check_random_state(seed) + + def __call__(self, loc=None, shape=1, df=1, allow_singular=False, + seed=None): + """Create a frozen multivariate t-distribution. + + See `multivariate_t_frozen` for parameters. + """ + if df == np.inf: + return multivariate_normal_frozen(mean=loc, cov=shape, + allow_singular=allow_singular, + seed=seed) + return multivariate_t_frozen(loc=loc, shape=shape, df=df, + allow_singular=allow_singular, seed=seed) + + def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False): + """Multivariate t-distribution probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the probability density function. + %(_mvt_doc_default_callparams)s + + Returns + ------- + pdf : Probability density function evaluated at `x`. 
+ + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.pdf(x, loc, shape, df) + 0.00075713 + + """ + dim, loc, shape, df = self._process_parameters(loc, shape, df) + x = self._process_quantiles(x, dim) + shape_info = _PSD(shape, allow_singular=allow_singular) + logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, + dim, shape_info.rank) + return np.exp(logpdf) + + def logpdf(self, x, loc=None, shape=1, df=1): + """Log of the multivariate t-distribution probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability density + function. + %(_mvt_doc_default_callparams)s + + Returns + ------- + logpdf : Log of the probability density function evaluated at `x`. + + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.logpdf(x, loc, shape, df) + -7.1859802 + + See Also + -------- + pdf : Probability density function. + + """ + dim, loc, shape, df = self._process_parameters(loc, shape, df) + x = self._process_quantiles(x, dim) + shape_info = _PSD(shape) + return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim, + shape_info.rank) + + def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank): + """Utility method `pdf`, `logpdf` for parameters. + + Parameters + ---------- + x : ndarray + Points at which to evaluate the log of the probability density + function. + loc : ndarray + Location of the distribution. + prec_U : ndarray + A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse + of the shape matrix. + log_pdet : float + Logarithm of the determinant of the shape matrix. + df : float + Degrees of freedom of the distribution. + dim : int + Dimension of the quantiles x. + rank : int + Rank of the shape matrix. + + Notes + ----- + As this function does no argument checking, it should not be called + directly; use 'logpdf' instead. + + """ + if df == np.inf: + return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank) + + dev = x - loc + maha = np.square(np.dot(dev, prec_U)).sum(axis=-1) + + t = 0.5 * (df + dim) + A = gammaln(t) + B = gammaln(0.5 * df) + C = dim/2. * np.log(df * np.pi) + D = 0.5 * log_pdet + E = -t * np.log(1 + (1./df) * maha) + + return _squeeze_output(A - B - C - D + E) + + def _cdf(self, x, loc, shape, df, dim, maxpts=None, lower_limit=None, + random_state=None): + + # All of this - random state validation, maxpts, apply_along_axis, + # etc. 
needs to go in this private method unless we want + # frozen distribution's `cdf` method to duplicate it or call `cdf`, + # which would require re-processing parameters + if random_state is not None: + rng = check_random_state(random_state) + else: + rng = self._random_state + + if not maxpts: + maxpts = 1000 * dim + + x = self._process_quantiles(x, dim) + lower_limit = (np.full(loc.shape, -np.inf) + if lower_limit is None else lower_limit) + + # remove the mean + x, lower_limit = x - loc, lower_limit - loc + + b, a = np.broadcast_arrays(x, lower_limit) + i_swap = b < a + signs = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative + a, b = a.copy(), b.copy() + a[i_swap], b[i_swap] = b[i_swap], a[i_swap] + n = x.shape[-1] + limits = np.concatenate((a, b), axis=-1) + + def func1d(limits): + a, b = limits[:n], limits[n:] + return _qmvt(maxpts, df, shape, a, b, rng)[0] + + res = np.apply_along_axis(func1d, -1, limits) * signs + # Fixing the output shape for existing distributions is a separate + # issue. For now, let's keep this consistent with pdf. + return _squeeze_output(res) + + def cdf(self, x, loc=None, shape=1, df=1, allow_singular=False, *, + maxpts=None, lower_limit=None, random_state=None): + """Multivariate t-distribution cumulative distribution function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the cumulative distribution function. + %(_mvt_doc_default_callparams)s + maxpts : int, optional + Maximum number of points to use for integration. The default is + 1000 times the number of dimensions. + lower_limit : array_like, optional + Lower limit of integration of the cumulative distribution function. + Default is negative infinity. Must be broadcastable with `x`. + %(_doc_random_state)s + + Returns + ------- + cdf : ndarray or scalar + Cumulative distribution function evaluated at `x`. + + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.cdf(x, loc, shape, df) + 0.64798491 + + """ + dim, loc, shape, df = self._process_parameters(loc, shape, df) + shape = _PSD(shape, allow_singular=allow_singular)._M + + return self._cdf(x, loc, shape, df, dim, maxpts, + lower_limit, random_state) + + def _entropy(self, dim, df=1, shape=1): + if df == np.inf: + return multivariate_normal(None, cov=shape).entropy() + + shape_info = _PSD(shape) + shape_term = 0.5 * shape_info.log_pdet + + def regular(dim, df): + halfsum = 0.5 * (dim + df) + half_df = 0.5 * df + return ( + -gammaln(halfsum) + gammaln(half_df) + + 0.5 * dim * np.log(df * np.pi) + halfsum + * (psi(halfsum) - psi(half_df)) + + shape_term + ) + + def asymptotic(dim, df): + # Formula from Wolfram Alpha: + # "asymptotic expansion -gammaln((m+d)/2) + gammaln(d/2) + (m*log(d*pi))/2 + # + ((m+d)/2) * (digamma((m+d)/2) - digamma(d/2))" + return ( + dim * norm._entropy() + dim / df + - dim * (dim - 2) * df**-2.0 / 4 + + dim**2 * (dim - 2) * df**-3.0 / 6 + + dim * (-3 * dim**3 + 8 * dim**2 - 8) * df**-4.0 / 24 + + dim**2 * (3 * dim**3 - 10 * dim**2 + 16) * df**-5.0 / 30 + + shape_term + )[()] + + # preserves ~12 digits accuracy up to at least `dim=1e5`. See gh-18465. + threshold = dim * 100 * 4 / (np.log(dim) + 1) + return _lazywhere(df >= threshold, (dim, df), f=asymptotic, f2=regular) + + def entropy(self, loc=None, shape=1, df=1): + """Calculate the differential entropy of a multivariate + t-distribution. 
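+
+        For `df` large relative to the dimension, an asymptotic series
+        expansion is used in place of the exact expression to preserve
+        numerical accuracy (see gh-18465).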
+ + Parameters + ---------- + %(_mvt_doc_default_callparams)s + + Returns + ------- + h : float + Differential entropy + + """ + dim, loc, shape, df = self._process_parameters(None, shape, df) + return self._entropy(dim, df, shape) + + def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None): + """Draw random samples from a multivariate t-distribution. + + Parameters + ---------- + %(_mvt_doc_default_callparams)s + size : integer, optional + Number of samples to draw (default 1). + %(_doc_random_state)s + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `P`), where `P` is the + dimension of the random variable. + + Examples + -------- + >>> from scipy.stats import multivariate_t + >>> x = [0.4, 5] + >>> loc = [0, 1] + >>> shape = [[1, 0.1], [0.1, 1]] + >>> df = 7 + >>> multivariate_t.rvs(loc, shape, df) + array([[0.93477495, 3.00408716]]) + + """ + # For implementation details, see equation (3): + # + # Hofert, "On Sampling from the Multivariatet Distribution", 2013 + # http://rjournal.github.io/archive/2013-2/hofert.pdf + # + dim, loc, shape, df = self._process_parameters(loc, shape, df) + if random_state is not None: + rng = check_random_state(random_state) + else: + rng = self._random_state + + if np.isinf(df): + x = np.ones(size) + else: + x = rng.chisquare(df, size=size) / df + + z = rng.multivariate_normal(np.zeros(dim), shape, size=size) + samples = loc + z / np.sqrt(x)[..., None] + return _squeeze_output(samples) + + def _process_quantiles(self, x, dim): + """ + Adjust quantiles array so that last axis labels the components of + each data point. + """ + x = np.asarray(x, dtype=float) + if x.ndim == 0: + x = x[np.newaxis] + elif x.ndim == 1: + if dim == 1: + x = x[:, np.newaxis] + else: + x = x[np.newaxis, :] + return x + + def _process_parameters(self, loc, shape, df): + """ + Infer dimensionality from location array and shape matrix, handle + defaults, and ensure compatible dimensions. + """ + if loc is None and shape is None: + loc = np.asarray(0, dtype=float) + shape = np.asarray(1, dtype=float) + dim = 1 + elif loc is None: + shape = np.asarray(shape, dtype=float) + if shape.ndim < 2: + dim = 1 + else: + dim = shape.shape[0] + loc = np.zeros(dim) + elif shape is None: + loc = np.asarray(loc, dtype=float) + dim = loc.size + shape = np.eye(dim) + else: + shape = np.asarray(shape, dtype=float) + loc = np.asarray(loc, dtype=float) + dim = loc.size + + if dim == 1: + loc = loc.reshape(1) + shape = shape.reshape(1, 1) + + if loc.ndim != 1 or loc.shape[0] != dim: + raise ValueError("Array 'loc' must be a vector of length %d." % + dim) + if shape.ndim == 0: + shape = shape * np.eye(dim) + elif shape.ndim == 1: + shape = np.diag(shape) + elif shape.ndim == 2 and shape.shape != (dim, dim): + rows, cols = shape.shape + if rows != cols: + msg = ("Array 'cov' must be square if it is two dimensional," + " but cov.shape = %s." % str(shape.shape)) + else: + msg = ("Dimension mismatch: array 'cov' is of shape %s," + " but 'loc' is a vector of length %d.") + msg = msg % (str(shape.shape), len(loc)) + raise ValueError(msg) + elif shape.ndim > 2: + raise ValueError("Array 'cov' must be at most two-dimensional," + " but cov.ndim = %d" % shape.ndim) + + # Process degrees of freedom. 
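+        # As implemented below: `df=None` falls back to the default of 1,
+        # NaN and non-positive values are rejected, and `df=np.inf` is
+        # passed through so that callers can dispatch to the multivariate
+        # normal limit.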
+ if df is None: + df = 1 + elif df <= 0: + raise ValueError("'df' must be greater than zero.") + elif np.isnan(df): + raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.") + + return dim, loc, shape, df + + +class multivariate_t_frozen(multi_rv_frozen): + + def __init__(self, loc=None, shape=1, df=1, allow_singular=False, + seed=None): + """Create a frozen multivariate t distribution. + + Parameters + ---------- + %(_mvt_doc_default_callparams)s + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import multivariate_t + >>> loc = np.zeros(3) + >>> shape = np.eye(3) + >>> df = 10 + >>> dist = multivariate_t(loc, shape, df) + >>> dist.rvs() + array([[ 0.81412036, -1.53612361, 0.42199647]]) + >>> dist.pdf([1, 1, 1]) + array([0.01237803]) + + """ + self._dist = multivariate_t_gen(seed) + dim, loc, shape, df = self._dist._process_parameters(loc, shape, df) + self.dim, self.loc, self.shape, self.df = dim, loc, shape, df + self.shape_info = _PSD(shape, allow_singular=allow_singular) + + def logpdf(self, x): + x = self._dist._process_quantiles(x, self.dim) + U = self.shape_info.U + log_pdet = self.shape_info.log_pdet + return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim, + self.shape_info.rank) + + def cdf(self, x, *, maxpts=None, lower_limit=None, random_state=None): + x = self._dist._process_quantiles(x, self.dim) + return self._dist._cdf(x, self.loc, self.shape, self.df, self.dim, + maxpts, lower_limit, random_state) + + def pdf(self, x): + return np.exp(self.logpdf(x)) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(loc=self.loc, + shape=self.shape, + df=self.df, + size=size, + random_state=random_state) + + def entropy(self): + return self._dist._entropy(self.dim, self.df, self.shape) + + +multivariate_t = multivariate_t_gen() + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_t_gen and fill in default strings in class docstrings +for name in ['logpdf', 'pdf', 'rvs', 'cdf', 'entropy']: + method = multivariate_t_gen.__dict__[name] + method_frozen = multivariate_t_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat(method.__doc__, + mvt_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params) + + +_mhg_doc_default_callparams = """\ +m : array_like + The number of each type of object in the population. + That is, :math:`m[i]` is the number of objects of + type :math:`i`. +n : array_like + The number of samples taken from the population. +""" + +_mhg_doc_callparams_note = """\ +`m` must be an array of positive integers. If the quantile +:math:`i` contains values out of the range :math:`[0, m_i]` +where :math:`m_i` is the number of objects of type :math:`i` +in the population or if the parameters are inconsistent with one +another (e.g. ``x.sum() != n``), methods return the appropriate +value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative +values, the result will contain ``nan`` there. 
+""" + +_mhg_doc_frozen_callparams = "" + +_mhg_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +mhg_docdict_params = { + '_doc_default_callparams': _mhg_doc_default_callparams, + '_doc_callparams_note': _mhg_doc_callparams_note, + '_doc_random_state': _doc_random_state +} + +mhg_docdict_noparams = { + '_doc_default_callparams': _mhg_doc_frozen_callparams, + '_doc_callparams_note': _mhg_doc_frozen_callparams_note, + '_doc_random_state': _doc_random_state +} + + +class multivariate_hypergeom_gen(multi_rv_generic): + r"""A multivariate hypergeometric random variable. + + Methods + ------- + pmf(x, m, n) + Probability mass function. + logpmf(x, m, n) + Log of the probability mass function. + rvs(m, n, size=1, random_state=None) + Draw random samples from a multivariate hypergeometric + distribution. + mean(m, n) + Mean of the multivariate hypergeometric distribution. + var(m, n) + Variance of the multivariate hypergeometric distribution. + cov(m, n) + Compute the covariance matrix of the multivariate + hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + %(_doc_random_state)s + + Notes + ----- + %(_doc_callparams_note)s + + The probability mass function for `multivariate_hypergeom` is + + .. math:: + + P(X_1 = x_1, X_2 = x_2, \ldots, X_k = x_k) = \frac{\binom{m_1}{x_1} + \binom{m_2}{x_2} \cdots \binom{m_k}{x_k}}{\binom{M}{n}}, \\ \quad + (x_1, x_2, \ldots, x_k) \in \mathbb{N}^k \text{ with } + \sum_{i=1}^k x_i = n + + where :math:`m_i` are the number of objects of type :math:`i`, :math:`M` + is the total number of objects in the population (sum of all the + :math:`m_i`), and :math:`n` is the size of the sample to be taken + from the population. + + .. versionadded:: 1.6.0 + + Examples + -------- + To evaluate the probability mass function of the multivariate + hypergeometric distribution, with a dichotomous population of size + :math:`10` and :math:`20`, at a sample of size :math:`12` with + :math:`8` objects of the first type and :math:`4` objects of the + second type, use: + + >>> from scipy.stats import multivariate_hypergeom + >>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12) + 0.0025207176631464523 + + The `multivariate_hypergeom` distribution is identical to the + corresponding `hypergeom` distribution (tiny numerical differences + notwithstanding) when only two types (good and bad) of objects + are present in the population as in the example above. Consider + another example for a comparison with the hypergeometric distribution: + + >>> from scipy.stats import hypergeom + >>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4) + 0.4395604395604395 + >>> hypergeom.pmf(k=3, M=15, n=4, N=10) + 0.43956043956044005 + + The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs`` + support broadcasting, under the convention that the vector parameters + (``x``, ``m``, and ``n``) are interpreted as if each row along the last + axis is a single object. For instance, we can combine the previous two + calls to `multivariate_hypergeom` as + + >>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]], + ... n=[12, 4]) + array([0.00252072, 0.43956044]) + + This broadcasting also works for ``cov``, where the output objects are + square matrices of size ``m.shape[-1]``. 
For example: + + >>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12]) + array([[[ 1.05, -1.05], + [-1.05, 1.05]], + [[ 1.56, -1.56], + [-1.56, 1.56]]]) + + That is, ``result[0]`` is equal to + ``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal + to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``. + + Alternatively, the object may be called (as a function) to fix the `m` + and `n` parameters, returning a "frozen" multivariate hypergeometric + random variable. + + >>> rv = multivariate_hypergeom(m=[10, 20], n=12) + >>> rv.pmf(x=[8, 4]) + 0.0025207176631464523 + + See Also + -------- + scipy.stats.hypergeom : The hypergeometric distribution. + scipy.stats.multinomial : The multinomial distribution. + + References + ---------- + .. [1] The Multivariate Hypergeometric Distribution, + http://www.randomservices.org/random/urn/MultiHypergeometric.html + .. [2] Thomas J. Sargent and John Stachurski, 2020, + Multivariate Hypergeometric Distribution + https://python.quantecon.org/multi_hyper.html + """ + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params) + + def __call__(self, m, n, seed=None): + """Create a frozen multivariate_hypergeom distribution. + + See `multivariate_hypergeom_frozen` for more information. + """ + return multivariate_hypergeom_frozen(m, n, seed=seed) + + def _process_parameters(self, m, n): + m = np.asarray(m) + n = np.asarray(n) + if m.size == 0: + m = m.astype(int) + if n.size == 0: + n = n.astype(int) + if not np.issubdtype(m.dtype, np.integer): + raise TypeError("'m' must an array of integers.") + if not np.issubdtype(n.dtype, np.integer): + raise TypeError("'n' must an array of integers.") + if m.ndim == 0: + raise ValueError("'m' must be an array with" + " at least one dimension.") + + # check for empty arrays + if m.size != 0: + n = n[..., np.newaxis] + + m, n = np.broadcast_arrays(m, n) + + # check for empty arrays + if m.size != 0: + n = n[..., 0] + + mcond = m < 0 + + M = m.sum(axis=-1) + + ncond = (n < 0) | (n > M) + return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond + + def _process_quantiles(self, x, M, m, n): + x = np.asarray(x) + if not np.issubdtype(x.dtype, np.integer): + raise TypeError("'x' must an array of integers.") + if x.ndim == 0: + raise ValueError("'x' must be an array with" + " at least one dimension.") + if not x.shape[-1] == m.shape[-1]: + raise ValueError(f"Size of each quantile must be size of 'm': " + f"received {x.shape[-1]}, " + f"but expected {m.shape[-1]}.") + + # check for empty arrays + if m.size != 0: + n = n[..., np.newaxis] + M = M[..., np.newaxis] + + x, m, n, M = np.broadcast_arrays(x, m, n, M) + + # check for empty arrays + if m.size != 0: + n, M = n[..., 0], M[..., 0] + + xcond = (x < 0) | (x > m) + return (x, M, m, n, xcond, + np.any(xcond, axis=-1) | (x.sum(axis=-1) != n)) + + def _checkresult(self, result, cond, bad_value): + result = np.asarray(result) + if cond.ndim != 0: + result[cond] = bad_value + elif cond: + return bad_value + if result.ndim == 0: + return result[()] + return result + + def _logpmf(self, x, M, m, n, mxcond, ncond): + # This equation of the pmf comes from the relation, + # n combine r = beta(n+1, 1) / beta(r+1, n-r+1) + num = np.zeros_like(m, dtype=np.float64) + den = np.zeros_like(n, dtype=np.float64) + m, x = m[~mxcond], x[~mxcond] + M, n = M[~ncond], n[~ncond] + num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1)) + den[~ncond] = (betaln(M+1, 1) - betaln(n+1, M-n+1)) + num[mxcond] = np.nan 
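+        # den is likewise marked NaN wherever (M, n) were invalid; these
+        # NaNs propagate through the subtraction below and are replaced by
+        # the caller via _checkresult.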
+ den[ncond] = np.nan + num = num.sum(axis=-1) + return num - den + + def logpmf(self, x, m, n): + """Log of the multivariate hypergeometric probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + + Returns + ------- + logpmf : ndarray or scalar + Log of the probability mass function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + M, m, n, mcond, ncond, mncond = self._process_parameters(m, n) + (x, M, m, n, xcond, + xcond_reduced) = self._process_quantiles(x, M, m, n) + mxcond = mcond | xcond + ncond = ncond | np.zeros(n.shape, dtype=np.bool_) + + result = self._logpmf(x, M, m, n, mxcond, ncond) + + # replace values for which x was out of the domain; broadcast + # xcond to the right shape + xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_) + result = self._checkresult(result, xcond_, -np.inf) + + # replace values bad for n or m; broadcast + # mncond to the right shape + mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_) + return self._checkresult(result, mncond_, np.nan) + + def pmf(self, x, m, n): + """Multivariate hypergeometric probability mass function. + + Parameters + ---------- + x : array_like + Quantiles, with the last axis of `x` denoting the components. + %(_doc_default_callparams)s + + Returns + ------- + pmf : ndarray or scalar + Probability density function evaluated at `x` + + Notes + ----- + %(_doc_callparams_note)s + """ + out = np.exp(self.logpmf(x, m, n)) + return out + + def mean(self, m, n): + """Mean of the multivariate hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + mean : array_like or scalar + The mean of the distribution + """ + M, m, n, _, _, mncond = self._process_parameters(m, n) + # check for empty arrays + if m.size != 0: + M, n = M[..., np.newaxis], n[..., np.newaxis] + cond = (M == 0) + M = np.ma.masked_array(M, mask=cond) + mu = n*(m/M) + if m.size != 0: + mncond = (mncond[..., np.newaxis] | + np.zeros(mu.shape, dtype=np.bool_)) + return self._checkresult(mu, mncond, np.nan) + + def var(self, m, n): + """Variance of the multivariate hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + array_like + The variances of the components of the distribution. This is + the diagonal of the covariance matrix of the distribution + """ + M, m, n, _, _, mncond = self._process_parameters(m, n) + # check for empty arrays + if m.size != 0: + M, n = M[..., np.newaxis], n[..., np.newaxis] + cond = (M == 0) & (M-1 == 0) + M = np.ma.masked_array(M, mask=cond) + output = n * m/M * (M-m)/M * (M-n)/(M-1) + if m.size != 0: + mncond = (mncond[..., np.newaxis] | + np.zeros(output.shape, dtype=np.bool_)) + return self._checkresult(output, mncond, np.nan) + + def cov(self, m, n): + """Covariance matrix of the multivariate hypergeometric distribution. 
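+
+        The off-diagonal entries are
+        ``-n * (M - n) * m[i] * m[j] / ((M - 1) * M**2)``, where ``M`` is the
+        population size ``sum(m)``; the diagonal entries are the
+        componentwise variances returned by `var`.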
+ + Parameters + ---------- + %(_doc_default_callparams)s + + Returns + ------- + cov : array_like + The covariance matrix of the distribution + """ + # see [1]_ for the formula and [2]_ for implementation + # cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2) + M, m, n, _, _, mncond = self._process_parameters(m, n) + # check for empty arrays + if m.size != 0: + M = M[..., np.newaxis, np.newaxis] + n = n[..., np.newaxis, np.newaxis] + cond = (M == 0) & (M-1 == 0) + M = np.ma.masked_array(M, mask=cond) + output = (-n * (M-n)/(M-1) * + np.einsum("...i,...j->...ij", m, m) / (M**2)) + # check for empty arrays + if m.size != 0: + M, n = M[..., 0, 0], n[..., 0, 0] + cond = cond[..., 0, 0] + dim = m.shape[-1] + # diagonal entries need to be computed differently + for i in range(dim): + output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i])) + output[..., i, i] = output[..., i, i] / (M-1) + output[..., i, i] = output[..., i, i] / (M**2) + if m.size != 0: + mncond = (mncond[..., np.newaxis, np.newaxis] | + np.zeros(output.shape, dtype=np.bool_)) + return self._checkresult(output, mncond, np.nan) + + def rvs(self, m, n, size=None, random_state=None): + """Draw random samples from a multivariate hypergeometric distribution. + + Parameters + ---------- + %(_doc_default_callparams)s + size : integer or iterable of integers, optional + Number of samples to draw. Default is ``None``, in which case a + single variate is returned as an array with shape ``m.shape``. + %(_doc_random_state)s + + Returns + ------- + rvs : array_like + Random variates of shape ``size`` or ``m.shape`` + (if ``size=None``). + + Notes + ----- + %(_doc_callparams_note)s + + Also note that NumPy's `multivariate_hypergeometric` sampler is not + used as it doesn't support broadcasting. + """ + M, m, n, _, _, _ = self._process_parameters(m, n) + + random_state = self._get_random_state(random_state) + + if size is not None and isinstance(size, int): + size = (size, ) + + if size is None: + rvs = np.empty(m.shape, dtype=m.dtype) + else: + rvs = np.empty(size + (m.shape[-1], ), dtype=m.dtype) + rem = M + + # This sampler has been taken from numpy gh-13794 + # https://github.com/numpy/numpy/pull/13794 + for c in range(m.shape[-1] - 1): + rem = rem - m[..., c] + n0mask = n == 0 + rvs[..., c] = (~n0mask * + random_state.hypergeometric(m[..., c], + rem + n0mask, + n + n0mask, + size=size)) + n = n - rvs[..., c] + rvs[..., m.shape[-1] - 1] = n + + return rvs + + +multivariate_hypergeom = multivariate_hypergeom_gen() + + +class multivariate_hypergeom_frozen(multi_rv_frozen): + def __init__(self, m, n, seed=None): + self._dist = multivariate_hypergeom_gen(seed) + (self.M, self.m, self.n, + self.mcond, self.ncond, + self.mncond) = self._dist._process_parameters(m, n) + + # monkey patch self._dist + def _process_parameters(m, n): + return (self.M, self.m, self.n, + self.mcond, self.ncond, + self.mncond) + self._dist._process_parameters = _process_parameters + + def logpmf(self, x): + return self._dist.logpmf(x, self.m, self.n) + + def pmf(self, x): + return self._dist.pmf(x, self.m, self.n) + + def mean(self): + return self._dist.mean(self.m, self.n) + + def var(self): + return self._dist.var(self.m, self.n) + + def cov(self): + return self._dist.cov(self.m, self.n) + + def rvs(self, size=1, random_state=None): + return self._dist.rvs(self.m, self.n, + size=size, + random_state=random_state) + + +# Set frozen generator docstrings from corresponding docstrings in +# multivariate_hypergeom and fill in default strings in class docstrings +for 
name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']: + method = multivariate_hypergeom_gen.__dict__[name] + method_frozen = multivariate_hypergeom_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, mhg_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, + mhg_docdict_params) + + +class random_table_gen(multi_rv_generic): + r"""Contingency tables from independent samples with fixed marginal sums. + + This is the distribution of random tables with given row and column vector + sums. This distribution represents the set of random tables under the null + hypothesis that rows and columns are independent. It is used in hypothesis + tests of independence. + + Because of assumed independence, the expected frequency of each table + element can be computed from the row and column sums, so that the + distribution is completely determined by these two vectors. + + Methods + ------- + logpmf(x) + Log-probability of table `x` to occur in the distribution. + pmf(x) + Probability of table `x` to occur in the distribution. + mean(row, col) + Mean table. + rvs(row, col, size=None, method=None, random_state=None) + Draw random tables with given row and column vector sums. + + Parameters + ---------- + %(_doc_row_col)s + %(_doc_random_state)s + + Notes + ----- + %(_doc_row_col_note)s + + Random elements from the distribution are generated either with Boyett's + [1]_ or Patefield's algorithm [2]_. Boyett's algorithm has + O(N) time and space complexity, where N is the total sum of entries in the + table. Patefield's algorithm has O(K x log(N)) time complexity, where K is + the number of cells in the table and requires only a small constant work + space. By default, the `rvs` method selects the fastest algorithm based on + the input, but you can specify the algorithm with the keyword `method`. + Allowed values are "boyett" and "patefield". + + .. versionadded:: 1.10.0 + + Examples + -------- + >>> from scipy.stats import random_table + + >>> row = [1, 5] + >>> col = [2, 3, 1] + >>> random_table.mean(row, col) + array([[0.33333333, 0.5 , 0.16666667], + [1.66666667, 2.5 , 0.83333333]]) + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. + + >>> dist = random_table(row, col) + >>> dist.rvs(random_state=123) + array([[1., 0., 0.], + [1., 3., 1.]]) + + References + ---------- + .. [1] J. Boyett, AS 144 Appl. Statist. 28 (1979) 329-332 + .. [2] W.M. Patefield, AS 159 Appl. Statist. 30 (1981) 91-97 + """ + + def __init__(self, seed=None): + super().__init__(seed) + + def __call__(self, row, col, *, seed=None): + """Create a frozen distribution of tables with given marginals. + + See `random_table_frozen` for more information. + """ + return random_table_frozen(row, col, seed=seed) + + def logpmf(self, x, row, col): + """Log-probability of table to occur in the distribution. + + Parameters + ---------- + %(_doc_x)s + %(_doc_row_col)s + + Returns + ------- + logpmf : ndarray or scalar + Log of the probability mass function evaluated at `x`. + + Notes + ----- + %(_doc_row_col_note)s + + If row and column marginals of `x` do not match `row` and `col`, + negative infinity is returned. 
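+
+        For a table whose marginals do match, the log-probability has the
+        closed form ``sum(gammaln(row + 1)) + sum(gammaln(col + 1))
+        - gammaln(N + 1) - sum(gammaln(x + 1))``, where ``N`` is the total
+        number of table entries.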
+ + Examples + -------- + >>> from scipy.stats import random_table + >>> import numpy as np + + >>> x = [[1, 5, 1], [2, 3, 1]] + >>> row = np.sum(x, axis=1) + >>> col = np.sum(x, axis=0) + >>> random_table.logpmf(x, row, col) + -1.6306401200847027 + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. + + >>> d = random_table(row, col) + >>> d.logpmf(x) + -1.6306401200847027 + """ + r, c, n = self._process_parameters(row, col) + x = np.asarray(x) + + if x.ndim < 2: + raise ValueError("`x` must be at least two-dimensional") + + dtype_is_int = np.issubdtype(x.dtype, np.integer) + with np.errstate(invalid='ignore'): + if not dtype_is_int and not np.all(x.astype(int) == x): + raise ValueError("`x` must contain only integral values") + + # x does not contain NaN if we arrive here + if np.any(x < 0): + raise ValueError("`x` must contain only non-negative values") + + r2 = np.sum(x, axis=-1) + c2 = np.sum(x, axis=-2) + + if r2.shape[-1] != len(r): + raise ValueError("shape of `x` must agree with `row`") + + if c2.shape[-1] != len(c): + raise ValueError("shape of `x` must agree with `col`") + + res = np.empty(x.shape[:-2]) + + mask = np.all(r2 == r, axis=-1) & np.all(c2 == c, axis=-1) + + def lnfac(x): + return gammaln(x + 1) + + res[mask] = (np.sum(lnfac(r), axis=-1) + np.sum(lnfac(c), axis=-1) + - lnfac(n) - np.sum(lnfac(x[mask]), axis=(-1, -2))) + res[~mask] = -np.inf + + return res[()] + + def pmf(self, x, row, col): + """Probability of table to occur in the distribution. + + Parameters + ---------- + %(_doc_x)s + %(_doc_row_col)s + + Returns + ------- + pmf : ndarray or scalar + Probability mass function evaluated at `x`. + + Notes + ----- + %(_doc_row_col_note)s + + If row and column marginals of `x` do not match `row` and `col`, + zero is returned. + + Examples + -------- + >>> from scipy.stats import random_table + >>> import numpy as np + + >>> x = [[1, 5, 1], [2, 3, 1]] + >>> row = np.sum(x, axis=1) + >>> col = np.sum(x, axis=0) + >>> random_table.pmf(x, row, col) + 0.19580419580419592 + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. + + >>> d = random_table(row, col) + >>> d.pmf(x) + 0.19580419580419592 + """ + return np.exp(self.logpmf(x, row, col)) + + def mean(self, row, col): + """Mean of distribution of conditional tables. + %(_doc_mean_params)s + + Returns + ------- + mean: ndarray + Mean of the distribution. + + Notes + ----- + %(_doc_row_col_note)s + + Examples + -------- + >>> from scipy.stats import random_table + + >>> row = [1, 5] + >>> col = [2, 3, 1] + >>> random_table.mean(row, col) + array([[0.33333333, 0.5 , 0.16666667], + [1.66666667, 2.5 , 0.83333333]]) + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. + + >>> d = random_table(row, col) + >>> d.mean() + array([[0.33333333, 0.5 , 0.16666667], + [1.66666667, 2.5 , 0.83333333]]) + """ + r, c, n = self._process_parameters(row, col) + return np.outer(r, c) / n + + def rvs(self, row, col, *, size=None, method=None, random_state=None): + """Draw random tables with fixed column and row marginals. + + Parameters + ---------- + %(_doc_row_col)s + size : integer, optional + Number of samples to draw (default 1). + method : str, optional + Which method to use, "boyett" or "patefield". If None (default), + selects the fastest method for this input. 
+ %(_doc_random_state)s + + Returns + ------- + rvs : ndarray + Random 2D tables of shape (`size`, `len(row)`, `len(col)`). + + Notes + ----- + %(_doc_row_col_note)s + + Examples + -------- + >>> from scipy.stats import random_table + + >>> row = [1, 5] + >>> col = [2, 3, 1] + >>> random_table.rvs(row, col, random_state=123) + array([[1., 0., 0.], + [1., 3., 1.]]) + + Alternatively, the object may be called (as a function) to fix the row + and column vector sums, returning a "frozen" distribution. + + >>> d = random_table(row, col) + >>> d.rvs(random_state=123) + array([[1., 0., 0.], + [1., 3., 1.]]) + """ + r, c, n = self._process_parameters(row, col) + size, shape = self._process_size_shape(size, r, c) + + random_state = self._get_random_state(random_state) + meth = self._process_rvs_method(method, r, c, n) + + return meth(r, c, n, size, random_state).reshape(shape) + + @staticmethod + def _process_parameters(row, col): + """ + Check that row and column vectors are one-dimensional, that they do + not contain negative or non-integer entries, and that the sums over + both vectors are equal. + """ + r = np.array(row, dtype=np.int64, copy=True) + c = np.array(col, dtype=np.int64, copy=True) + + if np.ndim(r) != 1: + raise ValueError("`row` must be one-dimensional") + if np.ndim(c) != 1: + raise ValueError("`col` must be one-dimensional") + + if np.any(r < 0): + raise ValueError("each element of `row` must be non-negative") + if np.any(c < 0): + raise ValueError("each element of `col` must be non-negative") + + n = np.sum(r) + if n != np.sum(c): + raise ValueError("sums over `row` and `col` must be equal") + + if not np.all(r == np.asarray(row)): + raise ValueError("each element of `row` must be an integer") + if not np.all(c == np.asarray(col)): + raise ValueError("each element of `col` must be an integer") + + return r, c, n + + @staticmethod + def _process_size_shape(size, r, c): + """ + Compute the number of samples to be drawn and the shape of the output + """ + shape = (len(r), len(c)) + + if size is None: + return 1, shape + + size = np.atleast_1d(size) + if not np.issubdtype(size.dtype, np.integer) or np.any(size < 0): + raise ValueError("`size` must be a non-negative integer or `None`") + + return np.prod(size), tuple(size) + shape + + @classmethod + def _process_rvs_method(cls, method, r, c, n): + known_methods = { + None: cls._rvs_select(r, c, n), + "boyett": cls._rvs_boyett, + "patefield": cls._rvs_patefield, + } + try: + return known_methods[method] + except KeyError: + raise ValueError(f"'{method}' not recognized, " + f"must be one of {set(known_methods)}") + + @classmethod + def _rvs_select(cls, r, c, n): + fac = 1.0 # benchmarks show that this value is about 1 + k = len(r) * len(c) # number of cells + # n + 1 guards against failure if n == 0 + if n > fac * np.log(n + 1) * k: + return cls._rvs_patefield + return cls._rvs_boyett + + @staticmethod + def _rvs_boyett(row, col, ntot, size, random_state): + return _rcont.rvs_rcont1(row, col, ntot, size, random_state) + + @staticmethod + def _rvs_patefield(row, col, ntot, size, random_state): + return _rcont.rvs_rcont2(row, col, ntot, size, random_state) + + +random_table = random_table_gen() + + +class random_table_frozen(multi_rv_frozen): + def __init__(self, row, col, *, seed=None): + self._dist = random_table_gen(seed) + self._params = self._dist._process_parameters(row, col) + + # monkey patch self._dist + def _process_parameters(r, c): + return self._params + self._dist._process_parameters = _process_parameters + + def 
logpmf(self, x): + return self._dist.logpmf(x, None, None) + + def pmf(self, x): + return self._dist.pmf(x, None, None) + + def mean(self): + return self._dist.mean(None, None) + + def rvs(self, size=None, method=None, random_state=None): + # optimisations are possible here + return self._dist.rvs(None, None, size=size, method=method, + random_state=random_state) + + +_ctab_doc_row_col = """\ +row : array_like + Sum of table entries in each row. +col : array_like + Sum of table entries in each column.""" + +_ctab_doc_x = """\ +x : array-like + Two-dimensional table of non-negative integers, or a + multi-dimensional array with the last two dimensions + corresponding with the tables.""" + +_ctab_doc_row_col_note = """\ +The row and column vectors must be one-dimensional, not empty, +and each sum up to the same value. They cannot contain negative +or noninteger entries.""" + +_ctab_doc_mean_params = f""" +Parameters +---------- +{_ctab_doc_row_col}""" + +_ctab_doc_row_col_note_frozen = """\ +See class definition for a detailed description of parameters.""" + +_ctab_docdict = { + "_doc_random_state": _doc_random_state, + "_doc_row_col": _ctab_doc_row_col, + "_doc_x": _ctab_doc_x, + "_doc_mean_params": _ctab_doc_mean_params, + "_doc_row_col_note": _ctab_doc_row_col_note, +} + +_ctab_docdict_frozen = _ctab_docdict.copy() +_ctab_docdict_frozen.update({ + "_doc_row_col": "", + "_doc_mean_params": "", + "_doc_row_col_note": _ctab_doc_row_col_note_frozen, +}) + + +def _docfill(obj, docdict, template=None): + obj.__doc__ = doccer.docformat(template or obj.__doc__, docdict) + + +# Set frozen generator docstrings from corresponding docstrings in +# random_table and fill in default strings in class docstrings +_docfill(random_table_gen, _ctab_docdict) +for name in ['logpmf', 'pmf', 'mean', 'rvs']: + method = random_table_gen.__dict__[name] + method_frozen = random_table_frozen.__dict__[name] + _docfill(method_frozen, _ctab_docdict_frozen, method.__doc__) + _docfill(method, _ctab_docdict) + + +class uniform_direction_gen(multi_rv_generic): + r"""A vector-valued uniform direction. + + Return a random direction (unit vector). The `dim` keyword specifies + the dimensionality of the space. + + Methods + ------- + rvs(dim=None, size=1, random_state=None) + Draw random directions. + + Parameters + ---------- + dim : scalar + Dimension of directions. + seed : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Notes + ----- + This distribution generates unit vectors uniformly distributed on + the surface of a hypersphere. These can be interpreted as random + directions. + For example, if `dim` is 3, 3D vectors from the surface of :math:`S^2` + will be sampled. + + References + ---------- + .. [1] Marsaglia, G. (1972). "Choosing a Point from the Surface of a + Sphere". Annals of Mathematical Statistics. 43 (2): 645-646. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import uniform_direction + >>> x = uniform_direction.rvs(3) + >>> np.linalg.norm(x) + 1. + + This generates one random direction, a vector on the surface of + :math:`S^2`. + + Alternatively, the object may be called (as a function) to return a frozen + distribution with fixed `dim` parameter. 
Here, + we create a `uniform_direction` with ``dim=3`` and draw 5 observations. + The samples are then arranged in an array of shape 5x3. + + >>> rng = np.random.default_rng() + >>> uniform_sphere_dist = uniform_direction(3) + >>> unit_vectors = uniform_sphere_dist.rvs(5, random_state=rng) + >>> unit_vectors + array([[ 0.56688642, -0.1332634 , -0.81294566], + [-0.427126 , -0.74779278, 0.50830044], + [ 0.3793989 , 0.92346629, 0.05715323], + [ 0.36428383, -0.92449076, -0.11231259], + [-0.27733285, 0.94410968, -0.17816678]]) + """ + + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__) + + def __call__(self, dim=None, seed=None): + """Create a frozen n-dimensional uniform direction distribution. + + See `uniform_direction` for more information. + """ + return uniform_direction_frozen(dim, seed=seed) + + def _process_parameters(self, dim): + """Dimension N must be specified; it cannot be inferred.""" + if dim is None or not np.isscalar(dim) or dim < 1 or dim != int(dim): + raise ValueError("Dimension of vector must be specified, " + "and must be an integer greater than 0.") + + return int(dim) + + def rvs(self, dim, size=None, random_state=None): + """Draw random samples from S(N-1). + + Parameters + ---------- + dim : integer + Dimension of space (N). + size : int or tuple of ints, optional + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. + Because each sample is N-dimensional, the output shape + is (m,n,k,N). If no shape is specified, a single (N-D) + sample is returned. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is ``None`` (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is + used, seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Returns + ------- + rvs : ndarray + Random direction vectors + + """ + random_state = self._get_random_state(random_state) + if size is None: + size = np.array([], dtype=int) + size = np.atleast_1d(size) + + dim = self._process_parameters(dim) + + samples = _sample_uniform_direction(dim, size, random_state) + return samples + + +uniform_direction = uniform_direction_gen() + + +class uniform_direction_frozen(multi_rv_frozen): + def __init__(self, dim=None, seed=None): + """Create a frozen n-dimensional uniform direction distribution. + + Parameters + ---------- + dim : int + Dimension of matrices + seed : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Examples + -------- + >>> from scipy.stats import uniform_direction + >>> x = uniform_direction(3) + >>> x.rvs() + + """ + self._dist = uniform_direction_gen(seed) + self.dim = self._dist._process_parameters(dim) + + def rvs(self, size=None, random_state=None): + return self._dist.rvs(self.dim, size, random_state) + + +def _sample_uniform_direction(dim, size, random_state): + """ + Private method to generate uniform directions + Reference: Marsaglia, G. (1972). 
"Choosing a Point from the Surface of a + Sphere". Annals of Mathematical Statistics. 43 (2): 645-646. + """ + samples_shape = np.append(size, dim) + samples = random_state.standard_normal(samples_shape) + samples /= np.linalg.norm(samples, axis=-1, keepdims=True) + return samples + + +_dirichlet_mn_doc_default_callparams = """\ +alpha : array_like + The concentration parameters. The number of entries along the last axis + determines the dimensionality of the distribution. Each entry must be + strictly positive. +n : int or array_like + The number of trials. Each element must be a strictly positive integer. +""" + +_dirichlet_mn_doc_frozen_callparams = "" + +_dirichlet_mn_doc_frozen_callparams_note = """\ +See class definition for a detailed description of parameters.""" + +dirichlet_mn_docdict_params = { + '_dirichlet_mn_doc_default_callparams': _dirichlet_mn_doc_default_callparams, + '_doc_random_state': _doc_random_state +} + +dirichlet_mn_docdict_noparams = { + '_dirichlet_mn_doc_default_callparams': _dirichlet_mn_doc_frozen_callparams, + '_doc_random_state': _doc_random_state +} + + +def _dirichlet_multinomial_check_parameters(alpha, n, x=None): + + alpha = np.asarray(alpha) + n = np.asarray(n) + + if x is not None: + # Ensure that `x` and `alpha` are arrays. If the shapes are + # incompatible, NumPy will raise an appropriate error. + try: + x, alpha = np.broadcast_arrays(x, alpha) + except ValueError as e: + msg = "`x` and `alpha` must be broadcastable." + raise ValueError(msg) from e + + x_int = np.floor(x) + if np.any(x < 0) or np.any(x != x_int): + raise ValueError("`x` must contain only non-negative integers.") + x = x_int + + if np.any(alpha <= 0): + raise ValueError("`alpha` must contain only positive values.") + + n_int = np.floor(n) + if np.any(n <= 0) or np.any(n != n_int): + raise ValueError("`n` must be a positive integer.") + n = n_int + + sum_alpha = np.sum(alpha, axis=-1) + sum_alpha, n = np.broadcast_arrays(sum_alpha, n) + + return (alpha, sum_alpha, n) if x is None else (alpha, sum_alpha, n, x) + + +class dirichlet_multinomial_gen(multi_rv_generic): + r"""A Dirichlet multinomial random variable. + + The Dirichlet multinomial distribution is a compound probability + distribution: it is the multinomial distribution with number of trials + `n` and class probabilities ``p`` randomly sampled from a Dirichlet + distribution with concentration parameters ``alpha``. + + Methods + ------- + logpmf(x, alpha, n): + Log of the probability mass function. + pmf(x, alpha, n): + Probability mass function. + mean(alpha, n): + Mean of the Dirichlet multinomial distribution. + var(alpha, n): + Variance of the Dirichlet multinomial distribution. + cov(alpha, n): + The covariance of the Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + %(_doc_random_state)s + + See Also + -------- + scipy.stats.dirichlet : The dirichlet distribution. + scipy.stats.multinomial : The multinomial distribution. + + References + ---------- + .. [1] Dirichlet-multinomial distribution, Wikipedia, + https://www.wikipedia.org/wiki/Dirichlet-multinomial_distribution + + Examples + -------- + >>> from scipy.stats import dirichlet_multinomial + + Get the PMF + + >>> n = 6 # number of trials + >>> alpha = [3, 4, 5] # concentration parameters + >>> x = [1, 2, 3] # counts + >>> dirichlet_multinomial.pmf(x, alpha, n) + 0.08484162895927604 + + If the sum of category counts does not equal the number of trials, + the probability mass is zero. 
+ + >>> dirichlet_multinomial.pmf(x, alpha, n=7) + 0.0 + + Get the log of the PMF + + >>> dirichlet_multinomial.logpmf(x, alpha, n) + -2.4669689491013327 + + Get the mean + + >>> dirichlet_multinomial.mean(alpha, n) + array([1.5, 2. , 2.5]) + + Get the variance + + >>> dirichlet_multinomial.var(alpha, n) + array([1.55769231, 1.84615385, 2.01923077]) + + Get the covariance + + >>> dirichlet_multinomial.cov(alpha, n) + array([[ 1.55769231, -0.69230769, -0.86538462], + [-0.69230769, 1.84615385, -1.15384615], + [-0.86538462, -1.15384615, 2.01923077]]) + + Alternatively, the object may be called (as a function) to fix the + `alpha` and `n` parameters, returning a "frozen" Dirichlet multinomial + random variable. + + >>> dm = dirichlet_multinomial(alpha, n) + >>> dm.pmf(x) + 0.08484162895927579 + + All methods are fully vectorized. Each element of `x` and `alpha` is + a vector (along the last axis), each element of `n` is an + integer (scalar), and the result is computed element-wise. + + >>> x = [[1, 2, 3], [4, 5, 6]] + >>> alpha = [[1, 2, 3], [4, 5, 6]] + >>> n = [6, 15] + >>> dirichlet_multinomial.pmf(x, alpha, n) + array([0.06493506, 0.02626937]) + + >>> dirichlet_multinomial.cov(alpha, n).shape # both covariance matrices + (2, 3, 3) + + Broadcasting according to standard NumPy conventions is supported. Here, + we have four sets of concentration parameters (each a two element vector) + for each of three numbers of trials (each a scalar). + + >>> alpha = [[3, 4], [4, 5], [5, 6], [6, 7]] + >>> n = [[6], [7], [8]] + >>> dirichlet_multinomial.mean(alpha, n).shape + (3, 4, 2) + + """ + def __init__(self, seed=None): + super().__init__(seed) + self.__doc__ = doccer.docformat(self.__doc__, + dirichlet_mn_docdict_params) + + def __call__(self, alpha, n, seed=None): + return dirichlet_multinomial_frozen(alpha, n, seed=seed) + + def logpmf(self, x, alpha, n): + """The log of the probability mass function. + + Parameters + ---------- + x: ndarray + Category counts (non-negative integers). Must be broadcastable + with shape parameter ``alpha``. If multidimensional, the last axis + must correspond with the categories. + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: ndarray or scalar + Log of the probability mass function. + + """ + + a, Sa, n, x = _dirichlet_multinomial_check_parameters(alpha, n, x) + + out = np.asarray(loggamma(Sa) + loggamma(n + 1) - loggamma(n + Sa)) + out += (loggamma(x + a) - (loggamma(a) + loggamma(x + 1))).sum(axis=-1) + np.place(out, n != x.sum(axis=-1), -np.inf) + return out[()] + + def pmf(self, x, alpha, n): + """Probability mass function for a Dirichlet multinomial distribution. + + Parameters + ---------- + x: ndarray + Category counts (non-negative integers). Must be broadcastable + with shape parameter ``alpha``. If multidimensional, the last axis + must correspond with the categories. + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: ndarray or scalar + Probability mass function. + + """ + return np.exp(self.logpmf(x, alpha, n)) + + def mean(self, alpha, n): + """Mean of a Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: ndarray + Mean of a Dirichlet multinomial distribution. + + """ + a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + n, Sa = n[..., np.newaxis], Sa[..., np.newaxis] + return n * a / Sa + + def var(self, alpha, n): + """The variance of the Dirichlet multinomial distribution. 
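+
+        The componentwise variances are
+        ``n * p_i * (1 - p_i) * (n + S) / (1 + S)`` with ``p_i = alpha_i / S``
+        and ``S = sum(alpha)``.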
+ + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out: array_like + The variances of the components of the distribution. This is + the diagonal of the covariance matrix of the distribution. + + """ + a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + n, Sa = n[..., np.newaxis], Sa[..., np.newaxis] + return n * a / Sa * (1 - a/Sa) * (n + Sa) / (1 + Sa) + + def cov(self, alpha, n): + """Covariance matrix of a Dirichlet multinomial distribution. + + Parameters + ---------- + %(_dirichlet_mn_doc_default_callparams)s + + Returns + ------- + out : array_like + The covariance matrix of the distribution. + + """ + a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + var = dirichlet_multinomial.var(a, n) + + n, Sa = n[..., np.newaxis, np.newaxis], Sa[..., np.newaxis, np.newaxis] + aiaj = a[..., :, np.newaxis] * a[..., np.newaxis, :] + cov = -n * aiaj / Sa ** 2 * (n + Sa) / (1 + Sa) + + ii = np.arange(cov.shape[-1]) + cov[..., ii, ii] = var + return cov + + +dirichlet_multinomial = dirichlet_multinomial_gen() + + +class dirichlet_multinomial_frozen(multi_rv_frozen): + def __init__(self, alpha, n, seed=None): + alpha, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n) + self.alpha = alpha + self.n = n + self._dist = dirichlet_multinomial_gen(seed) + + def logpmf(self, x): + return self._dist.logpmf(x, self.alpha, self.n) + + def pmf(self, x): + return self._dist.pmf(x, self.alpha, self.n) + + def mean(self): + return self._dist.mean(self.alpha, self.n) + + def var(self): + return self._dist.var(self.alpha, self.n) + + def cov(self): + return self._dist.cov(self.alpha, self.n) + + +# Set frozen generator docstrings from corresponding docstrings in +# dirichlet_multinomial and fill in default strings in class docstrings. +for name in ['logpmf', 'pmf', 'mean', 'var', 'cov']: + method = dirichlet_multinomial_gen.__dict__[name] + method_frozen = dirichlet_multinomial_frozen.__dict__[name] + method_frozen.__doc__ = doccer.docformat( + method.__doc__, dirichlet_mn_docdict_noparams) + method.__doc__ = doccer.docformat(method.__doc__, + dirichlet_mn_docdict_params) + + +class vonmises_fisher_gen(multi_rv_generic): + r"""A von Mises-Fisher variable. + + The `mu` keyword specifies the mean direction vector. The `kappa` keyword + specifies the concentration parameter. + + Methods + ------- + pdf(x, mu=None, kappa=1) + Probability density function. + logpdf(x, mu=None, kappa=1) + Log of the probability density function. + rvs(mu=None, kappa=1, size=1, random_state=None) + Draw random samples from a von Mises-Fisher distribution. + entropy(mu=None, kappa=1) + Compute the differential entropy of the von Mises-Fisher distribution. + fit(data) + Fit a von Mises-Fisher distribution to data. + + Parameters + ---------- + mu : array_like + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float + Concentration parameter. Must be positive. + seed : {None, int, np.random.RandomState, np.random.Generator}, optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. 
+ + See Also + -------- + scipy.stats.vonmises : Von-Mises Fisher distribution in 2D on a circle + uniform_direction : uniform distribution on the surface of a hypersphere + + Notes + ----- + The von Mises-Fisher distribution is a directional distribution on the + surface of the unit hypersphere. The probability density + function of a unit vector :math:`\mathbf{x}` is + + .. math:: + + f(\mathbf{x}) = \frac{\kappa^{d/2-1}}{(2\pi)^{d/2}I_{d/2-1}(\kappa)} + \exp\left(\kappa \mathbf{\mu}^T\mathbf{x}\right), + + where :math:`\mathbf{\mu}` is the mean direction, :math:`\kappa` the + concentration parameter, :math:`d` the dimension and :math:`I` the + modified Bessel function of the first kind. As :math:`\mu` represents + a direction, it must be a unit vector or in other words, a point + on the hypersphere: :math:`\mathbf{\mu}\in S^{d-1}`. :math:`\kappa` is a + concentration parameter, which means that it must be positive + (:math:`\kappa>0`) and that the distribution becomes more narrow with + increasing :math:`\kappa`. In that sense, the reciprocal value + :math:`1/\kappa` resembles the variance parameter of the normal + distribution. + + The von Mises-Fisher distribution often serves as an analogue of the + normal distribution on the sphere. Intuitively, for unit vectors, a + useful distance measure is given by the angle :math:`\alpha` between + them. This is exactly what the scalar product + :math:`\mathbf{\mu}^T\mathbf{x}=\cos(\alpha)` in the + von Mises-Fisher probability density function describes: the angle + between the mean direction :math:`\mathbf{\mu}` and the vector + :math:`\mathbf{x}`. The larger the angle between them, the smaller the + probability to observe :math:`\mathbf{x}` for this particular mean + direction :math:`\mathbf{\mu}`. + + In dimensions 2 and 3, specialized algorithms are used for fast sampling + [2]_, [3]_. For dimensions of 4 or higher the rejection sampling algorithm + described in [4]_ is utilized. This implementation is partially based on + the geomstats package [5]_, [6]_. + + .. versionadded:: 1.11 + + References + ---------- + .. [1] Von Mises-Fisher distribution, Wikipedia, + https://en.wikipedia.org/wiki/Von_Mises%E2%80%93Fisher_distribution + .. [2] Mardia, K., and Jupp, P. Directional statistics. Wiley, 2000. + .. [3] J. Wenzel. Numerically stable sampling of the von Mises Fisher + distribution on S2. + https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf + .. [4] Wood, A. Simulation of the von mises fisher distribution. + Communications in statistics-simulation and computation 23, + 1 (1994), 157-164. https://doi.org/10.1080/03610919408813161 + .. [5] geomstats, Github. MIT License. Accessed: 06.01.2023. + https://github.com/geomstats/geomstats + .. [6] Miolane, N. et al. Geomstats: A Python Package for Riemannian + Geometry in Machine Learning. Journal of Machine Learning Research + 21 (2020). http://jmlr.org/papers/v21/19-027.html + + Examples + -------- + **Visualization of the probability density** + + Plot the probability density in three dimensions for increasing + concentration parameter. The density is calculated by the ``pdf`` + method. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import vonmises_fisher + >>> from matplotlib.colors import Normalize + >>> n_grid = 100 + >>> u = np.linspace(0, np.pi, n_grid) + >>> v = np.linspace(0, 2 * np.pi, n_grid) + >>> u_grid, v_grid = np.meshgrid(u, v) + >>> vertices = np.stack([np.cos(v_grid) * np.sin(u_grid), + ... np.sin(v_grid) * np.sin(u_grid), + ... 
np.cos(u_grid)], + ... axis=2) + >>> x = np.outer(np.cos(v), np.sin(u)) + >>> y = np.outer(np.sin(v), np.sin(u)) + >>> z = np.outer(np.ones_like(u), np.cos(u)) + >>> def plot_vmf_density(ax, x, y, z, vertices, mu, kappa): + ... vmf = vonmises_fisher(mu, kappa) + ... pdf_values = vmf.pdf(vertices) + ... pdfnorm = Normalize(vmin=pdf_values.min(), vmax=pdf_values.max()) + ... ax.plot_surface(x, y, z, rstride=1, cstride=1, + ... facecolors=plt.cm.viridis(pdfnorm(pdf_values)), + ... linewidth=0) + ... ax.set_aspect('equal') + ... ax.view_init(azim=-130, elev=0) + ... ax.axis('off') + ... ax.set_title(rf"$\kappa={kappa}$") + >>> fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9, 4), + ... subplot_kw={"projection": "3d"}) + >>> left, middle, right = axes + >>> mu = np.array([-np.sqrt(0.5), -np.sqrt(0.5), 0]) + >>> plot_vmf_density(left, x, y, z, vertices, mu, 5) + >>> plot_vmf_density(middle, x, y, z, vertices, mu, 20) + >>> plot_vmf_density(right, x, y, z, vertices, mu, 100) + >>> plt.subplots_adjust(top=1, bottom=0.0, left=0.0, right=1.0, wspace=0.) + >>> plt.show() + + As we increase the concentration parameter, the points are getting more + clustered together around the mean direction. + + **Sampling** + + Draw 5 samples from the distribution using the ``rvs`` method resulting + in a 5x3 array. + + >>> rng = np.random.default_rng() + >>> mu = np.array([0, 0, 1]) + >>> samples = vonmises_fisher(mu, 20).rvs(5, random_state=rng) + >>> samples + array([[ 0.3884594 , -0.32482588, 0.86231516], + [ 0.00611366, -0.09878289, 0.99509023], + [-0.04154772, -0.01637135, 0.99900239], + [-0.14613735, 0.12553507, 0.98126695], + [-0.04429884, -0.23474054, 0.97104814]]) + + These samples are unit vectors on the sphere :math:`S^2`. To verify, + let us calculate their euclidean norms: + + >>> np.linalg.norm(samples, axis=1) + array([1., 1., 1., 1., 1.]) + + Plot 20 observations drawn from the von Mises-Fisher distribution for + increasing concentration parameter :math:`\kappa`. The red dot highlights + the mean direction :math:`\mu`. + + >>> def plot_vmf_samples(ax, x, y, z, mu, kappa): + ... vmf = vonmises_fisher(mu, kappa) + ... samples = vmf.rvs(20) + ... ax.plot_surface(x, y, z, rstride=1, cstride=1, linewidth=0, + ... alpha=0.2) + ... ax.scatter(samples[:, 0], samples[:, 1], samples[:, 2], c='k', s=5) + ... ax.scatter(mu[0], mu[1], mu[2], c='r', s=30) + ... ax.set_aspect('equal') + ... ax.view_init(azim=-130, elev=0) + ... ax.axis('off') + ... ax.set_title(rf"$\kappa={kappa}$") + >>> mu = np.array([-np.sqrt(0.5), -np.sqrt(0.5), 0]) + >>> fig, axes = plt.subplots(nrows=1, ncols=3, + ... subplot_kw={"projection": "3d"}, + ... figsize=(9, 4)) + >>> left, middle, right = axes + >>> plot_vmf_samples(left, x, y, z, mu, 5) + >>> plot_vmf_samples(middle, x, y, z, mu, 20) + >>> plot_vmf_samples(right, x, y, z, mu, 100) + >>> plt.subplots_adjust(top=1, bottom=0.0, left=0.0, + ... right=1.0, wspace=0.) + >>> plt.show() + + The plots show that with increasing concentration :math:`\kappa` the + resulting samples are centered more closely around the mean direction. + + **Fitting the distribution parameters** + + The distribution can be fitted to data using the ``fit`` method returning + the estimated parameters. As a toy example let's fit the distribution to + samples drawn from a known von Mises-Fisher distribution. 
+ + >>> mu, kappa = np.array([0, 0, 1]), 20 + >>> samples = vonmises_fisher(mu, kappa).rvs(1000, random_state=rng) + >>> mu_fit, kappa_fit = vonmises_fisher.fit(samples) + >>> mu_fit, kappa_fit + (array([0.01126519, 0.01044501, 0.99988199]), 19.306398751730995) + + We see that the estimated parameters `mu_fit` and `kappa_fit` are + very close to the ground truth parameters. + + """ + def __init__(self, seed=None): + super().__init__(seed) + + def __call__(self, mu=None, kappa=1, seed=None): + """Create a frozen von Mises-Fisher distribution. + + See `vonmises_fisher_frozen` for more information. + """ + return vonmises_fisher_frozen(mu, kappa, seed=seed) + + def _process_parameters(self, mu, kappa): + """ + Infer dimensionality from mu and ensure that mu is a one-dimensional + unit vector and kappa positive. + """ + mu = np.asarray(mu) + if mu.ndim > 1: + raise ValueError("'mu' must have one-dimensional shape.") + if not np.allclose(np.linalg.norm(mu), 1.): + raise ValueError("'mu' must be a unit vector of norm 1.") + if not mu.size > 1: + raise ValueError("'mu' must have at least two entries.") + kappa_error_msg = "'kappa' must be a positive scalar." + if not np.isscalar(kappa) or kappa < 0: + raise ValueError(kappa_error_msg) + if float(kappa) == 0.: + raise ValueError("For 'kappa=0' the von Mises-Fisher distribution " + "becomes the uniform distribution on the sphere " + "surface. Consider using " + "'scipy.stats.uniform_direction' instead.") + dim = mu.size + + return dim, mu, kappa + + def _check_data_vs_dist(self, x, dim): + if x.shape[-1] != dim: + raise ValueError("The dimensionality of the last axis of 'x' must " + "match the dimensionality of the " + "von Mises Fisher distribution.") + if not np.allclose(np.linalg.norm(x, axis=-1), 1.): + msg = "'x' must be unit vectors of norm 1 along last dimension." + raise ValueError(msg) + + def _log_norm_factor(self, dim, kappa): + # normalization factor is given by + # c = kappa**(dim/2-1)/((2*pi)**(dim/2)*I[dim/2-1](kappa)) + # = kappa**(dim/2-1)*exp(-kappa) / + # ((2*pi)**(dim/2)*I[dim/2-1](kappa)*exp(-kappa) + # = kappa**(dim/2-1)*exp(-kappa) / + # ((2*pi)**(dim/2)*ive[dim/2-1](kappa) + # Then the log is given by + # log c = 1/2*(dim -1)*log(kappa) - kappa - -1/2*dim*ln(2*pi) - + # ive[dim/2-1](kappa) + halfdim = 0.5 * dim + return (0.5 * (dim - 2)*np.log(kappa) - halfdim * _LOG_2PI - + np.log(ive(halfdim - 1, kappa)) - kappa) + + def _logpdf(self, x, dim, mu, kappa): + """Log of the von Mises-Fisher probability density function. + + As this function does no argument checking, it should not be + called directly; use 'logpdf' instead. + + """ + x = np.asarray(x) + self._check_data_vs_dist(x, dim) + dotproducts = np.einsum('i,...i->...', mu, x) + return self._log_norm_factor(dim, kappa) + kappa * dotproducts + + def logpdf(self, x, mu=None, kappa=1): + """Log of the von Mises-Fisher probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability + density function. The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + mu : array_like, default: None + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float, default: 1 + Concentration parameter. Must be positive. + + Returns + ------- + logpdf : ndarray or scalar + Log of the probability density function evaluated at `x`. 
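+
+        Notes
+        -----
+        ``pdf`` is ``exp(logpdf)``; evaluating on the log scale avoids
+        underflow of the density for large ``kappa``. A minimal consistency
+        sketch (illustrative only):
+
+        >>> import numpy as np
+        >>> from scipy.stats import vonmises_fisher
+        >>> x = np.array([0., 0., 1.])
+        >>> np.allclose(vonmises_fisher.pdf(x, x, 10),
+        ...             np.exp(vonmises_fisher.logpdf(x, x, 10)))
+        True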
+ + """ + dim, mu, kappa = self._process_parameters(mu, kappa) + return self._logpdf(x, dim, mu, kappa) + + def pdf(self, x, mu=None, kappa=1): + """Von Mises-Fisher probability density function. + + Parameters + ---------- + x : array_like + Points at which to evaluate the probability + density function. The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + mu : array_like + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float + Concentration parameter. Must be positive. + + Returns + ------- + pdf : ndarray or scalar + Probability density function evaluated at `x`. + + """ + dim, mu, kappa = self._process_parameters(mu, kappa) + return np.exp(self._logpdf(x, dim, mu, kappa)) + + def _rvs_2d(self, mu, kappa, size, random_state): + """ + In 2D, the von Mises-Fisher distribution reduces to the + von Mises distribution which can be efficiently sampled by numpy. + This method is much faster than the general rejection + sampling based algorithm. + + """ + mean_angle = np.arctan2(mu[1], mu[0]) + angle_samples = random_state.vonmises(mean_angle, kappa, size=size) + samples = np.stack([np.cos(angle_samples), np.sin(angle_samples)], + axis=-1) + return samples + + def _rvs_3d(self, kappa, size, random_state): + """ + Generate samples from a von Mises-Fisher distribution + with mu = [1, 0, 0] and kappa. Samples then have to be + rotated towards the desired mean direction mu. + This method is much faster than the general rejection + sampling based algorithm. + Reference: https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf + + """ + if size is None: + sample_size = 1 + else: + sample_size = size + + # compute x coordinate acc. to equation from section 3.1 + x = random_state.random(sample_size) + x = 1. + np.log(x + (1. - x) * np.exp(-2 * kappa))/kappa + + # (y, z) are random 2D vectors that only have to be + # normalized accordingly. Then (x, y z) follow a VMF distribution + temp = np.sqrt(1. - np.square(x)) + uniformcircle = _sample_uniform_direction(2, sample_size, random_state) + samples = np.stack([x, temp * uniformcircle[..., 0], + temp * uniformcircle[..., 1]], + axis=-1) + if size is None: + samples = np.squeeze(samples) + return samples + + def _rejection_sampling(self, dim, kappa, size, random_state): + """ + Generate samples from a n-dimensional von Mises-Fisher distribution + with mu = [1, 0, ..., 0] and kappa via rejection sampling. + Samples then have to be rotated towards the desired mean direction mu. + Reference: https://doi.org/10.1080/03610919408813161 + """ + dim_minus_one = dim - 1 + # calculate number of requested samples + if size is not None: + if not np.iterable(size): + size = (size, ) + n_samples = math.prod(size) + else: + n_samples = 1 + # calculate envelope for rejection sampler (eq. 4) + sqrt = np.sqrt(4 * kappa ** 2. + dim_minus_one ** 2) + envelop_param = (-2 * kappa + sqrt) / dim_minus_one + if envelop_param == 0: + # the regular formula suffers from loss of precision for high + # kappa. This can only be detected by checking for 0 here. + # Workaround: expansion for sqrt variable + # https://www.wolframalpha.com/input?i=sqrt%284*x%5E2%2Bd%5E2%29 + # e = (-2 * k + sqrt(k**2 + d**2)) / d + # ~ (-2 * k + 2 * k + d**2/(4 * k) - d**4/(64 * k**3)) / d + # = d/(4 * k) - d**3/(64 * k**3) + envelop_param = (dim_minus_one/4 * kappa**-1. + - dim_minus_one**3/64 * kappa**-3.) + # reference step 0 + node = (1. - envelop_param) / (1. 
+ envelop_param) + # t = ln(1 - ((1-x)/(1+x))**2) + # = ln(4 * x / (1+x)**2) + # = ln(4) + ln(x) - 2*log1p(x) + correction = (kappa * node + dim_minus_one + * (np.log(4) + np.log(envelop_param) + - 2 * np.log1p(envelop_param))) + n_accepted = 0 + x = np.zeros((n_samples, )) + halfdim = 0.5 * dim_minus_one + # main loop + while n_accepted < n_samples: + # generate candidates acc. to reference step 1 + sym_beta = random_state.beta(halfdim, halfdim, + size=n_samples - n_accepted) + coord_x = (1 - (1 + envelop_param) * sym_beta) / ( + 1 - (1 - envelop_param) * sym_beta) + # accept or reject: reference step 2 + # reformulation for numerical stability: + # t = ln(1 - (1-x)/(1+x) * y) + # = ln((1 + x - y +x*y)/(1 +x)) + accept_tol = random_state.random(n_samples - n_accepted) + criterion = ( + kappa * coord_x + + dim_minus_one * (np.log((1 + envelop_param - coord_x + + coord_x * envelop_param) / (1 + envelop_param))) + - correction) > np.log(accept_tol) + accepted_iter = np.sum(criterion) + x[n_accepted:n_accepted + accepted_iter] = coord_x[criterion] + n_accepted += accepted_iter + # concatenate x and remaining coordinates: step 3 + coord_rest = _sample_uniform_direction(dim_minus_one, n_accepted, + random_state) + coord_rest = np.einsum( + '...,...i->...i', np.sqrt(1 - x ** 2), coord_rest) + samples = np.concatenate([x[..., None], coord_rest], axis=1) + # reshape output to (size, dim) + if size is not None: + samples = samples.reshape(size + (dim, )) + else: + samples = np.squeeze(samples) + return samples + + def _rotate_samples(self, samples, mu, dim): + """A QR decomposition is used to find the rotation that maps the + north pole (1, 0,...,0) to the vector mu. This rotation is then + applied to all samples. + + Parameters + ---------- + samples: array_like, shape = [..., n] + mu : array-like, shape=[n, ] + Point to parametrise the rotation. + + Returns + ------- + samples : rotated samples + + """ + base_point = np.zeros((dim, )) + base_point[0] = 1. + embedded = np.concatenate([mu[None, :], np.zeros((dim - 1, dim))]) + rotmatrix, _ = np.linalg.qr(np.transpose(embedded)) + if np.allclose(np.matmul(rotmatrix, base_point[:, None])[:, 0], mu): + rotsign = 1 + else: + rotsign = -1 + + # apply rotation + samples = np.einsum('ij,...j->...i', rotmatrix, samples) * rotsign + return samples + + def _rvs(self, dim, mu, kappa, size, random_state): + if dim == 2: + samples = self._rvs_2d(mu, kappa, size, random_state) + elif dim == 3: + samples = self._rvs_3d(kappa, size, random_state) + else: + samples = self._rejection_sampling(dim, kappa, size, + random_state) + + if dim != 2: + samples = self._rotate_samples(samples, mu, dim) + return samples + + def rvs(self, mu=None, kappa=1, size=1, random_state=None): + """Draw random samples from a von Mises-Fisher distribution. + + Parameters + ---------- + mu : array_like + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float + Concentration parameter. Must be positive. + size : int or tuple of ints, optional + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. + Because each sample is N-dimensional, the output shape + is (m,n,k,N). If no shape is specified, a single (N-D) + sample is returned. + random_state : {None, int, np.random.RandomState, np.random.Generator}, + optional + Used for drawing random variates. + If `seed` is `None`, the `~np.random.RandomState` singleton is used. 
+ If `seed` is an int, a new ``RandomState`` instance is used, seeded + with seed. + If `seed` is already a ``RandomState`` or ``Generator`` instance, + then that object is used. + Default is `None`. + + Returns + ------- + rvs : ndarray + Random variates of shape (`size`, `N`), where `N` is the + dimension of the distribution. + + """ + dim, mu, kappa = self._process_parameters(mu, kappa) + random_state = self._get_random_state(random_state) + samples = self._rvs(dim, mu, kappa, size, random_state) + return samples + + def _entropy(self, dim, kappa): + halfdim = 0.5 * dim + return (-self._log_norm_factor(dim, kappa) - kappa * + ive(halfdim, kappa) / ive(halfdim - 1, kappa)) + + def entropy(self, mu=None, kappa=1): + """Compute the differential entropy of the von Mises-Fisher + distribution. + + Parameters + ---------- + mu : array_like, default: None + Mean direction of the distribution. Must be a one-dimensional unit + vector of norm 1. + kappa : float, default: 1 + Concentration parameter. Must be positive. + + Returns + ------- + h : scalar + Entropy of the von Mises-Fisher distribution. + + """ + dim, _, kappa = self._process_parameters(mu, kappa) + return self._entropy(dim, kappa) + + def fit(self, x): + """Fit the von Mises-Fisher distribution to data. + + Parameters + ---------- + x : array-like + Data the distribution is fitted to. Must be two dimensional. + The second axis of `x` must be unit vectors of norm 1 and + determine the dimensionality of the fitted + von Mises-Fisher distribution. + + Returns + ------- + mu : ndarray + Estimated mean direction. + kappa : float + Estimated concentration parameter. + + """ + # validate input data + x = np.asarray(x) + if x.ndim != 2: + raise ValueError("'x' must be two dimensional.") + if not np.allclose(np.linalg.norm(x, axis=-1), 1.): + msg = "'x' must be unit vectors of norm 1 along last dimension." + raise ValueError(msg) + dim = x.shape[-1] + + # mu is simply the directional mean + dirstats = directional_stats(x) + mu = dirstats.mean_direction + r = dirstats.mean_resultant_length + + # kappa is the solution to the equation: + # r = I[dim/2](kappa) / I[dim/2 -1](kappa) + # = I[dim/2](kappa) * exp(-kappa) / I[dim/2 -1](kappa) * exp(-kappa) + # = ive(dim/2, kappa) / ive(dim/2 -1, kappa) + + halfdim = 0.5 * dim + + def solve_for_kappa(kappa): + bessel_vals = ive([halfdim, halfdim - 1], kappa) + return bessel_vals[0]/bessel_vals[1] - r + + root_res = root_scalar(solve_for_kappa, method="brentq", + bracket=(1e-8, 1e9)) + kappa = root_res.root + return mu, kappa + + +vonmises_fisher = vonmises_fisher_gen() + + +class vonmises_fisher_frozen(multi_rv_frozen): + def __init__(self, mu=None, kappa=1, seed=None): + """Create a frozen von Mises-Fisher distribution. + + Parameters + ---------- + mu : array_like, default: None + Mean direction of the distribution. + kappa : float, default: 1 + Concentration parameter. Must be positive. + seed : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. 
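+
+        Examples
+        --------
+        A minimal usage sketch: freezing fixes ``mu`` and ``kappa`` so the
+        methods can be called without passing them again.
+
+        >>> import numpy as np
+        >>> from scipy.stats import vonmises_fisher
+        >>> frozen = vonmises_fisher(mu=np.array([0., 0., 1.]), kappa=10)
+        >>> frozen.rvs(5).shape
+        (5, 3)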
+ + """ + self._dist = vonmises_fisher_gen(seed) + self.dim, self.mu, self.kappa = ( + self._dist._process_parameters(mu, kappa) + ) + + def logpdf(self, x): + """ + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability + density function. The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + + Returns + ------- + logpdf : ndarray or scalar + Log of probability density function evaluated at `x`. + + """ + return self._dist._logpdf(x, self.dim, self.mu, self.kappa) + + def pdf(self, x): + """ + Parameters + ---------- + x : array_like + Points at which to evaluate the log of the probability + density function. The last axis of `x` must correspond + to unit vectors of the same dimensionality as the distribution. + + Returns + ------- + pdf : ndarray or scalar + Probability density function evaluated at `x`. + + """ + return np.exp(self.logpdf(x)) + + def rvs(self, size=1, random_state=None): + """Draw random variates from the Von Mises-Fisher distribution. + + Parameters + ---------- + size : int or tuple of ints, optional + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. + Because each sample is N-dimensional, the output shape + is (m,n,k,N). If no shape is specified, a single (N-D) + sample is returned. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance + then that instance is used. + + Returns + ------- + rvs : ndarray or scalar + Random variates of size (`size`, `N`), where `N` is the + dimension of the distribution. + + """ + random_state = self._dist._get_random_state(random_state) + return self._dist._rvs(self.dim, self.mu, self.kappa, size, + random_state) + + def entropy(self): + """ + Calculate the differential entropy of the von Mises-Fisher + distribution. + + Returns + ------- + h: float + Entropy of the Von Mises-Fisher distribution. + + """ + return self._dist._entropy(self.dim, self.kappa) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py new file mode 100644 index 0000000000000000000000000000000000000000..3dd588c4972b39e720a4359099ff8157460f5497 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py @@ -0,0 +1,482 @@ +import numpy as np + +from scipy.special import ndtri +from scipy.optimize import brentq +from ._discrete_distns import nchypergeom_fisher +from ._common import ConfidenceInterval + + +def _sample_odds_ratio(table): + """ + Given a table [[a, b], [c, d]], compute a*d/(b*c). + + Return nan if the numerator and denominator are 0. + Return inf if just the denominator is 0. + """ + # table must be a 2x2 numpy array. + if table[1, 0] > 0 and table[0, 1] > 0: + oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1]) + elif table[0, 0] == 0 or table[1, 1] == 0: + oddsratio = np.nan + else: + oddsratio = np.inf + return oddsratio + + +def _solve(func): + """ + Solve func(nc) = 0. func must be an increasing function. 
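+
+    For example (illustrative only), the increasing function
+    ``f(nc) = exp(nc) - 2`` has its root at ``log(2)``; the bracket is first
+    expanded or shrunk by factors of 2 until the sign changes and then
+    refined with ``brentq``:
+
+    >>> import numpy as np
+    >>> from scipy.stats._odds_ratio import _solve
+    >>> round(_solve(lambda nc: np.exp(nc) - 2), 6)
+    0.693147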
+ """ + # We could just as well call the variable `x` instead of `nc`, but we + # always call this function with functions for which nc (the noncentrality + # parameter) is the variable for which we are solving. + nc = 1.0 + value = func(nc) + if value == 0: + return nc + + # Multiplicative factor by which to increase or decrease nc when + # searching for a bracketing interval. + factor = 2.0 + # Find a bracketing interval. + if value > 0: + nc /= factor + while func(nc) > 0: + nc /= factor + lo = nc + hi = factor*nc + else: + nc *= factor + while func(nc) < 0: + nc *= factor + lo = nc/factor + hi = nc + + # lo and hi bracket the solution for nc. + nc = brentq(func, lo, hi, xtol=1e-13) + return nc + + +def _nc_hypergeom_mean_inverse(x, M, n, N): + """ + For the given noncentral hypergeometric parameters x, M, n,and N + (table[0,0], total, row 0 sum and column 0 sum, resp., of a 2x2 + contingency table), find the noncentrality parameter of Fisher's + noncentral hypergeometric distribution whose mean is x. + """ + nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x) + return nc + + +def _hypergeom_params_from_table(table): + # The notation M, n and N is consistent with stats.hypergeom and + # stats.nchypergeom_fisher. + x = table[0, 0] + M = table.sum() + n = table[0].sum() + N = table[:, 0].sum() + return x, M, n, N + + +def _ci_upper(table, alpha): + """ + Compute the upper end of the confidence interval. + """ + if _sample_odds_ratio(table) == np.inf: + return np.inf + + x, M, n, N = _hypergeom_params_from_table(table) + + # nchypergeom_fisher.cdf is a decreasing function of nc, so we negate + # it in the lambda expression. + nc = _solve(lambda nc: -nchypergeom_fisher.cdf(x, M, n, N, nc) + alpha) + return nc + + +def _ci_lower(table, alpha): + """ + Compute the lower end of the confidence interval. + """ + if _sample_odds_ratio(table) == 0: + return 0 + + x, M, n, N = _hypergeom_params_from_table(table) + + nc = _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha) + return nc + + +def _conditional_oddsratio(table): + """ + Conditional MLE of the odds ratio for the 2x2 contingency table. + """ + x, M, n, N = _hypergeom_params_from_table(table) + # Get the bounds of the support. The support of the noncentral + # hypergeometric distribution with parameters M, n, and N is the same + # for all values of the noncentrality parameter, so we can use 1 here. + lo, hi = nchypergeom_fisher.support(M, n, N, 1) + + # Check if x is at one of the extremes of the support. If so, we know + # the odds ratio is either 0 or inf. + if x == lo: + # x is at the low end of the support. + return 0 + if x == hi: + # x is at the high end of the support. + return np.inf + + nc = _nc_hypergeom_mean_inverse(x, M, n, N) + return nc + + +def _conditional_oddsratio_ci(table, confidence_level=0.95, + alternative='two-sided'): + """ + Conditional exact confidence interval for the odds ratio. 
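+
+    The endpoints are obtained by inverting one-sided tests based on
+    Fisher's noncentral hypergeometric distribution: a two-sided interval
+    puts ``(1 - confidence_level)/2`` in each tail, while the one-sided
+    alternatives put all of ``1 - confidence_level`` in the single finite
+    endpoint (the other endpoint being ``0`` or ``inf``).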
+ """ + if alternative == 'two-sided': + alpha = 0.5*(1 - confidence_level) + lower = _ci_lower(table, alpha) + upper = _ci_upper(table, alpha) + elif alternative == 'less': + lower = 0.0 + upper = _ci_upper(table, 1 - confidence_level) + else: + # alternative == 'greater' + lower = _ci_lower(table, 1 - confidence_level) + upper = np.inf + + return lower, upper + + +def _sample_odds_ratio_ci(table, confidence_level=0.95, + alternative='two-sided'): + oddsratio = _sample_odds_ratio(table) + log_or = np.log(oddsratio) + se = np.sqrt((1/table).sum()) + if alternative == 'less': + z = ndtri(confidence_level) + loglow = -np.inf + loghigh = log_or + z*se + elif alternative == 'greater': + z = ndtri(confidence_level) + loglow = log_or - z*se + loghigh = np.inf + else: + # alternative is 'two-sided' + z = ndtri(0.5*confidence_level + 0.5) + loglow = log_or - z*se + loghigh = log_or + z*se + + return np.exp(loglow), np.exp(loghigh) + + +class OddsRatioResult: + """ + Result of `scipy.stats.contingency.odds_ratio`. See the + docstring for `odds_ratio` for more details. + + Attributes + ---------- + statistic : float + The computed odds ratio. + + * If `kind` is ``'sample'``, this is sample (or unconditional) + estimate, given by + ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``. + * If `kind` is ``'conditional'``, this is the conditional + maximum likelihood estimate for the odds ratio. It is + the noncentrality parameter of Fisher's noncentral + hypergeometric distribution with the same hypergeometric + parameters as `table` and whose mean is ``table[0, 0]``. + + Methods + ------- + confidence_interval : + Confidence interval for the odds ratio. + """ + + def __init__(self, _table, _kind, statistic): + # for now, no need to make _table and _kind public, since this sort of + # information is returned in very few `scipy.stats` results + self._table = _table + self._kind = _kind + self.statistic = statistic + + def __repr__(self): + return f"OddsRatioResult(statistic={self.statistic})" + + def confidence_interval(self, confidence_level=0.95, + alternative='two-sided'): + """ + Confidence interval for the odds ratio. + + Parameters + ---------- + confidence_level: float + Desired confidence level for the confidence interval. + The value must be given as a fraction between 0 and 1. + Default is 0.95 (meaning 95%). + + alternative : {'two-sided', 'less', 'greater'}, optional + The alternative hypothesis of the hypothesis test to which the + confidence interval corresponds. That is, suppose the null + hypothesis is that the true odds ratio equals ``OR`` and the + confidence interval is ``(low, high)``. Then the following options + for `alternative` are available (default is 'two-sided'): + + * 'two-sided': the true odds ratio is not equal to ``OR``. There + is evidence against the null hypothesis at the chosen + `confidence_level` if ``high < OR`` or ``low > OR``. + * 'less': the true odds ratio is less than ``OR``. The ``low`` end + of the confidence interval is 0, and there is evidence against + the null hypothesis at the chosen `confidence_level` if + ``high < OR``. + * 'greater': the true odds ratio is greater than ``OR``. The + ``high`` end of the confidence interval is ``np.inf``, and there + is evidence against the null hypothesis at the chosen + `confidence_level` if ``low > OR``. + + Returns + ------- + ci : ``ConfidenceInterval`` instance + The confidence interval, represented as an object with + attributes ``low`` and ``high``. 
+ + Notes + ----- + When `kind` is ``'conditional'``, the limits of the confidence + interval are the conditional "exact confidence limits" as described + by Fisher [1]_. The conditional odds ratio and confidence interval are + also discussed in Section 4.1.2 of the text by Sahai and Khurshid [2]_. + + When `kind` is ``'sample'``, the confidence interval is computed + under the assumption that the logarithm of the odds ratio is normally + distributed with standard error given by:: + + se = sqrt(1/a + 1/b + 1/c + 1/d) + + where ``a``, ``b``, ``c`` and ``d`` are the elements of the + contingency table. (See, for example, [2]_, section 3.1.3.2, + or [3]_, section 2.3.3). + + References + ---------- + .. [1] R. A. Fisher (1935), The logic of inductive inference, + Journal of the Royal Statistical Society, Vol. 98, No. 1, + pp. 39-82. + .. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology: + Methods, Techniques, and Applications, CRC Press LLC, Boca + Raton, Florida. + .. [3] Alan Agresti, An Introduction to Categorical Data Analysis + (second edition), Wiley, Hoboken, NJ, USA (2007). + """ + if alternative not in ['two-sided', 'less', 'greater']: + raise ValueError("`alternative` must be 'two-sided', 'less' or " + "'greater'.") + + if confidence_level < 0 or confidence_level > 1: + raise ValueError('confidence_level must be between 0 and 1') + + if self._kind == 'conditional': + ci = self._conditional_odds_ratio_ci(confidence_level, alternative) + else: + ci = self._sample_odds_ratio_ci(confidence_level, alternative) + return ci + + def _conditional_odds_ratio_ci(self, confidence_level=0.95, + alternative='two-sided'): + """ + Confidence interval for the conditional odds ratio. + """ + + table = self._table + if 0 in table.sum(axis=0) or 0 in table.sum(axis=1): + # If both values in a row or column are zero, the p-value is 1, + # the odds ratio is NaN and the confidence interval is (0, inf). + ci = (0, np.inf) + else: + ci = _conditional_oddsratio_ci(table, + confidence_level=confidence_level, + alternative=alternative) + return ConfidenceInterval(low=ci[0], high=ci[1]) + + def _sample_odds_ratio_ci(self, confidence_level=0.95, + alternative='two-sided'): + """ + Confidence interval for the sample odds ratio. + """ + if confidence_level < 0 or confidence_level > 1: + raise ValueError('confidence_level must be between 0 and 1') + + table = self._table + if 0 in table.sum(axis=0) or 0 in table.sum(axis=1): + # If both values in a row or column are zero, the p-value is 1, + # the odds ratio is NaN and the confidence interval is (0, inf). + ci = (0, np.inf) + else: + ci = _sample_odds_ratio_ci(table, + confidence_level=confidence_level, + alternative=alternative) + return ConfidenceInterval(low=ci[0], high=ci[1]) + + +def odds_ratio(table, *, kind='conditional'): + r""" + Compute the odds ratio for a 2x2 contingency table. + + Parameters + ---------- + table : array_like of ints + A 2x2 contingency table. Elements must be non-negative integers. + kind : str, optional + Which kind of odds ratio to compute, either the sample + odds ratio (``kind='sample'``) or the conditional odds ratio + (``kind='conditional'``). Default is ``'conditional'``. + + Returns + ------- + result : `~scipy.stats._result_classes.OddsRatioResult` instance + The returned object has two computed attributes: + + statistic : float + * If `kind` is ``'sample'``, this is sample (or unconditional) + estimate, given by + ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``. 
+ * If `kind` is ``'conditional'``, this is the conditional + maximum likelihood estimate for the odds ratio. It is + the noncentrality parameter of Fisher's noncentral + hypergeometric distribution with the same hypergeometric + parameters as `table` and whose mean is ``table[0, 0]``. + + The object has the method `confidence_interval` that computes + the confidence interval of the odds ratio. + + See Also + -------- + scipy.stats.fisher_exact + relative_risk + + Notes + ----- + The conditional odds ratio was discussed by Fisher (see "Example 1" + of [1]_). Texts that cover the odds ratio include [2]_ and [3]_. + + .. versionadded:: 1.10.0 + + References + ---------- + .. [1] R. A. Fisher (1935), The logic of inductive inference, + Journal of the Royal Statistical Society, Vol. 98, No. 1, + pp. 39-82. + .. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research. + Volume I - The analysis of case-control studies. IARC Sci Publ. + (32):5-338. PMID: 7216345. (See section 4.2.) + .. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology: + Methods, Techniques, and Applications, CRC Press LLC, Boca + Raton, Florida. + .. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of + Cardiovascular Events in Women and Men: A Sex-Specific + Meta-analysis of Randomized Controlled Trials." + JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006. + + Examples + -------- + In epidemiology, individuals are classified as "exposed" or + "unexposed" to some factor or treatment. If the occurrence of some + illness is under study, those who have the illness are often + classified as "cases", and those without it are "noncases". The + counts of the occurrences of these classes gives a contingency + table:: + + exposed unexposed + cases a b + noncases c d + + The sample odds ratio may be written ``(a/c) / (b/d)``. ``a/c`` can + be interpreted as the odds of a case occurring in the exposed group, + and ``b/d`` as the odds of a case occurring in the unexposed group. + The sample odds ratio is the ratio of these odds. If the odds ratio + is greater than 1, it suggests that there is a positive association + between being exposed and being a case. + + Interchanging the rows or columns of the contingency table inverts + the odds ratio, so it is import to understand the meaning of labels + given to the rows and columns of the table when interpreting the + odds ratio. + + In [4]_, the use of aspirin to prevent cardiovascular events in women + and men was investigated. The study notably concluded: + + ...aspirin therapy reduced the risk of a composite of + cardiovascular events due to its effect on reducing the risk of + ischemic stroke in women [...] + + The article lists studies of various cardiovascular events. Let's + focus on the ischemic stoke in women. + + The following table summarizes the results of the experiment in which + participants took aspirin or a placebo on a regular basis for several + years. Cases of ischemic stroke were recorded:: + + Aspirin Control/Placebo + Ischemic stroke 176 230 + No stroke 21035 21018 + + The question we ask is "Is there evidence that the aspirin reduces the + risk of ischemic stroke?" + + Compute the odds ratio: + + >>> from scipy.stats.contingency import odds_ratio + >>> res = odds_ratio([[176, 230], [21035, 21018]]) + >>> res.statistic + 0.7646037659999126 + + For this sample, the odds of getting an ischemic stroke for those who have + been taking aspirin are 0.76 times that of those + who have received the placebo. 
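+
+    For comparison, the sample (unconditional) estimate is the plain
+    cross-product ratio of the table; for this data it is close to the
+    conditional estimate shown above:
+
+    >>> res_sample = odds_ratio([[176, 230], [21035, 21018]], kind='sample')
+    >>> float(round(res_sample.statistic, 3))
+    0.765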
+ + To make statistical inferences about the population under study, + we can compute the 95% confidence interval for the odds ratio: + + >>> res.confidence_interval(confidence_level=0.95) + ConfidenceInterval(low=0.6241234078749812, high=0.9354102892100372) + + The 95% confidence interval for the conditional odds ratio is + approximately (0.62, 0.94). + + The fact that the entire 95% confidence interval falls below 1 supports + the authors' conclusion that the aspirin was associated with a + statistically significant reduction in ischemic stroke. + """ + if kind not in ['conditional', 'sample']: + raise ValueError("`kind` must be 'conditional' or 'sample'.") + + c = np.asarray(table) + + if c.shape != (2, 2): + raise ValueError(f"Invalid shape {c.shape}. The input `table` must be " + "of shape (2, 2).") + + if not np.issubdtype(c.dtype, np.integer): + raise ValueError("`table` must be an array of integers, but got " + f"type {c.dtype}") + c = c.astype(np.int64) + + if np.any(c < 0): + raise ValueError("All values in `table` must be nonnegative.") + + if 0 in c.sum(axis=0) or 0 in c.sum(axis=1): + # If both values in a row or column are zero, the p-value is NaN and + # the odds ratio is NaN. + result = OddsRatioResult(_table=c, _kind=kind, statistic=np.nan) + return result + + if kind == 'sample': + oddsratio = _sample_odds_ratio(c) + else: # kind is 'conditional' + oddsratio = _conditional_oddsratio(c) + + result = OddsRatioResult(_table=c, _kind=kind, statistic=oddsratio) + return result diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_qmvnt.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_qmvnt.py new file mode 100644 index 0000000000000000000000000000000000000000..a0e9c5ebb3cba91e0bfa5e600a1a04d2459280ad --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_qmvnt.py @@ -0,0 +1,533 @@ +# Integration of multivariate normal and t distributions. + +# Adapted from the MATLAB original implementations by Dr. Alan Genz. + +# http://www.math.wsu.edu/faculty/genz/software/software.html + +# Copyright (C) 2013, Alan Genz, All rights reserved. +# Python implementation is copyright (C) 2022, Robert Kern, All rights +# reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided the following conditions are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# 3. The contributor name(s) may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +import numpy as np + +from scipy.fft import fft, ifft +from scipy.special import gammaincinv, ndtr, ndtri +from scipy.stats._qmc import primes_from_2_to + + +phi = ndtr +phinv = ndtri + + +def _factorize_int(n): + """Return a sorted list of the unique prime factors of a positive integer. + """ + # NOTE: There are lots faster ways to do this, but this isn't terrible. + factors = set() + for p in primes_from_2_to(int(np.sqrt(n)) + 1): + while not (n % p): + factors.add(p) + n //= p + if n == 1: + break + if n != 1: + factors.add(n) + return sorted(factors) + + +def _primitive_root(p): + """Compute a primitive root of the prime number `p`. + + Used in the CBC lattice construction. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Primitive_root_modulo_n + """ + # p is prime + pm = p - 1 + factors = _factorize_int(pm) + n = len(factors) + r = 2 + k = 0 + while k < n: + d = pm // factors[k] + # pow() doesn't like numpy scalar types. + rd = pow(int(r), int(d), int(p)) + if rd == 1: + r += 1 + k = 0 + else: + k += 1 + return r + + +def _cbc_lattice(n_dim, n_qmc_samples): + """Compute a QMC lattice generator using a Fast CBC construction. + + Parameters + ---------- + n_dim : int > 0 + The number of dimensions for the lattice. + n_qmc_samples : int > 0 + The desired number of QMC samples. This will be rounded down to the + nearest prime to enable the CBC construction. + + Returns + ------- + q : float array : shape=(n_dim,) + The lattice generator vector. All values are in the open interval + `(0, 1)`. + actual_n_qmc_samples : int + The prime number of QMC samples that must be used with this lattice, + no more, no less. + + References + ---------- + .. [1] Nuyens, D. and Cools, R. "Fast Component-by-Component Construction, + a Reprise for Different Kernels", In H. Niederreiter and D. Talay, + editors, Monte-Carlo and Quasi-Monte Carlo Methods 2004, + Springer-Verlag, 2006, 371-385. + """ + # Round down to the nearest prime number. + primes = primes_from_2_to(n_qmc_samples + 1) + n_qmc_samples = primes[-1] + + bt = np.ones(n_dim) + gm = np.hstack([1.0, 0.8 ** np.arange(n_dim - 1)]) + q = 1 + w = 0 + z = np.arange(1, n_dim + 1) + m = (n_qmc_samples - 1) // 2 + g = _primitive_root(n_qmc_samples) + # Slightly faster way to compute perm[j] = pow(g, j, n_qmc_samples) + # Shame that we don't have modulo pow() implemented as a ufunc. + perm = np.ones(m, dtype=int) + for j in range(m - 1): + perm[j + 1] = (g * perm[j]) % n_qmc_samples + perm = np.minimum(n_qmc_samples - perm, perm) + pn = perm / n_qmc_samples + c = pn * pn - pn + 1.0 / 6 + fc = fft(c) + for s in range(1, n_dim): + reordered = np.hstack([ + c[:w+1][::-1], + c[w+1:m][::-1], + ]) + q = q * (bt[s-1] + gm[s-1] * reordered) + w = ifft(fc * fft(q)).real.argmin() + z[s] = perm[w] + q = z / n_qmc_samples + return q, n_qmc_samples + + +# Note: this function is not currently used or tested by any SciPy code. 
It is +# included in this file to facilitate the development of a parameter for users +# to set the desired CDF accuracy, but must be reviewed and tested before use. +def _qauto(func, covar, low, high, rng, error=1e-3, limit=10_000, **kwds): + """Automatically rerun the integration to get the required error bound. + + Parameters + ---------- + func : callable + Either :func:`_qmvn` or :func:`_qmvt`. + covar, low, high : array + As specified in :func:`_qmvn` and :func:`_qmvt`. + rng : Generator, optional + default_rng(), yada, yada + error : float > 0 + The desired error bound. + limit : int > 0: + The rough limit of the number of integration points to consider. The + integration will stop looping once this limit has been *exceeded*. + **kwds : + Other keyword arguments to pass to `func`. When using :func:`_qmvt`, be + sure to include ``nu=`` as one of these. + + Returns + ------- + prob : float + The estimated probability mass within the bounds. + est_error : float + 3 times the standard error of the batch estimates. + n_samples : int + The number of integration points actually used. + """ + n = len(covar) + n_samples = 0 + if n == 1: + prob = phi(high) - phi(low) + # More or less + est_error = 1e-15 + else: + mi = min(limit, n * 1000) + prob = 0.0 + est_error = 1.0 + ei = 0.0 + while est_error > error and n_samples < limit: + mi = round(np.sqrt(2) * mi) + pi, ei, ni = func(mi, covar, low, high, rng=rng, **kwds) + n_samples += ni + wt = 1.0 / (1 + (ei / est_error)**2) + prob += wt * (pi - prob) + est_error = np.sqrt(wt) * ei + return prob, est_error, n_samples + + +# Note: this function is not currently used or tested by any SciPy code. It is +# included in this file to facilitate the resolution of gh-8367, gh-16142, and +# possibly gh-14286, but must be reviewed and tested before use. +def _qmvn(m, covar, low, high, rng, lattice='cbc', n_batches=10): + """Multivariate normal integration over box bounds. + + Parameters + ---------- + m : int > n_batches + The number of points to sample. This number will be divided into + `n_batches` batches that apply random offsets of the sampling lattice + for each batch in order to estimate the error. + covar : (n, n) float array + Possibly singular, positive semidefinite symmetric covariance matrix. + low, high : (n,) float array + The low and high integration bounds. + rng : Generator, optional + default_rng(), yada, yada + lattice : 'cbc' or callable + The type of lattice rule to use to construct the integration points. + n_batches : int > 0, optional + The number of QMC batches to apply. + + Returns + ------- + prob : float + The estimated probability mass within the bounds. + est_error : float + 3 times the standard error of the batch estimates. + """ + cho, lo, hi = _permuted_cholesky(covar, low, high) + n = cho.shape[0] + ct = cho[0, 0] + c = phi(lo[0] / ct) + d = phi(hi[0] / ct) + ci = c + dci = d - ci + prob = 0.0 + error_var = 0.0 + q, n_qmc_samples = _cbc_lattice(n - 1, max(m // n_batches, 1)) + y = np.zeros((n - 1, n_qmc_samples)) + i_samples = np.arange(n_qmc_samples) + 1 + for j in range(n_batches): + c = np.full(n_qmc_samples, ci) + dc = np.full(n_qmc_samples, dci) + pv = dc.copy() + for i in range(1, n): + # Pseudorandomly-shifted lattice coordinate. + z = q[i - 1] * i_samples + rng.random() + # Fast remainder(z, 1.0) + z -= z.astype(int) + # Tent periodization transform. 
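+            # The map z -> |2*z - 1| folds each randomly shifted lattice
+            # point back into [0, 1]. It is measure preserving, so the
+            # estimate stays unbiased, while the periodization improves the
+            # accuracy of the lattice rule for non-periodic integrands.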
+ x = abs(2 * z - 1) + y[i - 1, :] = phinv(c + x * dc) + s = cho[i, :i] @ y[:i, :] + ct = cho[i, i] + c = phi((lo[i] - s) / ct) + d = phi((hi[i] - s) / ct) + dc = d - c + pv = pv * dc + # Accumulate the mean and error variances with online formulations. + d = (pv.mean() - prob) / (j + 1) + prob += d + error_var = (j - 1) * error_var / (j + 1) + d * d + # Error bounds are 3 times the standard error of the estimates. + est_error = 3 * np.sqrt(error_var) + n_samples = n_qmc_samples * n_batches + return prob, est_error, n_samples + + +# Note: this function is not currently used or tested by any SciPy code. It is +# included in this file to facilitate the resolution of gh-8367, gh-16142, and +# possibly gh-14286, but must be reviewed and tested before use. +def _mvn_qmc_integrand(covar, low, high, use_tent=False): + """Transform the multivariate normal integration into a QMC integrand over + a unit hypercube. + + The dimensionality of the resulting hypercube integration domain is one + less than the dimensionality of the original integrand. Note that this + transformation subsumes the integration bounds in order to account for + infinite bounds. The QMC integration one does with the returned integrand + should be on the unit hypercube. + + Parameters + ---------- + covar : (n, n) float array + Possibly singular, positive semidefinite symmetric covariance matrix. + low, high : (n,) float array + The low and high integration bounds. + use_tent : bool, optional + If True, then use tent periodization. Only helpful for lattice rules. + + Returns + ------- + integrand : Callable[[NDArray], NDArray] + The QMC-integrable integrand. It takes an + ``(n_qmc_samples, ndim_integrand)`` array of QMC samples in the unit + hypercube and returns the ``(n_qmc_samples,)`` evaluations of at these + QMC points. + ndim_integrand : int + The dimensionality of the integrand. Equal to ``n-1``. + """ + cho, lo, hi = _permuted_cholesky(covar, low, high) + n = cho.shape[0] + ndim_integrand = n - 1 + ct = cho[0, 0] + c = phi(lo[0] / ct) + d = phi(hi[0] / ct) + ci = c + dci = d - ci + + def integrand(*zs): + ndim_qmc = len(zs) + n_qmc_samples = len(np.atleast_1d(zs[0])) + assert ndim_qmc == ndim_integrand + y = np.zeros((ndim_qmc, n_qmc_samples)) + c = np.full(n_qmc_samples, ci) + dc = np.full(n_qmc_samples, dci) + pv = dc.copy() + for i in range(1, n): + if use_tent: + # Tent periodization transform. + x = abs(2 * zs[i-1] - 1) + else: + x = zs[i-1] + y[i - 1, :] = phinv(c + x * dc) + s = cho[i, :i] @ y[:i, :] + ct = cho[i, i] + c = phi((lo[i] - s) / ct) + d = phi((hi[i] - s) / ct) + dc = d - c + pv = pv * dc + return pv + + return integrand, ndim_integrand + + +def _qmvt(m, nu, covar, low, high, rng, lattice='cbc', n_batches=10): + """Multivariate t integration over box bounds. + + Parameters + ---------- + m : int > n_batches + The number of points to sample. This number will be divided into + `n_batches` batches that apply random offsets of the sampling lattice + for each batch in order to estimate the error. + nu : float >= 0 + The shape parameter of the multivariate t distribution. + covar : (n, n) float array + Possibly singular, positive semidefinite symmetric covariance matrix. + low, high : (n,) float array + The low and high integration bounds. + rng : Generator, optional + default_rng(), yada, yada + lattice : 'cbc' or callable + The type of lattice rule to use to construct the integration points. + n_batches : int > 0, optional + The number of QMC batches to apply. 
+ + Returns + ------- + prob : float + The estimated probability mass within the bounds. + est_error : float + 3 times the standard error of the batch estimates. + n_samples : int + The number of samples actually used. + """ + sn = max(1.0, np.sqrt(nu)) + low = np.asarray(low, dtype=np.float64) + high = np.asarray(high, dtype=np.float64) + cho, lo, hi = _permuted_cholesky(covar, low / sn, high / sn) + n = cho.shape[0] + prob = 0.0 + error_var = 0.0 + q, n_qmc_samples = _cbc_lattice(n, max(m // n_batches, 1)) + i_samples = np.arange(n_qmc_samples) + 1 + for j in range(n_batches): + pv = np.ones(n_qmc_samples) + s = np.zeros((n, n_qmc_samples)) + for i in range(n): + # Pseudorandomly-shifted lattice coordinate. + z = q[i] * i_samples + rng.random() + # Fast remainder(z, 1.0) + z -= z.astype(int) + # Tent periodization transform. + x = abs(2 * z - 1) + # FIXME: Lift the i==0 case out of the loop to make the logic + # easier to follow. + if i == 0: + # We'll use one of the QR variates to pull out the + # t-distribution scaling. + if nu > 0: + r = np.sqrt(2 * gammaincinv(nu / 2, x)) + else: + r = np.ones_like(x) + else: + y = phinv(c + x * dc) # noqa: F821 + with np.errstate(invalid='ignore'): + s[i:, :] += cho[i:, i - 1][:, np.newaxis] * y + si = s[i, :] + + c = np.ones(n_qmc_samples) + d = np.ones(n_qmc_samples) + with np.errstate(invalid='ignore'): + lois = lo[i] * r - si + hiis = hi[i] * r - si + c[lois < -9] = 0.0 + d[hiis < -9] = 0.0 + lo_mask = abs(lois) < 9 + hi_mask = abs(hiis) < 9 + c[lo_mask] = phi(lois[lo_mask]) + d[hi_mask] = phi(hiis[hi_mask]) + + dc = d - c + pv *= dc + + # Accumulate the mean and error variances with online formulations. + d = (pv.mean() - prob) / (j + 1) + prob += d + error_var = (j - 1) * error_var / (j + 1) + d * d + # Error bounds are 3 times the standard error of the estimates. + est_error = 3 * np.sqrt(error_var) + n_samples = n_qmc_samples * n_batches + return prob, est_error, n_samples + + +def _permuted_cholesky(covar, low, high, tol=1e-10): + """Compute a scaled, permuted Cholesky factor, with integration bounds. + + The scaling and permuting of the dimensions accomplishes part of the + transformation of the original integration problem into a more numerically + tractable form. The lower-triangular Cholesky factor will then be used in + the subsequent integration. The integration bounds will be scaled and + permuted as well. + + Parameters + ---------- + covar : (n, n) float array + Possibly singular, positive semidefinite symmetric covariance matrix. + low, high : (n,) float array + The low and high integration bounds. + tol : float, optional + The singularity tolerance. + + Returns + ------- + cho : (n, n) float array + Lower Cholesky factor, scaled and permuted. + new_low, new_high : (n,) float array + The scaled and permuted low and high integration bounds. + """ + # Make copies for outputting. + cho = np.array(covar, dtype=np.float64) + new_lo = np.array(low, dtype=np.float64) + new_hi = np.array(high, dtype=np.float64) + n = cho.shape[0] + if cho.shape != (n, n): + raise ValueError("expected a square symmetric array") + if new_lo.shape != (n,) or new_hi.shape != (n,): + raise ValueError( + "expected integration boundaries the same dimensions " + "as the covariance matrix" + ) + # Scale by the sqrt of the diagonal. + dc = np.sqrt(np.maximum(np.diag(cho), 0.0)) + # But don't divide by 0. 
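+    # A zero diagonal entry corresponds to a degenerate component with zero
+    # variance; replacing the scale by 1 leaves that row unchanged, and the
+    # ``cho[i, i] > tol`` check in the loop below skips it anyway.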
+ dc[dc == 0.0] = 1.0 + new_lo /= dc + new_hi /= dc + cho /= dc + cho /= dc[:, np.newaxis] + + y = np.zeros(n) + sqtp = np.sqrt(2 * np.pi) + for k in range(n): + epk = (k + 1) * tol + im = k + ck = 0.0 + dem = 1.0 + s = 0.0 + lo_m = 0.0 + hi_m = 0.0 + for i in range(k, n): + if cho[i, i] > tol: + ci = np.sqrt(cho[i, i]) + if i > 0: + s = cho[i, :k] @ y[:k] + lo_i = (new_lo[i] - s) / ci + hi_i = (new_hi[i] - s) / ci + de = phi(hi_i) - phi(lo_i) + if de <= dem: + ck = ci + dem = de + lo_m = lo_i + hi_m = hi_i + im = i + if im > k: + # Swap im and k + cho[im, im] = cho[k, k] + _swap_slices(cho, np.s_[im, :k], np.s_[k, :k]) + _swap_slices(cho, np.s_[im + 1:, im], np.s_[im + 1:, k]) + _swap_slices(cho, np.s_[k + 1:im, k], np.s_[im, k + 1:im]) + _swap_slices(new_lo, k, im) + _swap_slices(new_hi, k, im) + if ck > epk: + cho[k, k] = ck + cho[k, k + 1:] = 0.0 + for i in range(k + 1, n): + cho[i, k] /= ck + cho[i, k + 1:i + 1] -= cho[i, k] * cho[k + 1:i + 1, k] + if abs(dem) > tol: + y[k] = ((np.exp(-lo_m * lo_m / 2) - np.exp(-hi_m * hi_m / 2)) / + (sqtp * dem)) + else: + y[k] = (lo_m + hi_m) / 2 + if lo_m < -10: + y[k] = hi_m + elif hi_m > 10: + y[k] = lo_m + cho[k, :k + 1] /= ck + new_lo[k] /= ck + new_hi[k] /= ck + else: + cho[k:, k] = 0.0 + y[k] = (new_lo[k] + new_hi[k]) / 2 + return cho, new_lo, new_hi + + +def _swap_slices(x, slc1, slc2): + t = x[slc1].copy() + x[slc1] = x[slc2].copy() + x[slc2] = t diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats_py.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats_py.py new file mode 100644 index 0000000000000000000000000000000000000000..a0bcb13c74b2b1b9bb75dd1ecad59dbab08ac249 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats_py.py @@ -0,0 +1,11053 @@ +# Copyright 2002 Gary Strangman. All rights reserved +# Copyright 2002-2016 The SciPy Developers +# +# The original code from Gary Strangman was heavily adapted for +# use in SciPy by Travis Oliphant. The original code came with the +# following disclaimer: +# +# This software is provided "as-is". There are no expressed or implied +# warranties of any kind, including, but not limited to, the warranties +# of merchantability and fitness for a given application. In no event +# shall Gary Strangman be liable for any direct, indirect, incidental, +# special, exemplary or consequential damages (including, but not limited +# to, loss of use, data or profits, or business interruption) however +# caused and on any theory of liability, whether in contract, strict +# liability or tort (including negligence or otherwise) arising in any way +# out of the use of this software, even if advised of the possibility of +# such damage. + +""" +A collection of basic statistical functions for Python. + +References +---------- +.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + +""" +import warnings +import math +from math import gcd +from collections import namedtuple + +import numpy as np +from numpy import array, asarray, ma + +from scipy import sparse +from scipy.spatial.distance import cdist +from scipy.spatial import distance_matrix + +from scipy.ndimage import _measurements +from scipy.optimize import milp, LinearConstraint +from scipy._lib._util import (check_random_state, MapWrapper, _get_nan, + rng_integers, _rename_parameter, _contains_nan, + AxisError) + +import scipy.special as special +from scipy import linalg +from . 
import distributions +from . import _mstats_basic as mstats_basic +from ._stats_mstats_common import (_find_repeats, linregress, theilslopes, + siegelslopes) +from ._stats import (_kendall_dis, _toint64, _weightedrankedtau, + _local_correlations) +from dataclasses import dataclass, field +from ._hypotests import _all_partitions +from ._stats_pythran import _compute_outer_prob_inside_method +from ._resampling import (MonteCarloMethod, PermutationMethod, BootstrapMethod, + monte_carlo_test, permutation_test, bootstrap, + _batch_generator) +from ._axis_nan_policy import (_axis_nan_policy_factory, + _broadcast_concatenate) +from ._binomtest import _binary_search_for_binom_tst as _binary_search +from scipy._lib._bunch import _make_tuple_bunch +from scipy import stats +from scipy.optimize import root_scalar +from scipy._lib.deprecation import _NoValue, _deprecate_positional_args +from scipy._lib._util import normalize_axis_index + +# In __all__ but deprecated for removal in SciPy 1.13.0 +from scipy._lib._util import float_factorial # noqa: F401 +from scipy.stats._mstats_basic import ( # noqa: F401 + PointbiserialrResult, Ttest_1sampResult, Ttest_relResult +) + + +# Functions/classes in other files should be added in `__init__.py`, not here +__all__ = ['find_repeats', 'gmean', 'hmean', 'pmean', 'mode', 'tmean', 'tvar', + 'tmin', 'tmax', 'tstd', 'tsem', 'moment', + 'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest', + 'normaltest', 'jarque_bera', + 'scoreatpercentile', 'percentileofscore', + 'cumfreq', 'relfreq', 'obrientransform', + 'sem', 'zmap', 'zscore', 'gzscore', 'iqr', 'gstd', + 'median_abs_deviation', + 'sigmaclip', 'trimboth', 'trim1', 'trim_mean', + 'f_oneway', 'pearsonr', 'fisher_exact', + 'spearmanr', 'pointbiserialr', + 'kendalltau', 'weightedtau', 'multiscale_graphcorr', + 'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp', + 'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', + 'kstest', 'ks_1samp', 'ks_2samp', + 'chisquare', 'power_divergence', + 'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare', + 'rankdata', 'combine_pvalues', 'quantile_test', + 'wasserstein_distance', 'wasserstein_distance_nd', 'energy_distance', + 'brunnermunzel', 'alexandergovern', + 'expectile'] + + +def _chk_asarray(a, axis): + if axis is None: + a = np.ravel(a) + outaxis = 0 + else: + a = np.asarray(a) + outaxis = axis + + if a.ndim == 0: + a = np.atleast_1d(a) + + return a, outaxis + + +def _chk2_asarray(a, b, axis): + if axis is None: + a = np.ravel(a) + b = np.ravel(b) + outaxis = 0 + else: + a = np.asarray(a) + b = np.asarray(b) + outaxis = axis + + if a.ndim == 0: + a = np.atleast_1d(a) + if b.ndim == 0: + b = np.atleast_1d(b) + + return a, b, outaxis + + +SignificanceResult = _make_tuple_bunch('SignificanceResult', + ['statistic', 'pvalue'], []) + + +# note that `weights` are paired with `x` +@_axis_nan_policy_factory( + lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True, + result_to_tuple=lambda x: (x,), kwd_samples=['weights']) +def gmean(a, axis=0, dtype=None, weights=None): + r"""Compute the weighted geometric mean along the specified axis. + + The weighted geometric mean of the array :math:`a_i` associated to weights + :math:`w_i` is: + + .. math:: + + \exp \left( \frac{ \sum_{i=1}^n w_i \ln a_i }{ \sum_{i=1}^n w_i } + \right) \, , + + and, with equal weights, it gives: + + .. math:: + + \sqrt[n]{ \prod_{i=1}^n a_i } \, . + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. 
+ axis : int or None, optional + Axis along which the geometric mean is computed. Default is 0. + If None, compute over the whole array `a`. + dtype : dtype, optional + Type to which the input arrays are cast before the calculation is + performed. + weights : array_like, optional + The `weights` array must be broadcastable to the same shape as `a`. + Default is None, which gives each value a weight of 1.0. + + Returns + ------- + gmean : ndarray + See `dtype` parameter above. + + See Also + -------- + numpy.mean : Arithmetic average + numpy.average : Weighted average + hmean : Harmonic mean + + References + ---------- + .. [1] "Weighted Geometric Mean", *Wikipedia*, + https://en.wikipedia.org/wiki/Weighted_geometric_mean. + .. [2] Grossman, J., Grossman, M., Katz, R., "Averages: A New Approach", + Archimedes Foundation, 1983 + + Examples + -------- + >>> from scipy.stats import gmean + >>> gmean([1, 4]) + 2.0 + >>> gmean([1, 2, 3, 4, 5, 6, 7]) + 3.3800151591412964 + >>> gmean([1, 4, 7], weights=[3, 1, 3]) + 2.80668351922014 + + """ + + a = np.asarray(a, dtype=dtype) + + if weights is not None: + weights = np.asarray(weights, dtype=dtype) + + with np.errstate(divide='ignore'): + log_a = np.log(a) + + return np.exp(np.average(log_a, axis=axis, weights=weights)) + + +@_axis_nan_policy_factory( + lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True, + result_to_tuple=lambda x: (x,), kwd_samples=['weights']) +def hmean(a, axis=0, dtype=None, *, weights=None): + r"""Calculate the weighted harmonic mean along the specified axis. + + The weighted harmonic mean of the array :math:`a_i` associated to weights + :math:`w_i` is: + + .. math:: + + \frac{ \sum_{i=1}^n w_i }{ \sum_{i=1}^n \frac{w_i}{a_i} } \, , + + and, with equal weights, it gives: + + .. math:: + + \frac{ n }{ \sum_{i=1}^n \frac{1}{a_i} } \, . + + Parameters + ---------- + a : array_like + Input array, masked array or object that can be converted to an array. + axis : int or None, optional + Axis along which the harmonic mean is computed. Default is 0. + If None, compute over the whole array `a`. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults to the + dtype of `a`, unless `a` has an integer `dtype` with a precision less + than that of the default platform integer. In that case, the default + platform integer is used. + weights : array_like, optional + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given `axis`) or of the same shape as `a`. + Default is None, which gives each value a weight of 1.0. + + .. versionadded:: 1.9 + + Returns + ------- + hmean : ndarray + See `dtype` parameter above. + + See Also + -------- + numpy.mean : Arithmetic average + numpy.average : Weighted average + gmean : Geometric mean + + Notes + ----- + The harmonic mean is computed over a single dimension of the input + array, axis=0 by default, or all values in the array if axis=None. + float64 intermediate and return values are used for integer inputs. + + References + ---------- + .. [1] "Weighted Harmonic Mean", *Wikipedia*, + https://en.wikipedia.org/wiki/Harmonic_mean#Weighted_harmonic_mean + .. [2] Ferger, F., "The nature and use of the harmonic mean", Journal of + the American Statistical Association, vol. 26, pp. 
36-40, 1931 + + Examples + -------- + >>> from scipy.stats import hmean + >>> hmean([1, 4]) + 1.6000000000000001 + >>> hmean([1, 2, 3, 4, 5, 6, 7]) + 2.6997245179063363 + >>> hmean([1, 4, 7], weights=[3, 1, 3]) + 1.9029126213592233 + + """ + if not isinstance(a, np.ndarray): + a = np.array(a, dtype=dtype) + elif dtype: + # Must change the default dtype allowing array type + if isinstance(a, np.ma.MaskedArray): + a = np.ma.asarray(a, dtype=dtype) + else: + a = np.asarray(a, dtype=dtype) + + if np.all(a >= 0): + # Harmonic mean only defined if greater than or equal to zero. + if weights is not None: + weights = np.asanyarray(weights, dtype=dtype) + + with np.errstate(divide='ignore'): + return 1.0 / np.average(1.0 / a, axis=axis, weights=weights) + else: + raise ValueError("Harmonic mean only defined if all elements greater " + "than or equal to zero") + + +@_axis_nan_policy_factory( + lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True, + result_to_tuple=lambda x: (x,), kwd_samples=['weights']) +def pmean(a, p, *, axis=0, dtype=None, weights=None): + r"""Calculate the weighted power mean along the specified axis. + + The weighted power mean of the array :math:`a_i` associated to weights + :math:`w_i` is: + + .. math:: + + \left( \frac{ \sum_{i=1}^n w_i a_i^p }{ \sum_{i=1}^n w_i } + \right)^{ 1 / p } \, , + + and, with equal weights, it gives: + + .. math:: + + \left( \frac{ 1 }{ n } \sum_{i=1}^n a_i^p \right)^{ 1 / p } \, . + + When ``p=0``, it returns the geometric mean. + + This mean is also called generalized mean or Hölder mean, and must not be + confused with the Kolmogorov generalized mean, also called + quasi-arithmetic mean or generalized f-mean [3]_. + + Parameters + ---------- + a : array_like + Input array, masked array or object that can be converted to an array. + p : int or float + Exponent. + axis : int or None, optional + Axis along which the power mean is computed. Default is 0. + If None, compute over the whole array `a`. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults to the + dtype of `a`, unless `a` has an integer `dtype` with a precision less + than that of the default platform integer. In that case, the default + platform integer is used. + weights : array_like, optional + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given `axis`) or of the same shape as `a`. + Default is None, which gives each value a weight of 1.0. + + Returns + ------- + pmean : ndarray, see `dtype` parameter above. + Output array containing the power mean values. + + See Also + -------- + numpy.average : Weighted average + gmean : Geometric mean + hmean : Harmonic mean + + Notes + ----- + The power mean is computed over a single dimension of the input + array, ``axis=0`` by default, or all values in the array if ``axis=None``. + float64 intermediate and return values are used for integer inputs. + + .. versionadded:: 1.9 + + References + ---------- + .. [1] "Generalized Mean", *Wikipedia*, + https://en.wikipedia.org/wiki/Generalized_mean + .. [2] Norris, N., "Convexity properties of generalized mean value + functions", The Annals of Mathematical Statistics, vol. 8, + pp. 118-120, 1937 + .. 
[3] Bullen, P.S., Handbook of Means and Their Inequalities, 2003 + + Examples + -------- + >>> from scipy.stats import pmean, hmean, gmean + >>> pmean([1, 4], 1.3) + 2.639372938300652 + >>> pmean([1, 2, 3, 4, 5, 6, 7], 1.3) + 4.157111214492084 + >>> pmean([1, 4, 7], -2, weights=[3, 1, 3]) + 1.4969684896631954 + + For p=-1, power mean is equal to harmonic mean: + + >>> pmean([1, 4, 7], -1, weights=[3, 1, 3]) + 1.9029126213592233 + >>> hmean([1, 4, 7], weights=[3, 1, 3]) + 1.9029126213592233 + + For p=0, power mean is defined as the geometric mean: + + >>> pmean([1, 4, 7], 0, weights=[3, 1, 3]) + 2.80668351922014 + >>> gmean([1, 4, 7], weights=[3, 1, 3]) + 2.80668351922014 + + """ + if not isinstance(p, (int, float)): + raise ValueError("Power mean only defined for exponent of type int or " + "float.") + if p == 0: + return gmean(a, axis=axis, dtype=dtype, weights=weights) + + if not isinstance(a, np.ndarray): + a = np.array(a, dtype=dtype) + elif dtype: + # Must change the default dtype allowing array type + if isinstance(a, np.ma.MaskedArray): + a = np.ma.asarray(a, dtype=dtype) + else: + a = np.asarray(a, dtype=dtype) + + if np.all(a >= 0): + # Power mean only defined if greater than or equal to zero + if weights is not None: + weights = np.asanyarray(weights, dtype=dtype) + + with np.errstate(divide='ignore'): + return np.float_power( + np.average(np.float_power(a, p), axis=axis, weights=weights), + 1/p) + else: + raise ValueError("Power mean only defined if all elements greater " + "than or equal to zero") + + +ModeResult = namedtuple('ModeResult', ('mode', 'count')) + + +def _mode_result(mode, count): + # When a slice is empty, `_axis_nan_policy` automatically produces + # NaN for `mode` and `count`. This is a reasonable convention for `mode`, + # but `count` should not be NaN; it should be zero. + i = np.isnan(count) + if i.shape == (): + count = count.dtype(0) if i else count + else: + count[i] = 0 + return ModeResult(mode, count) + + +@_axis_nan_policy_factory(_mode_result, override={'vectorization': True, + 'nan_propagation': False}) +def mode(a, axis=0, nan_policy='propagate', keepdims=False): + r"""Return an array of the modal (most common) value in the passed array. + + If there is more than one such value, only one is returned. + The bin-count for the modal bins is also returned. + + Parameters + ---------- + a : array_like + Numeric, n-dimensional array of which to find mode(s). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': treats nan as it would treat any other value + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + keepdims : bool, optional + If set to ``False``, the `axis` over which the statistic is taken + is consumed (eliminated from the output array). If set to ``True``, + the `axis` is retained with size one, and the result will broadcast + correctly against the input array. + + Returns + ------- + mode : ndarray + Array of modal values. + count : ndarray + Array of counts for each mode. + + Notes + ----- + The mode is calculated using `numpy.unique`. + In NumPy versions 1.21 and after, all NaNs - even those with different + binary representations - are treated as equivalent and counted as separate + instances of the same value. 
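# Illustrative sketch (not part of the library code above): the `numpy.unique`
# route described in the preceding note reduces to picking the value with the
# largest count.
import numpy as np

sample = np.array([3, 2, 6, 2, 3, 0, 3])
vals, cnts = np.unique(sample, return_counts=True)   # sorted values and counts
print(vals[cnts.argmax()], cnts.max())               # mode 3 occurs 3 times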
+ + By convention, the mode of an empty array is NaN, and the associated count + is zero. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[3, 0, 3, 7], + ... [3, 2, 6, 2], + ... [1, 7, 2, 8], + ... [3, 0, 6, 1], + ... [3, 2, 5, 5]]) + >>> from scipy import stats + >>> stats.mode(a, keepdims=True) + ModeResult(mode=array([[3, 0, 6, 1]]), count=array([[4, 2, 2, 1]])) + + To get mode of whole array, specify ``axis=None``: + + >>> stats.mode(a, axis=None, keepdims=True) + ModeResult(mode=[[3]], count=[[5]]) + >>> stats.mode(a, axis=None, keepdims=False) + ModeResult(mode=3, count=5) + + """ + # `axis`, `nan_policy`, and `keepdims` are handled by `_axis_nan_policy` + if not np.issubdtype(a.dtype, np.number): + message = ("Argument `a` is not recognized as numeric. " + "Support for input that cannot be coerced to a numeric " + "array was deprecated in SciPy 1.9.0 and removed in SciPy " + "1.11.0. Please consider `np.unique`.") + raise TypeError(message) + + if a.size == 0: + NaN = _get_nan(a) + return ModeResult(*np.array([NaN, 0], dtype=NaN.dtype)) + + vals, cnts = np.unique(a, return_counts=True) + modes, counts = vals[cnts.argmax()], cnts.max() + return ModeResult(modes[()], counts[()]) + + +def _put_nan_to_limits(a, limits, inclusive): + """Put NaNs in an array for values outside of given limits. + + This is primarily a utility function. + + Parameters + ---------- + a : array + limits : (float or None, float or None) + A tuple consisting of the (lower limit, upper limit). Values in the + input array less than the lower limit or greater than the upper limit + will be replaced with `np.nan`. None implies no limit. + inclusive : (bool, bool) + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to lower or upper are allowed. + + """ + if limits is None: + return a + mask = np.full_like(a, False, dtype=np.bool_) + lower_limit, upper_limit = limits + lower_include, upper_include = inclusive + if lower_limit is not None: + mask |= (a < lower_limit) if lower_include else a <= lower_limit + if upper_limit is not None: + mask |= (a > upper_limit) if upper_include else a >= upper_limit + if np.all(mask): + raise ValueError("No array values within given limits") + if np.any(mask): + a = a.copy() if np.issubdtype(a.dtype, np.inexact) else a.astype(np.float64) + a[mask] = np.nan + return a + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, default_axis=None, + result_to_tuple=lambda x: (x,) +) +def tmean(a, limits=None, inclusive=(True, True), axis=None): + """Compute the trimmed mean. + + This function finds the arithmetic mean of given values, ignoring values + outside the given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None (default), then all + values are used. Either of the limit values in the tuple can also be + None representing a half-open interval. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to compute test. Default is None. + + Returns + ------- + tmean : ndarray + Trimmed mean. + + See Also + -------- + trim_mean : Returns mean after trimming a proportion from both tails. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tmean(x) + 9.5 + >>> stats.tmean(x, (3,17)) + 10.0 + + """ + a = _put_nan_to_limits(a, limits, inclusive) + return np.nanmean(a, axis=axis) + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """Compute the trimmed variance. + + This function computes the sample variance of an array of values, + while ignoring values which are outside of given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tvar : float + Trimmed variance. + + Notes + ----- + `tvar` computes the unbiased sample variance, i.e. it uses a correction + factor ``n / (n - 1)``. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tvar(x) + 35.0 + >>> stats.tvar(x, (3,17)) + 20.0 + + """ + a = _put_nan_to_limits(a, limits, inclusive) + return np.nanvar(a, ddof=ddof, axis=axis) + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'): + """Compute the trimmed minimum. + + This function finds the minimum value of an array `a` along the + specified axis, but only considering values greater than a specified + lower limit. + + Parameters + ---------- + a : array_like + Array of values. + lowerlimit : None or float, optional + Values in the input array less than the given limit will be ignored. + When lowerlimit is None, then all values are used. The default value + is None. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + inclusive : {True, False}, optional + This flag determines whether values exactly equal to the lower limit + are included. The default value is True. + + Returns + ------- + tmin : float, int or ndarray + Trimmed minimum. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tmin(x) + 0 + + >>> stats.tmin(x, 13) + 13 + + >>> stats.tmin(x, 13, inclusive=False) + 14 + + """ + dtype = a.dtype + a = _put_nan_to_limits(a, (lowerlimit, None), (inclusive, None)) + res = np.nanmin(a, axis=axis) + if not np.any(np.isnan(res)): + # needed if input is of integer dtype + return res.astype(dtype, copy=False) + return res + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'): + """Compute the trimmed maximum. 
+ + This function computes the maximum value of an array along a given axis, + while ignoring values larger than a specified upper limit. + + Parameters + ---------- + a : array_like + Array of values. + upperlimit : None or float, optional + Values in the input array greater than the given limit will be ignored. + When upperlimit is None, then all values are used. The default value + is None. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + inclusive : {True, False}, optional + This flag determines whether values exactly equal to the upper limit + are included. The default value is True. + + Returns + ------- + tmax : float, int or ndarray + Trimmed maximum. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tmax(x) + 19 + + >>> stats.tmax(x, 13) + 13 + + >>> stats.tmax(x, 13, inclusive=False) + 12 + + """ + dtype = a.dtype + a = _put_nan_to_limits(a, (None, upperlimit), (None, inclusive)) + res = np.nanmax(a, axis=axis) + if not np.any(np.isnan(res)): + # needed if input is of integer dtype + return res.astype(dtype, copy=False) + return res + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """Compute the trimmed sample standard deviation. + + This function finds the sample standard deviation of given values, + ignoring values outside the given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. + inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tstd : float + Trimmed sample standard deviation. + + Notes + ----- + `tstd` computes the unbiased sample standard deviation, i.e. it uses a + correction factor ``n / (n - 1)``. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tstd(x) + 5.9160797830996161 + >>> stats.tstd(x, (3,17)) + 4.4721359549995796 + + """ + return np.sqrt(tvar(a, limits, inclusive, axis, ddof, _no_deco=True)) + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,) +) +def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1): + """Compute the trimmed standard error of the mean. + + This function finds the standard error of the mean for given + values, ignoring values outside the given `limits`. + + Parameters + ---------- + a : array_like + Array of values. + limits : None or (lower limit, upper limit), optional + Values in the input array less than the lower limit or greater than the + upper limit will be ignored. When limits is None, then all values are + used. Either of the limit values in the tuple can also be None + representing a half-open interval. The default value is None. 
+ inclusive : (bool, bool), optional + A tuple consisting of the (lower flag, upper flag). These flags + determine whether values exactly equal to the lower or upper limits + are included. The default value is (True, True). + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over the + whole array `a`. + ddof : int, optional + Delta degrees of freedom. Default is 1. + + Returns + ------- + tsem : float + Trimmed standard error of the mean. + + Notes + ----- + `tsem` uses unbiased sample standard deviation, i.e. it uses a + correction factor ``n / (n - 1)``. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = np.arange(20) + >>> stats.tsem(x) + 1.3228756555322954 + >>> stats.tsem(x, (3,17)) + 1.1547005383792515 + + """ + a = _put_nan_to_limits(a, limits, inclusive) + sd = np.sqrt(np.nanvar(a, ddof=ddof, axis=axis)) + n_obs = (~np.isnan(a)).sum(axis=axis) + return sd / np.sqrt(n_obs, dtype=sd.dtype) + + +##################################### +# MOMENTS # +##################################### + + +def _moment_outputs(kwds): + moment = np.atleast_1d(kwds.get('order', 1)) + if moment.size == 0: + raise ValueError("'order' must be a scalar or a non-empty 1D " + "list/array.") + return len(moment) + + +def _moment_result_object(*args): + if len(args) == 1: + return args[0] + return np.asarray(args) + +# `moment` fits into the `_axis_nan_policy` pattern, but it is a bit unusual +# because the number of outputs is variable. Specifically, +# `result_to_tuple=lambda x: (x,)` may be surprising for a function that +# can produce more than one output, but it is intended here. +# When `moment is called to produce the output: +# - `result_to_tuple` packs the returned array into a single-element tuple, +# - `_moment_result_object` extracts and returns that single element. +# However, when the input array is empty, `moment` is never called. Instead, +# - `_check_empty_inputs` is used to produce an empty array with the +# appropriate dimensions. +# - A list comprehension creates the appropriate number of copies of this +# array, depending on `n_outputs`. +# - This list - which may have multiple elements - is passed into +# `_moment_result_object`. +# - If there is a single output, `_moment_result_object` extracts and returns +# the single output from the list. +# - If there are multiple outputs, and therefore multiple elements in the list, +# `_moment_result_object` converts the list of arrays to a single array and +# returns it. +# Currently this leads to a slight inconsistency: when the input array is +# empty, there is no distinction between the `moment` function being called +# with parameter `order=1` and `order=[1]`; the latter *should* produce +# the same as the former but with a singleton zeroth dimension. +@_rename_parameter('moment', 'order') +@_axis_nan_policy_factory( # noqa: E302 + _moment_result_object, n_samples=1, result_to_tuple=lambda x: (x,), + n_outputs=_moment_outputs +) +def moment(a, order=1, axis=0, nan_policy='propagate', *, center=None): + r"""Calculate the nth moment about the mean for a sample. + + A moment is a specific quantitative measure of the shape of a set of + points. It is often used to calculate coefficients of skewness and kurtosis + due to its close relationship with them. + + Parameters + ---------- + a : array_like + Input array. + order : int or array_like of ints, optional + Order of central moment that is returned. Default is 1. 
+ axis : int or None, optional + Axis along which the central moment is computed. Default is 0. + If None, compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + center : float or None, optional + The point about which moments are taken. This can be the sample mean, + the origin, or any other point. If `None` (default), compute the + center as the sample mean. + + Returns + ------- + n-th moment about the `center` : ndarray or float + The appropriate moment along the given axis or over all values if axis + is None. The denominator for the moment calculation is the number of + observations; no degrees of freedom correction is done. + + See Also + -------- + kurtosis, skew, describe + + Notes + ----- + The k-th moment of a data sample is: + + .. math:: + + m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - c)^k + + Where `n` is the number of samples, and `c` is the center around which the + moment is calculated. This function uses exponentiation by squares [1]_ for + efficiency. + + Note that, if `a` is an empty array (``a.size == 0``), array `moment` with + one element (`moment.size == 1`) is treated the same as scalar `moment` + (``np.isscalar(moment)``). This might produce arrays of unexpected shape. + + References + ---------- + .. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms + + Examples + -------- + >>> from scipy.stats import moment + >>> moment([1, 2, 3, 4, 5], order=1) + 0.0 + >>> moment([1, 2, 3, 4, 5], order=2) + 2.0 + + """ + moment = order # parameter was renamed + a, axis = _chk_asarray(a, axis) + + # for array_like moment input, return a value for each. + if not np.isscalar(moment): + # Calculate the mean once at most, and only if it will be used + calculate_mean = center is None and np.any(np.asarray(moment) > 1) + mean = a.mean(axis, keepdims=True) if calculate_mean else None + mmnt = [] + for i in moment: + if center is None and i > 1: + mmnt.append(_moment(a, i, axis, mean=mean)) + else: + mmnt.append(_moment(a, i, axis, mean=center)) + return np.array(mmnt) + else: + return _moment(a, moment, axis, mean=center) + + +# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True) +def _moment(a, moment, axis, *, mean=None): + if np.abs(moment - np.round(moment)) > 0: + raise ValueError("All moment parameters must be integers") + + # moment of empty array is the same regardless of order + if a.size == 0: + return np.mean(a, axis=axis) + + dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64 + + if moment == 0 or (moment == 1 and mean is None): + # By definition the zeroth moment is always 1, and the first *central* + # moment is 0.
+ shape = list(a.shape) + del shape[axis] + + if len(shape) == 0: + return dtype(1.0 if moment == 0 else 0.0) + else: + return (np.ones(shape, dtype=dtype) if moment == 0 + else np.zeros(shape, dtype=dtype)) + else: + # Exponentiation by squares: form exponent sequence + n_list = [moment] + current_n = moment + while current_n > 2: + if current_n % 2: + current_n = (current_n - 1) / 2 + else: + current_n /= 2 + n_list.append(current_n) + + # Starting point for exponentiation by squares + mean = (a.mean(axis, keepdims=True) if mean is None + else np.asarray(mean, dtype=dtype)[()]) + a_zero_mean = a - mean + + eps = np.finfo(a_zero_mean.dtype).resolution * 10 + with np.errstate(divide='ignore', invalid='ignore'): + rel_diff = np.max(np.abs(a_zero_mean), axis=axis, + keepdims=True) / np.abs(mean) + with np.errstate(invalid='ignore'): + precision_loss = np.any(rel_diff < eps) + n = a.shape[axis] if axis is not None else a.size + if precision_loss and n > 1: + message = ("Precision loss occurred in moment calculation due to " + "catastrophic cancellation. This occurs when the data " + "are nearly identical. Results may be unreliable.") + warnings.warn(message, RuntimeWarning, stacklevel=4) + + if n_list[-1] == 1: + s = a_zero_mean.copy() + else: + s = a_zero_mean**2 + + # Perform multiplications + for n in n_list[-2::-1]: + s = s**2 + if n % 2: + s *= a_zero_mean + return np.mean(s, axis) + + +def _var(x, axis=0, ddof=0, mean=None): + # Calculate variance of sample, warning if precision is lost + var = _moment(x, 2, axis, mean=mean) + if ddof != 0: + n = x.shape[axis] if axis is not None else x.size + var *= np.divide(n, n-ddof) # to avoid error on division by zero + return var + + +@_axis_nan_policy_factory( + lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1 +) +def skew(a, axis=0, bias=True, nan_policy='propagate'): + r"""Compute the sample skewness of a data set. + + For normally distributed data, the skewness should be about zero. For + unimodal continuous distributions, a skewness value greater than zero means + that there is more weight in the right tail of the distribution. The + function `skewtest` can be used to determine if the skewness value + is close enough to zero, statistically speaking. + + Parameters + ---------- + a : ndarray + Input array. + axis : int or None, optional + Axis along which skewness is calculated. Default is 0. + If None, compute over the whole array `a`. + bias : bool, optional + If False, then the calculations are corrected for statistical bias. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + skewness : ndarray + The skewness of values along an axis, returning NaN where all values + are equal. + + Notes + ----- + The sample skewness is computed as the Fisher-Pearson coefficient + of skewness, i.e. + + .. math:: + + g_1=\frac{m_3}{m_2^{3/2}} + + where + + .. math:: + + m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i + + is the biased sample :math:`i\texttt{th}` central moment, and + :math:`\bar{x}` is + the sample mean. If ``bias`` is False, the calculations are + corrected for bias and the value computed is the adjusted + Fisher-Pearson standardized moment coefficient, i.e. + + .. math:: + + G_1=\frac{k_3}{k_2^{3/2}}= + \frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}. 
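# A quick numerical check of the skewness formulas above, using plain NumPy
# central moments; the data are the ones reused in the Examples that follow.
import numpy as np
from scipy import stats

x = np.array([2, 8, 0, 4, 1, 9, 9, 0], dtype=float)
n = x.size
m2 = np.mean((x - x.mean())**2)               # biased second central moment
m3 = np.mean((x - x.mean())**3)               # biased third central moment
g1 = m3 / m2**1.5                             # Fisher-Pearson coefficient (bias=True)
G1 = np.sqrt(n * (n - 1)) / (n - 2) * g1      # bias-corrected coefficient (bias=False)
print(g1, stats.skew(x))                      # both ~0.26506
print(G1, stats.skew(x, bias=False))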
+ + References + ---------- + .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + Section 2.2.24.1 + + Examples + -------- + >>> from scipy.stats import skew + >>> skew([1, 2, 3, 4, 5]) + 0.0 + >>> skew([2, 8, 0, 4, 1, 9, 9, 0]) + 0.2650554122698573 + + """ + a, axis = _chk_asarray(a, axis) + n = a.shape[axis] + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.skew(a, axis, bias) + + mean = a.mean(axis, keepdims=True) + m2 = _moment(a, 2, axis, mean=mean) + m3 = _moment(a, 3, axis, mean=mean) + with np.errstate(all='ignore'): + zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2) + vals = np.where(zero, np.nan, m3 / m2**1.5) + if not bias: + can_correct = ~zero & (n > 2) + if can_correct.any(): + m2 = np.extract(can_correct, m2) + m3 = np.extract(can_correct, m3) + nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5 + np.place(vals, can_correct, nval) + + return vals[()] + + +@_axis_nan_policy_factory( + lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1 +) +def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'): + """Compute the kurtosis (Fisher or Pearson) of a dataset. + + Kurtosis is the fourth central moment divided by the square of the + variance. If Fisher's definition is used, then 3.0 is subtracted from + the result to give 0.0 for a normal distribution. + + If bias is False then the kurtosis is calculated using k statistics to + eliminate bias coming from biased moment estimators + + Use `kurtosistest` to see if result is close enough to normal. + + Parameters + ---------- + a : array + Data for which the kurtosis is calculated. + axis : int or None, optional + Axis along which the kurtosis is calculated. Default is 0. + If None, compute over the whole array `a`. + fisher : bool, optional + If True, Fisher's definition is used (normal ==> 0.0). If False, + Pearson's definition is used (normal ==> 3.0). + bias : bool, optional + If False, then the calculations are corrected for statistical bias. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + kurtosis : array + The kurtosis of values along an axis, returning NaN where all values + are equal. + + References + ---------- + .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + + Examples + -------- + In Fisher's definition, the kurtosis of the normal distribution is zero. + In the following example, the kurtosis is close to zero, because it was + calculated from the dataset, not from the continuous distribution. + + >>> import numpy as np + >>> from scipy.stats import norm, kurtosis + >>> data = norm.rvs(size=1000, random_state=3) + >>> kurtosis(data) + -0.06928694200380558 + + The distribution with a higher kurtosis has a heavier tail. + The zero valued kurtosis of the normal distribution in Fisher's definition + can serve as a reference point. 
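# A hedged cross-check of the definition above: Fisher (excess) kurtosis is the
# fourth central moment over the squared second moment, minus 3, with no bias
# correction when ``bias=True`` (the default).
import numpy as np
from scipy.stats import norm, kurtosis

data = norm.rvs(size=1000, random_state=3)
m2 = np.mean((data - data.mean())**2)
m4 = np.mean((data - data.mean())**4)
print(m4 / m2**2 - 3)        # ~-0.0693, matching kurtosis(data) above
print(kurtosis(data))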
+ + >>> import matplotlib.pyplot as plt + >>> import scipy.stats as stats + >>> from scipy.stats import kurtosis + + >>> x = np.linspace(-5, 5, 100) + >>> ax = plt.subplot() + >>> distnames = ['laplace', 'norm', 'uniform'] + + >>> for distname in distnames: + ... if distname == 'uniform': + ... dist = getattr(stats, distname)(loc=-2, scale=4) + ... else: + ... dist = getattr(stats, distname) + ... data = dist.rvs(size=1000) + ... kur = kurtosis(data, fisher=True) + ... y = dist.pdf(x) + ... ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3))) + ... ax.legend() + + The Laplace distribution has a heavier tail than the normal distribution. + The uniform distribution (which has negative kurtosis) has the thinnest + tail. + + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.kurtosis(a, axis, fisher, bias) + + n = a.shape[axis] + mean = a.mean(axis, keepdims=True) + m2 = _moment(a, 2, axis, mean=mean) + m4 = _moment(a, 4, axis, mean=mean) + with np.errstate(all='ignore'): + zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2) + vals = np.where(zero, np.nan, m4 / m2**2.0) + + if not bias: + can_correct = ~zero & (n > 3) + if can_correct.any(): + m2 = np.extract(can_correct, m2) + m4 = np.extract(can_correct, m4) + nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0) + np.place(vals, can_correct, nval + 3.0) + + return vals[()] - 3 if fisher else vals[()] + + +DescribeResult = namedtuple('DescribeResult', + ('nobs', 'minmax', 'mean', 'variance', 'skewness', + 'kurtosis')) + + +def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'): + """Compute several descriptive statistics of the passed array. + + Parameters + ---------- + a : array_like + Input data. + axis : int or None, optional + Axis along which statistics are calculated. Default is 0. + If None, compute over the whole array `a`. + ddof : int, optional + Delta degrees of freedom (only for variance). Default is 1. + bias : bool, optional + If False, then the skewness and kurtosis calculations are corrected + for statistical bias. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + nobs : int or ndarray of ints + Number of observations (length of data along `axis`). + When 'omit' is chosen as nan_policy, the length along each axis + slice is counted separately. + minmax: tuple of ndarrays or floats + Minimum and maximum value of `a` along the given axis. + mean : ndarray or float + Arithmetic mean of `a` along the given axis. + variance : ndarray or float + Unbiased variance of `a` along the given axis; denominator is number + of observations minus one. + skewness : ndarray or float + Skewness of `a` along the given axis, based on moment calculations + with denominator equal to the number of observations, i.e. no degrees + of freedom correction. + kurtosis : ndarray or float + Kurtosis (Fisher) of `a` along the given axis. The kurtosis is + normalized so that it is zero for the normal distribution. No + degrees of freedom are used. 
+ + See Also + -------- + skew, kurtosis + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> a = np.arange(10) + >>> stats.describe(a) + DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, + variance=9.166666666666666, skewness=0.0, + kurtosis=-1.2242424242424244) + >>> b = [[1, 2], [3, 4]] + >>> stats.describe(b) + DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])), + mean=array([2., 3.]), variance=array([2., 2.]), + skewness=array([0., 0.]), kurtosis=array([-2., -2.])) + + """ + a, axis = _chk_asarray(a, axis) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + a = ma.masked_invalid(a) + return mstats_basic.describe(a, axis, ddof, bias) + + if a.size == 0: + raise ValueError("The input must not be empty.") + n = a.shape[axis] + mm = (np.min(a, axis=axis), np.max(a, axis=axis)) + m = np.mean(a, axis=axis) + v = _var(a, axis=axis, ddof=ddof) + sk = skew(a, axis, bias=bias) + kurt = kurtosis(a, axis, bias=bias) + + return DescribeResult(n, mm, m, v, sk, kurt) + +##################################### +# NORMALITY TESTS # +##################################### + + +def _get_pvalue(statistic, distribution, alternative, symmetric=True): + """Get p-value given the statistic, (continuous) distribution, and alternative""" + + if alternative == 'less': + pvalue = distribution.cdf(statistic) + elif alternative == 'greater': + pvalue = distribution.sf(statistic) + elif alternative == 'two-sided': + pvalue = 2 * (distribution.sf(np.abs(statistic)) if symmetric + else np.minimum(distribution.cdf(statistic), + distribution.sf(statistic))) + else: + message = "`alternative` must be 'less', 'greater', or 'two-sided'." + raise ValueError(message) + + return pvalue + + +SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(SkewtestResult, n_samples=1, too_small=7) +def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'): + r"""Test whether the skew is different from the normal distribution. + + This function tests the null hypothesis that the skewness of + the population that the sample was drawn from is the same + as that of a corresponding normal distribution. + + Parameters + ---------- + a : array + The data to be tested. + axis : int or None, optional + Axis along which statistics are calculated. Default is 0. + If None, compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the skewness of the distribution underlying the sample + is different from that of the normal distribution (i.e. 0) + * 'less': the skewness of the distribution underlying the sample + is less than that of the normal distribution + * 'greater': the skewness of the distribution underlying the sample + is greater than that of the normal distribution + + .. versionadded:: 1.7.0 + + Returns + ------- + statistic : float + The computed z-score for this test. + pvalue : float + The p-value for the hypothesis test. + + Notes + ----- + The sample size must be at least 8. + + References + ---------- + .. [1] R. B. D'Agostino, A. J. 
Belanger and R. B. D'Agostino Jr., + "A suggestion for using powerful and informative tests of + normality", American Statistician 44, pp. 316-321, 1990. + .. [2] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test + for normality (complete samples). Biometrika, 52(3/4), 591-611. + .. [3] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + + Examples + -------- + Suppose we wish to infer from measurements whether the weights of adult + human males in a medical study are not normally distributed [2]_. + The weights (lbs) are recorded in the array ``x`` below. + + >>> import numpy as np + >>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236]) + + The skewness test from [1]_ begins by computing a statistic based on the + sample skewness. + + >>> from scipy import stats + >>> res = stats.skewtest(x) + >>> res.statistic + 2.7788579769903414 + + Because normal distributions have zero skewness, the magnitude of this + statistic tends to be low for samples drawn from a normal distribution. + + The test is performed by comparing the observed value of the + statistic against the null distribution: the distribution of statistic + values derived under the null hypothesis that the weights were drawn from + a normal distribution. + + For this test, the null distribution of the statistic for very large + samples is the standard normal distribution. + + >>> import matplotlib.pyplot as plt + >>> dist = stats.norm() + >>> st_val = np.linspace(-5, 5, 100) + >>> pdf = dist.pdf(st_val) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def st_plot(ax): # we'll reuse this + ... ax.plot(st_val, pdf) + ... ax.set_title("Skew Test Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + >>> st_plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution as extreme or more extreme than the observed + value of the statistic. In a two-sided test, elements of the null + distribution greater than the observed statistic and elements of the null + distribution less than the negative of the observed statistic are both + considered "more extreme". + + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> st_plot(ax) + >>> pvalue = dist.cdf(-res.statistic) + dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.3f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (3, 0.005), (3.25, 0.02), arrowprops=props) + >>> i = st_val >= res.statistic + >>> ax.fill_between(st_val[i], y1=0, y2=pdf[i], color='C0') + >>> i = st_val <= -res.statistic + >>> ax.fill_between(st_val[i], y1=0, y2=pdf[i], color='C0') + >>> ax.set_xlim(-5, 5) + >>> ax.set_ylim(0, 0.1) + >>> plt.show() + >>> res.pvalue + 0.005455036974740185 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from a normally distributed population that produces such an + extreme value of the statistic - this may be taken as evidence against + the null hypothesis in favor of the alternative: the weights were not + drawn from a normal distribution. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. 
+ - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [3]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + + Note that the standard normal distribution provides an asymptotic + approximation of the null distribution; it is only accurate for samples + with many observations. For small samples like ours, + `scipy.stats.monte_carlo_test` may provide a more accurate, albeit + stochastic, approximation of the exact p-value. + + >>> def statistic(x, axis): + ... # get just the skewtest statistic; ignore the p-value + ... return stats.skewtest(x, axis=axis).statistic + >>> res = stats.monte_carlo_test(x, stats.norm.rvs, statistic) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> st_plot(ax) + >>> ax.hist(res.null_distribution, np.linspace(-5, 5, 50), + ... density=True) + >>> ax.legend(['asymptotic approximation\n(many observations)', + ... 'Monte Carlo approximation\n(11 observations)']) + >>> plt.show() + >>> res.pvalue + 0.0062 # may vary + + In this case, the asymptotic approximation and Monte Carlo approximation + agree fairly closely, even for our small sample. + + """ + b2 = skew(a, axis) + n = a.shape[axis] + if n < 8: + raise ValueError( + "skewtest is not valid with less than 8 samples; %i samples" + " were given." % int(n)) + y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2))) + beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) / + ((n-2.0) * (n+5) * (n+7) * (n+9))) + W2 = -1 + math.sqrt(2 * (beta2 - 1)) + delta = 1 / math.sqrt(0.5 * math.log(W2)) + alpha = math.sqrt(2.0 / (W2 - 1)) + y = np.where(y == 0, 1, y) + Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1)) + + pvalue = _get_pvalue(Z, distributions.norm, alternative) + return SkewtestResult(Z[()], pvalue[()]) + + +KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(KurtosistestResult, n_samples=1, too_small=4) +def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'): + r"""Test whether a dataset has normal kurtosis. + + This function tests the null hypothesis that the kurtosis + of the population from which the sample was drawn is that + of the normal distribution. + + Parameters + ---------- + a : array + Array of the sample data. + axis : int or None, optional + Axis along which to compute test. Default is 0. If None, + compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the kurtosis of the distribution underlying the sample + is different from that of the normal distribution + * 'less': the kurtosis of the distribution underlying the sample + is less than that of the normal distribution + * 'greater': the kurtosis of the distribution underlying the sample + is greater than that of the normal distribution + + .. versionadded:: 1.7.0 + + Returns + ------- + statistic : float + The computed z-score for this test. + pvalue : float + The p-value for the hypothesis test. + + Notes + ----- + Valid only for n>20.
This function uses the method described in [1]_. + + References + ---------- + .. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis + statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983. + .. [2] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test + for normality (complete samples). Biometrika, 52(3/4), 591-611. + .. [3] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + .. [4] Panagiotakos, D. B. (2008). The value of p-value in biomedical + research. The open cardiovascular medicine journal, 2, 97. + + Examples + -------- + Suppose we wish to infer from measurements whether the weights of adult + human males in a medical study are not normally distributed [2]_. + The weights (lbs) are recorded in the array ``x`` below. + + >>> import numpy as np + >>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236]) + + The kurtosis test from [1]_ begins by computing a statistic based on the + sample (excess/Fisher) kurtosis. + + >>> from scipy import stats + >>> res = stats.kurtosistest(x) + >>> res.statistic + 2.3048235214240873 + + (The test warns that our sample has too few observations to perform the + test. We'll return to this at the end of the example.) + Because normal distributions have zero excess kurtosis (by definition), + the magnitude of this statistic tends to be low for samples drawn from a + normal distribution. + + The test is performed by comparing the observed value of the + statistic against the null distribution: the distribution of statistic + values derived under the null hypothesis that the weights were drawn from + a normal distribution. + + For this test, the null distribution of the statistic for very large + samples is the standard normal distribution. + + >>> import matplotlib.pyplot as plt + >>> dist = stats.norm() + >>> kt_val = np.linspace(-5, 5, 100) + >>> pdf = dist.pdf(kt_val) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def kt_plot(ax): # we'll reuse this + ... ax.plot(kt_val, pdf) + ... ax.set_title("Kurtosis Test Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + >>> kt_plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution as extreme or more extreme than the observed + value of the statistic. In a two-sided test in which the statistic is + positive, elements of the null distribution greater than the observed + statistic and elements of the null distribution less than the negative of + the observed statistic are both considered "more extreme". 
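# For the symmetric normal null used here, the two-sided p-value described in
# the preceding paragraph can be written two equivalent ways; a small check
# with the statistic quoted above:
from scipy.stats import norm

z = 2.3048235214240873
print(norm.cdf(-abs(z)) + norm.sf(abs(z)))   # ~0.021176...
print(2 * norm.sf(abs(z)))                   # identical by symmetry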
+ + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> kt_plot(ax) + >>> pvalue = dist.cdf(-res.statistic) + dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.3f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (3, 0.005), (3.25, 0.02), arrowprops=props) + >>> i = kt_val >= res.statistic + >>> ax.fill_between(kt_val[i], y1=0, y2=pdf[i], color='C0') + >>> i = kt_val <= -res.statistic + >>> ax.fill_between(kt_val[i], y1=0, y2=pdf[i], color='C0') + >>> ax.set_xlim(-5, 5) + >>> ax.set_ylim(0, 0.1) + >>> plt.show() + >>> res.pvalue + 0.0211764592113868 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from a normally distributed population that produces such an + extreme value of the statistic - this may be taken as evidence against + the null hypothesis in favor of the alternative: the weights were not + drawn from a normal distribution. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [3]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + + Note that the standard normal distribution provides an asymptotic + approximation of the null distribution; it is only accurate for samples + with many observations. This is the reason we received a warning at the + beginning of the example; our sample is quite small. In this case, + `scipy.stats.monte_carlo_test` may provide a more accurate, albeit + stochastic, approximation of the exact p-value. + + >>> def statistic(x, axis): + ... # get just the kurtosistest statistic; ignore the p-value + ... return stats.kurtosistest(x, axis=axis).statistic + >>> res = stats.monte_carlo_test(x, stats.norm.rvs, statistic) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> kt_plot(ax) + >>> ax.hist(res.null_distribution, np.linspace(-5, 5, 50), + ... density=True) + >>> ax.legend(['asymptotic approximation\n(many observations)', + ... 'Monte Carlo approximation\n(11 observations)']) + >>> plt.show() + >>> res.pvalue + 0.0272 # may vary + + Furthermore, despite their stochastic nature, p-values computed in this way + can be used to exactly control the rate of false rejections of the null + hypothesis [4]_. + + """ + n = a.shape[axis] + if n < 5: + raise ValueError( + "kurtosistest requires at least 5 observations; %i observations" + " were given." % int(n)) + if n < 20: + warnings.warn("kurtosistest only valid for n>=20 ... continuing " + "anyway, n=%i" % int(n), + stacklevel=2) + b2 = kurtosis(a, axis, fisher=False) + + E = 3.0*(n-1) / (n+1) + varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1 + x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4 + # [1]_ Eq. 2: + sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) / + (n*(n-2)*(n-3))) + # [1]_ Eq. 3: + A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2))) + term1 = 1 - 2/(9.0*A) + denom = 1 + x*np.sqrt(2/(A-4.0)) + term2 = np.sign(denom) * np.where(denom == 0.0, np.nan, + np.power((1-2.0/A)/np.abs(denom), 1/3.0)) + if np.any(denom == 0): + msg = ("Test statistic not defined in some cases due to division by " + "zero. 
Return nan in that case...") + warnings.warn(msg, RuntimeWarning, stacklevel=2) + + Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5 + + pvalue = _get_pvalue(Z, distributions.norm, alternative) + return KurtosistestResult(Z[()], pvalue[()]) + + +NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(NormaltestResult, n_samples=1, too_small=7) +def normaltest(a, axis=0, nan_policy='propagate'): + r"""Test whether a sample differs from a normal distribution. + + This function tests the null hypothesis that a sample comes + from a normal distribution. It is based on D'Agostino and + Pearson's [1]_, [2]_ test that combines skew and kurtosis to + produce an omnibus test of normality. + + Parameters + ---------- + a : array_like + The array containing the sample to be tested. + axis : int or None, optional + Axis along which to compute test. Default is 0. If None, + compute over the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + statistic : float or array + ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and + ``k`` is the z-score returned by `kurtosistest`. + pvalue : float or array + A 2-sided chi squared probability for the hypothesis test. + + References + ---------- + .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for + moderate and large sample size", Biometrika, 58, 341-348 + .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from + normality", Biometrika, 60, 613-622 + .. [3] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test + for normality (complete samples). Biometrika, 52(3/4), 591-611. + .. [4] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + .. [5] Panagiotakos, D. B. (2008). The value of p-value in biomedical + research. The open cardiovascular medicine journal, 2, 97. + + Examples + -------- + Suppose we wish to infer from measurements whether the weights of adult + human males in a medical study are not normally distributed [3]_. + The weights (lbs) are recorded in the array ``x`` below. + + >>> import numpy as np + >>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236]) + + The normality test of [1]_ and [2]_ begins by computing a statistic based + on the sample skewness and kurtosis. + + >>> from scipy import stats + >>> res = stats.normaltest(x) + >>> res.statistic + 13.034263121192582 + + (The test warns that our sample has too few observations to perform the + test. We'll return to this at the end of the example.) + Because the normal distribution has zero skewness and zero + ("excess" or "Fisher") kurtosis, the value of this statistic tends to be + low for samples drawn from a normal distribution. + + The test is performed by comparing the observed value of the statistic + against the null distribution: the distribution of statistic values derived + under the null hypothesis that the weights were drawn from a normal + distribution. + For this normality test, the null distribution for very large samples is + the chi-squared distribution with two degrees of freedom. 
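# A sketch of the combination described above: `normaltest` adds the squared
# z-scores from `skewtest` and `kurtosistest` and refers the sum to a
# chi-squared distribution with two degrees of freedom.
import numpy as np
from scipy import stats

x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236])
s = stats.skewtest(x).statistic
k = stats.kurtosistest(x).statistic          # warns because n < 20, as noted above
k2 = s * s + k * k
print(k2, stats.chi2(df=2).sf(k2))           # matches stats.normaltest(x)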
+ + >>> import matplotlib.pyplot as plt + >>> dist = stats.chi2(df=2) + >>> stat_vals = np.linspace(0, 16, 100) + >>> pdf = dist.pdf(stat_vals) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def plot(ax): # we'll reuse this + ... ax.plot(stat_vals, pdf) + ... ax.set_title("Normality Test Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution greater than or equal to the observed value of the + statistic. + + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> pvalue = dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.6f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (13.5, 5e-4), (14, 5e-3), arrowprops=props) + >>> i = stat_vals >= res.statistic # index more extreme statistic values + >>> ax.fill_between(stat_vals[i], y1=0, y2=pdf[i]) + >>> ax.set_xlim(8, 16) + >>> ax.set_ylim(0, 0.01) + >>> plt.show() + >>> res.pvalue + 0.0014779023013100172 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from a normally distributed population that produces such an + extreme value of the statistic - this may be taken as evidence against + the null hypothesis in favor of the alternative: the weights were not + drawn from a normal distribution. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [4]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + + Note that the chi-squared distribution provides an asymptotic + approximation of the null distribution; it is only accurate for samples + with many observations. This is the reason we received a warning at the + beginning of the example; our sample is quite small. In this case, + `scipy.stats.monte_carlo_test` may provide a more accurate, albeit + stochastic, approximation of the exact p-value. + + >>> def statistic(x, axis): + ... # Get only the `normaltest` statistic; ignore approximate p-value + ... return stats.normaltest(x, axis=axis).statistic + >>> res = stats.monte_carlo_test(x, stats.norm.rvs, statistic, + ... alternative='greater') + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> ax.hist(res.null_distribution, np.linspace(0, 25, 50), + ... density=True) + >>> ax.legend(['aymptotic approximation (many observations)', + ... 'Monte Carlo approximation (11 observations)']) + >>> ax.set_xlim(0, 14) + >>> plt.show() + >>> res.pvalue + 0.0082 # may vary + + Furthermore, despite their stochastic nature, p-values computed in this way + can be used to exactly control the rate of false rejections of the null + hypothesis [5]_. + + """ + s, _ = skewtest(a, axis) + k, _ = kurtosistest(a, axis) + k2 = s*s + k*k + + return NormaltestResult(k2, distributions.chi2.sf(k2, 2)) + + +@_axis_nan_policy_factory(SignificanceResult, default_axis=None) +def jarque_bera(x, *, axis=None): + r"""Perform the Jarque-Bera goodness of fit test on sample data. + + The Jarque-Bera test tests whether the sample data has the skewness and + kurtosis matching a normal distribution. 
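+
+    The test statistic is
+
+    .. math:: JB = \frac{n}{6} \left( S^2 + \frac{K^2}{4} \right)
+
+    where :math:`n` is the number of observations, :math:`S` is the sample
+    skewness, and :math:`K` is the sample excess kurtosis.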
+ + Note that this test only works for a large enough number of data samples + (>2000) as the test statistic asymptotically has a Chi-squared distribution + with 2 degrees of freedom. + + Parameters + ---------- + x : array_like + Observations of a random variable. + axis : int or None, default: 0 + If an int, the axis of the input along which to compute the statistic. + The statistic of each axis-slice (e.g. row) of the input will appear in + a corresponding element of the output. + If ``None``, the input will be raveled before computing the statistic. + + Returns + ------- + result : SignificanceResult + An object with the following attributes: + + statistic : float + The test statistic. + pvalue : float + The p-value for the hypothesis test. + + References + ---------- + .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality, + homoscedasticity and serial independence of regression residuals", + 6 Econometric Letters 255-259. + .. [2] Shapiro, S. S., & Wilk, M. B. (1965). An analysis of variance test + for normality (complete samples). Biometrika, 52(3/4), 591-611. + .. [3] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + .. [4] Panagiotakos, D. B. (2008). The value of p-value in biomedical + research. The open cardiovascular medicine journal, 2, 97. + + Examples + -------- + Suppose we wish to infer from measurements whether the weights of adult + human males in a medical study are not normally distributed [2]_. + The weights (lbs) are recorded in the array ``x`` below. + + >>> import numpy as np + >>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236]) + + The Jarque-Bera test begins by computing a statistic based on the sample + skewness and kurtosis. + + >>> from scipy import stats + >>> res = stats.jarque_bera(x) + >>> res.statistic + 6.982848237344646 + + Because the normal distribution has zero skewness and zero + ("excess" or "Fisher") kurtosis, the value of this statistic tends to be + low for samples drawn from a normal distribution. + + The test is performed by comparing the observed value of the statistic + against the null distribution: the distribution of statistic values derived + under the null hypothesis that the weights were drawn from a normal + distribution. + For the Jarque-Bera test, the null distribution for very large samples is + the chi-squared distribution with two degrees of freedom. + + >>> import matplotlib.pyplot as plt + >>> dist = stats.chi2(df=2) + >>> jb_val = np.linspace(0, 11, 100) + >>> pdf = dist.pdf(jb_val) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def jb_plot(ax): # we'll reuse this + ... ax.plot(jb_val, pdf) + ... ax.set_title("Jarque-Bera Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + >>> jb_plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution greater than or equal to the observed value of the + statistic. 
+ + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> jb_plot(ax) + >>> pvalue = dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.6f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (7.5, 0.01), (8, 0.05), arrowprops=props) + >>> i = jb_val >= res.statistic # indices of more extreme statistic values + >>> ax.fill_between(jb_val[i], y1=0, y2=pdf[i]) + >>> ax.set_xlim(0, 11) + >>> ax.set_ylim(0, 0.3) + >>> plt.show() + >>> res.pvalue + 0.03045746622458189 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from a normally distributed population that produces such an + extreme value of the statistic - this may be taken as evidence against + the null hypothesis in favor of the alternative: the weights were not + drawn from a normal distribution. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [3]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + + Note that the chi-squared distribution provides an asymptotic approximation + of the null distribution; it is only accurate for samples with many + observations. For small samples like ours, `scipy.stats.monte_carlo_test` + may provide a more accurate, albeit stochastic, approximation of the + exact p-value. + + >>> def statistic(x, axis): + ... # underlying calculation of the Jarque Bera statistic + ... s = stats.skew(x, axis=axis) + ... k = stats.kurtosis(x, axis=axis) + ... return x.shape[axis]/6 * (s**2 + k**2/4) + >>> res = stats.monte_carlo_test(x, stats.norm.rvs, statistic, + ... alternative='greater') + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> jb_plot(ax) + >>> ax.hist(res.null_distribution, np.linspace(0, 10, 50), + ... density=True) + >>> ax.legend(['aymptotic approximation (many observations)', + ... 'Monte Carlo approximation (11 observations)']) + >>> plt.show() + >>> res.pvalue + 0.0097 # may vary + + Furthermore, despite their stochastic nature, p-values computed in this way + can be used to exactly control the rate of false rejections of the null + hypothesis [4]_. + + """ + x = np.asarray(x) + if axis is None: + x = x.ravel() + axis = 0 + + n = x.shape[axis] + if n == 0: + raise ValueError('At least one observation is required.') + + mu = x.mean(axis=axis, keepdims=True) + diffx = x - mu + s = skew(diffx, axis=axis, _no_deco=True) + k = kurtosis(diffx, axis=axis, _no_deco=True) + statistic = n / 6 * (s**2 + k**2 / 4) + pvalue = distributions.chi2.sf(statistic, df=2) + + return SignificanceResult(statistic, pvalue) + + +##################################### +# FREQUENCY FUNCTIONS # +##################################### + + +def scoreatpercentile(a, per, limit=(), interpolation_method='fraction', + axis=None): + """Calculate the score at a given percentile of the input sequence. + + For example, the score at `per=50` is the median. If the desired quantile + lies between two data points, we interpolate between them, according to + the value of `interpolation`. If the parameter `limit` is provided, it + should be a tuple (lower, upper) of two values. + + Parameters + ---------- + a : array_like + A 1-D array of values from which to extract score. 
+ per : array_like + Percentile(s) at which to extract score. Values should be in range + [0,100]. + limit : tuple, optional + Tuple of two scalars, the lower and upper limits within which to + compute the percentile. Values of `a` outside + this (closed) interval will be ignored. + interpolation_method : {'fraction', 'lower', 'higher'}, optional + Specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j` + The following options are available (default is 'fraction'): + + * 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the + fractional part of the index surrounded by ``i`` and ``j`` + * 'lower': ``i`` + * 'higher': ``j`` + + axis : int, optional + Axis along which the percentiles are computed. Default is None. If + None, compute over the whole array `a`. + + Returns + ------- + score : float or ndarray + Score at percentile(s). + + See Also + -------- + percentileofscore, numpy.percentile + + Notes + ----- + This function will become obsolete in the future. + For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality + that `scoreatpercentile` provides. And it's significantly faster. + Therefore it's recommended to use `numpy.percentile` for users that have + numpy >= 1.9. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> a = np.arange(100) + >>> stats.scoreatpercentile(a, 50) + 49.5 + + """ + # adapted from NumPy's percentile function. When we require numpy >= 1.8, + # the implementation of this function can be replaced by np.percentile. + a = np.asarray(a) + if a.size == 0: + # empty array, return nan(s) with shape matching `per` + if np.isscalar(per): + return np.nan + else: + return np.full(np.asarray(per).shape, np.nan, dtype=np.float64) + + if limit: + a = a[(limit[0] <= a) & (a <= limit[1])] + + sorted_ = np.sort(a, axis=axis) + if axis is None: + axis = 0 + + return _compute_qth_percentile(sorted_, per, interpolation_method, axis) + + +# handle sequence of per's without calling sort multiple times +def _compute_qth_percentile(sorted_, per, interpolation_method, axis): + if not np.isscalar(per): + score = [_compute_qth_percentile(sorted_, i, + interpolation_method, axis) + for i in per] + return np.array(score) + + if not (0 <= per <= 100): + raise ValueError("percentile must be in the range [0, 100]") + + indexer = [slice(None)] * sorted_.ndim + idx = per / 100. * (sorted_.shape[axis] - 1) + + if int(idx) != idx: + # round fractional indices according to interpolation method + if interpolation_method == 'lower': + idx = int(np.floor(idx)) + elif interpolation_method == 'higher': + idx = int(np.ceil(idx)) + elif interpolation_method == 'fraction': + pass # keep idx as fraction and interpolate + else: + raise ValueError("interpolation_method can only be 'fraction', " + "'lower' or 'higher'") + + i = int(idx) + if i == idx: + indexer[axis] = slice(i, i + 1) + weights = array(1) + sumval = 1.0 + else: + indexer[axis] = slice(i, i + 2) + j = i + 1 + weights = array([(j - idx), (idx - i)], float) + wshape = [1] * sorted_.ndim + wshape[axis] = 2 + weights.shape = wshape + sumval = weights.sum() + + # Use np.add.reduce (== np.sum but a little faster) to coerce data type + return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval + + +def percentileofscore(a, score, kind='rank', nan_policy='propagate'): + """Compute the percentile rank of a score relative to a list of scores. 
+ + A `percentileofscore` of, for example, 80% means that 80% of the + scores in `a` are below the given score. In the case of gaps or + ties, the exact definition depends on the optional keyword, `kind`. + + Parameters + ---------- + a : array_like + A 1-D array to which `score` is compared. + score : array_like + Scores to compute percentiles for. + kind : {'rank', 'weak', 'strict', 'mean'}, optional + Specifies the interpretation of the resulting score. + The following options are available (default is 'rank'): + + * 'rank': Average percentage ranking of score. In case of multiple + matches, average the percentage rankings of all matching scores. + * 'weak': This kind corresponds to the definition of a cumulative + distribution function. A percentileofscore of 80% means that 80% + of values are less than or equal to the provided score. + * 'strict': Similar to "weak", except that only values that are + strictly less than the given score are counted. + * 'mean': The average of the "weak" and "strict" scores, often used + in testing. See https://en.wikipedia.org/wiki/Percentile_rank + nan_policy : {'propagate', 'raise', 'omit'}, optional + Specifies how to treat `nan` values in `a`. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan (for each value in `score`). + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + pcos : float + Percentile-position of score (0-100) relative to `a`. + + See Also + -------- + numpy.percentile + scipy.stats.scoreatpercentile, scipy.stats.rankdata + + Examples + -------- + Three-quarters of the given values lie below a given score: + + >>> import numpy as np + >>> from scipy import stats + >>> stats.percentileofscore([1, 2, 3, 4], 3) + 75.0 + + With multiple matches, note how the scores of the two matches, 0.6 + and 0.8 respectively, are averaged: + + >>> stats.percentileofscore([1, 2, 3, 3, 4], 3) + 70.0 + + Only 2/5 values are strictly less than 3: + + >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict') + 40.0 + + But 4/5 values are less than or equal to 3: + + >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak') + 80.0 + + The average between the weak and the strict scores is: + + >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean') + 60.0 + + Score arrays (of any dimensionality) are supported: + + >>> stats.percentileofscore([1, 2, 3, 3, 4], [2, 3]) + array([40., 70.]) + + The inputs can be infinite: + + >>> stats.percentileofscore([-np.inf, 0, 1, np.inf], [1, 2, np.inf]) + array([75., 75., 100.]) + + If `a` is empty, then the resulting percentiles are all `nan`: + + >>> stats.percentileofscore([], [1, 2]) + array([nan, nan]) + """ + + a = np.asarray(a) + n = len(a) + score = np.asarray(score) + + # Nan treatment + cna, npa = _contains_nan(a, nan_policy, use_summation=False) + cns, nps = _contains_nan(score, nan_policy, use_summation=False) + + if (cna or cns) and nan_policy == 'raise': + raise ValueError("The input contains nan values") + + if cns: + # If a score is nan, then the output should be nan + # (also if nan_policy is "omit", because it only applies to `a`) + score = ma.masked_where(np.isnan(score), score) + + if cna: + if nan_policy == "omit": + # Don't count nans + a = ma.masked_where(np.isnan(a), a) + n = a.count() + + if nan_policy == "propagate": + # All outputs should be nans + n = 0 + + # Cannot compare to empty list ==> nan + if n == 0: + perct = np.full_like(score, np.nan, dtype=np.float64) + + else: + # 
Prepare broadcasting + score = score[..., None] + + def count(x): + return np.count_nonzero(x, -1) + + # Main computations/logic + if kind == 'rank': + left = count(a < score) + right = count(a <= score) + plus1 = left < right + perct = (left + right + plus1) * (50.0 / n) + elif kind == 'strict': + perct = count(a < score) * (100.0 / n) + elif kind == 'weak': + perct = count(a <= score) * (100.0 / n) + elif kind == 'mean': + left = count(a < score) + right = count(a <= score) + perct = (left + right) * (50.0 / n) + else: + raise ValueError( + "kind can only be 'rank', 'strict', 'weak' or 'mean'") + + # Re-insert nan values + perct = ma.filled(perct, np.nan) + + if perct.ndim == 0: + return perct[()] + return perct + + +HistogramResult = namedtuple('HistogramResult', + ('count', 'lowerlimit', 'binsize', 'extrapoints')) + + +def _histogram(a, numbins=10, defaultlimits=None, weights=None, + printextras=False): + """Create a histogram. + + Separate the range into several bins and return the number of instances + in each bin. + + Parameters + ---------- + a : array_like + Array of scores which will be put into bins. + numbins : int, optional + The number of bins to use for the histogram. Default is 10. + defaultlimits : tuple (lower, upper), optional + The lower and upper values for the range of the histogram. + If no value is given, a range slightly larger than the range of the + values in a is used. Specifically ``(a.min() - s, a.max() + s)``, + where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. + weights : array_like, optional + The weights for each value in `a`. Default is None, which gives each + value a weight of 1.0 + printextras : bool, optional + If True, if there are extra points (i.e. the points that fall outside + the bin limits) a warning is raised saying how many of those points + there are. Default is False. + + Returns + ------- + count : ndarray + Number of points (or sum of weights) in each bin. + lowerlimit : float + Lowest value of histogram, the lower limit of the first bin. + binsize : float + The size of the bins (all bins have the same size). + extrapoints : int + The number of points outside the range of the histogram. + + See Also + -------- + numpy.histogram + + Notes + ----- + This histogram is based on numpy's histogram but has a larger range by + default if default limits is not set. + + """ + a = np.ravel(a) + if defaultlimits is None: + if a.size == 0: + # handle empty arrays. Undetermined range, so use 0-1. + defaultlimits = (0, 1) + else: + # no range given, so use values in `a` + data_min = a.min() + data_max = a.max() + # Have bins extend past min and max values slightly + s = (data_max - data_min) / (2. 
* (numbins - 1.)) + defaultlimits = (data_min - s, data_max + s) + + # use numpy's histogram method to compute bins + hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits, + weights=weights) + # hist are not always floats, convert to keep with old output + hist = np.array(hist, dtype=float) + # fixed width for bins is assumed, as numpy's histogram gives + # fixed width bins for int values for 'bins' + binsize = bin_edges[1] - bin_edges[0] + # calculate number of extra points + extrapoints = len([v for v in a + if defaultlimits[0] > v or v > defaultlimits[1]]) + if extrapoints > 0 and printextras: + warnings.warn("Points outside given histogram range = %s" % extrapoints, + stacklevel=3,) + + return HistogramResult(hist, defaultlimits[0], binsize, extrapoints) + + +CumfreqResult = namedtuple('CumfreqResult', + ('cumcount', 'lowerlimit', 'binsize', + 'extrapoints')) + + +def cumfreq(a, numbins=10, defaultreallimits=None, weights=None): + """Return a cumulative frequency histogram, using the histogram function. + + A cumulative histogram is a mapping that counts the cumulative number of + observations in all of the bins up to the specified bin. + + Parameters + ---------- + a : array_like + Input array. + numbins : int, optional + The number of bins to use for the histogram. Default is 10. + defaultreallimits : tuple (lower, upper), optional + The lower and upper values for the range of the histogram. + If no value is given, a range slightly larger than the range of the + values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``, + where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. + weights : array_like, optional + The weights for each value in `a`. Default is None, which gives each + value a weight of 1.0 + + Returns + ------- + cumcount : ndarray + Binned values of cumulative frequency. + lowerlimit : float + Lower real limit + binsize : float + Width of each bin. + extrapoints : int + Extra points. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> x = [1, 4, 2, 1, 3, 1] + >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5)) + >>> res.cumcount + array([ 1., 2., 3., 3.]) + >>> res.extrapoints + 3 + + Create a normal distribution with 1000 random values + + >>> samples = stats.norm.rvs(size=1000, random_state=rng) + + Calculate cumulative frequencies + + >>> res = stats.cumfreq(samples, numbins=25) + + Calculate space of values for x + + >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size, + ... res.cumcount.size) + + Plot histogram and cumulative histogram + + >>> fig = plt.figure(figsize=(10, 4)) + >>> ax1 = fig.add_subplot(1, 2, 1) + >>> ax2 = fig.add_subplot(1, 2, 2) + >>> ax1.hist(samples, bins=25) + >>> ax1.set_title('Histogram') + >>> ax2.bar(x, res.cumcount, width=res.binsize) + >>> ax2.set_title('Cumulative histogram') + >>> ax2.set_xlim([x.min(), x.max()]) + + >>> plt.show() + + """ + h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights) + cumhist = np.cumsum(h * 1, axis=0) + return CumfreqResult(cumhist, l, b, e) + + +RelfreqResult = namedtuple('RelfreqResult', + ('frequency', 'lowerlimit', 'binsize', + 'extrapoints')) + + +def relfreq(a, numbins=10, defaultreallimits=None, weights=None): + """Return a relative frequency histogram, using the histogram function. + + A relative frequency histogram is a mapping of the number of + observations in each of the bins relative to the total of observations. 
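+
+    For a 1-D input, each entry of ``frequency`` is the corresponding bin
+    count divided by the number of observations; as a rough sketch (using the
+    same bin limits described under `defaultreallimits` below)::
+
+        counts, _ = np.histogram(a, bins=numbins, range=defaultreallimits)
+        frequency = counts / len(a)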
+ + Parameters + ---------- + a : array_like + Input array. + numbins : int, optional + The number of bins to use for the histogram. Default is 10. + defaultreallimits : tuple (lower, upper), optional + The lower and upper values for the range of the histogram. + If no value is given, a range slightly larger than the range of the + values in a is used. Specifically ``(a.min() - s, a.max() + s)``, + where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. + weights : array_like, optional + The weights for each value in `a`. Default is None, which gives each + value a weight of 1.0 + + Returns + ------- + frequency : ndarray + Binned values of relative frequency. + lowerlimit : float + Lower real limit. + binsize : float + Width of each bin. + extrapoints : int + Extra points. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> a = np.array([2, 4, 1, 2, 3, 2]) + >>> res = stats.relfreq(a, numbins=4) + >>> res.frequency + array([ 0.16666667, 0.5 , 0.16666667, 0.16666667]) + >>> np.sum(res.frequency) # relative frequencies should add up to 1 + 1.0 + + Create a normal distribution with 1000 random values + + >>> samples = stats.norm.rvs(size=1000, random_state=rng) + + Calculate relative frequencies + + >>> res = stats.relfreq(samples, numbins=25) + + Calculate space of values for x + + >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size, + ... res.frequency.size) + + Plot relative frequency histogram + + >>> fig = plt.figure(figsize=(5, 4)) + >>> ax = fig.add_subplot(1, 1, 1) + >>> ax.bar(x, res.frequency, width=res.binsize) + >>> ax.set_title('Relative frequency histogram') + >>> ax.set_xlim([x.min(), x.max()]) + + >>> plt.show() + + """ + a = np.asanyarray(a) + h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights) + h = h / a.shape[0] + + return RelfreqResult(h, l, b, e) + + +##################################### +# VARIABILITY FUNCTIONS # +##################################### + +def obrientransform(*samples): + """Compute the O'Brien transform on input data (any number of arrays). + + Used to test for homogeneity of variance prior to running one-way stats. + Each array in ``*samples`` is one level of a factor. + If `f_oneway` is run on the transformed data and found significant, + the variances are unequal. From Maxwell and Delaney [1]_, p.112. + + Parameters + ---------- + sample1, sample2, ... : array_like + Any number of arrays. + + Returns + ------- + obrientransform : ndarray + Transformed data for use in an ANOVA. The first dimension + of the result corresponds to the sequence of transformed + arrays. If the arrays given are all 1-D of the same length, + the return value is a 2-D array; otherwise it is a 1-D array + of type object, with each element being an ndarray. + + References + ---------- + .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and + Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990. + + Examples + -------- + We'll test the following data sets for differences in their variance. + + >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10] + >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15] + + Apply the O'Brien transform to the data. + + >>> from scipy.stats import obrientransform + >>> tx, ty = obrientransform(x, y) + + Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the + transformed data. 
+ + >>> from scipy.stats import f_oneway + >>> F, p = f_oneway(tx, ty) + >>> p + 0.1314139477040335 + + If we require that ``p < 0.05`` for significance, we cannot conclude + that the variances are different. + + """ + TINY = np.sqrt(np.finfo(float).eps) + + # `arrays` will hold the transformed arguments. + arrays = [] + sLast = None + + for sample in samples: + a = np.asarray(sample) + n = len(a) + mu = np.mean(a) + sq = (a - mu)**2 + sumsq = sq.sum() + + # The O'Brien transform. + t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2)) + + # Check that the mean of the transformed data is equal to the + # original variance. + var = sumsq / (n - 1) + if abs(var - np.mean(t)) > TINY: + raise ValueError('Lack of convergence in obrientransform.') + + arrays.append(t) + sLast = a.shape + + if sLast: + for arr in arrays[:-1]: + if sLast != arr.shape: + return np.array(arrays, dtype=object) + return np.array(arrays) + + +@_axis_nan_policy_factory( + lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, too_small=1 +) +def sem(a, axis=0, ddof=1, nan_policy='propagate'): + """Compute standard error of the mean. + + Calculate the standard error of the mean (or standard error of + measurement) of the values in the input array. + + Parameters + ---------- + a : array_like + An array containing the values for which the standard error is + returned. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + ddof : int, optional + Delta degrees-of-freedom. How many degrees of freedom to adjust + for bias in limited samples relative to the population estimate + of variance. Defaults to 1. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + s : ndarray or float + The standard error of the mean in the sample(s), along the input axis. + + Notes + ----- + The default value for `ddof` is different to the default (0) used by other + ddof containing routines, such as np.std and np.nanstd. + + Examples + -------- + Find standard error along the first axis: + + >>> import numpy as np + >>> from scipy import stats + >>> a = np.arange(20).reshape(5,4) + >>> stats.sem(a) + array([ 2.8284, 2.8284, 2.8284, 2.8284]) + + Find standard error across the whole array, using n degrees of freedom: + + >>> stats.sem(a, axis=None, ddof=0) + 1.2893796958227628 + + """ + n = a.shape[axis] + s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n) + return s + + +def _isconst(x): + """ + Check if all values in x are the same. nans are ignored. + + x must be a 1d array. + + The return value is a 1d array with length 1, so it can be used + in np.apply_along_axis. + """ + y = x[~np.isnan(x)] + if y.size == 0: + return np.array([True]) + else: + return (y[0] == y).all(keepdims=True) + + +def _quiet_nanmean(x): + """ + Compute nanmean for the 1d array x, but quietly return nan if x is all nan. + + The return value is a 1d array with length 1, so it can be used + in np.apply_along_axis. + """ + y = x[~np.isnan(x)] + if y.size == 0: + return np.array([np.nan]) + else: + return np.mean(y, keepdims=True) + + +def _quiet_nanstd(x, ddof=0): + """ + Compute nanstd for the 1d array x, but quietly return nan if x is all nan. + + The return value is a 1d array with length 1, so it can be used + in np.apply_along_axis. 
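+
+    For example, ``_quiet_nanstd(np.array([np.nan, np.nan]))`` returns
+    ``array([nan])``, whereas ``np.nanstd`` would emit a RuntimeWarning for
+    all-nan input.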
+ """ + y = x[~np.isnan(x)] + if y.size == 0: + return np.array([np.nan]) + else: + return np.std(y, keepdims=True, ddof=ddof) + + +def zscore(a, axis=0, ddof=0, nan_policy='propagate'): + """ + Compute the z score. + + Compute the z score of each value in the sample, relative to the + sample mean and standard deviation. + + Parameters + ---------- + a : array_like + An array like object containing the sample data. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + ddof : int, optional + Degrees of freedom correction in the calculation of the + standard deviation. Default is 0. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. Note that when the value is 'omit', + nans in the input also propagate to the output, but they do not affect + the z-scores computed for the non-nan values. + + Returns + ------- + zscore : array_like + The z-scores, standardized by mean and standard deviation of + input array `a`. + + See Also + -------- + numpy.mean : Arithmetic average + numpy.std : Arithmetic standard deviation + scipy.stats.gzscore : Geometric standard score + + Notes + ----- + This function preserves ndarray subclasses, and works also with + matrices and masked arrays (it uses `asanyarray` instead of + `asarray` for parameters). + + References + ---------- + .. [1] "Standard score", *Wikipedia*, + https://en.wikipedia.org/wiki/Standard_score. + .. [2] Huck, S. W., Cross, T. L., Clark, S. B, "Overcoming misconceptions + about Z-scores", Teaching Statistics, vol. 8, pp. 38-40, 1986 + + Examples + -------- + >>> import numpy as np + >>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, + ... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508]) + >>> from scipy import stats + >>> stats.zscore(a) + array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786, + 0.6748, -1.1488, -1.3324]) + + Computing along a specified axis, using n-1 degrees of freedom + (``ddof=1``) to calculate the standard deviation: + + >>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608], + ... [ 0.7149, 0.0775, 0.6072, 0.9656], + ... [ 0.6341, 0.1403, 0.9759, 0.4064], + ... [ 0.5918, 0.6948, 0.904 , 0.3721], + ... [ 0.0921, 0.2481, 0.1188, 0.1366]]) + >>> stats.zscore(b, axis=1, ddof=1) + array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358], + [ 0.33048416, -1.37380874, 0.04251374, 1.00081084], + [ 0.26796377, -1.12598418, 1.23283094, -0.37481053], + [-0.22095197, 0.24468594, 1.19042819, -1.21416216], + [-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]]) + + An example with `nan_policy='omit'`: + + >>> x = np.array([[25.11, 30.10, np.nan, 32.02, 43.15], + ... [14.95, 16.06, 121.25, 94.35, 29.81]]) + >>> stats.zscore(x, axis=1, nan_policy='omit') + array([[-1.13490897, -0.37830299, nan, -0.08718406, 1.60039602], + [-0.91611681, -0.89090508, 1.4983032 , 0.88731639, -0.5785977 ]]) + """ + return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy) + + +def gzscore(a, *, axis=0, ddof=0, nan_policy='propagate'): + """ + Compute the geometric standard score. + + Compute the geometric z score of each strictly positive value in the + sample, relative to the geometric mean and standard deviation. + Mathematically the geometric z score can be evaluated as:: + + gzscore = log(a/gmu) / log(gsigma) + + where ``gmu`` (resp. ``gsigma``) is the geometric mean (resp. 
standard + deviation). + + Parameters + ---------- + a : array_like + Sample data. + axis : int or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + ddof : int, optional + Degrees of freedom correction in the calculation of the + standard deviation. Default is 0. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. Note that when the value is 'omit', + nans in the input also propagate to the output, but they do not affect + the geometric z scores computed for the non-nan values. + + Returns + ------- + gzscore : array_like + The geometric z scores, standardized by geometric mean and geometric + standard deviation of input array `a`. + + See Also + -------- + gmean : Geometric mean + gstd : Geometric standard deviation + zscore : Standard score + + Notes + ----- + This function preserves ndarray subclasses, and works also with + matrices and masked arrays (it uses ``asanyarray`` instead of + ``asarray`` for parameters). + + .. versionadded:: 1.8 + + References + ---------- + .. [1] "Geometric standard score", *Wikipedia*, + https://en.wikipedia.org/wiki/Geometric_standard_deviation#Geometric_standard_score. + + Examples + -------- + Draw samples from a log-normal distribution: + + >>> import numpy as np + >>> from scipy.stats import zscore, gzscore + >>> import matplotlib.pyplot as plt + + >>> rng = np.random.default_rng() + >>> mu, sigma = 3., 1. # mean and standard deviation + >>> x = rng.lognormal(mu, sigma, size=500) + + Display the histogram of the samples: + + >>> fig, ax = plt.subplots() + >>> ax.hist(x, 50) + >>> plt.show() + + Display the histogram of the samples standardized by the classical zscore. + Distribution is rescaled but its shape is unchanged. + + >>> fig, ax = plt.subplots() + >>> ax.hist(zscore(x), 50) + >>> plt.show() + + Demonstrate that the distribution of geometric zscores is rescaled and + quasinormal: + + >>> fig, ax = plt.subplots() + >>> ax.hist(gzscore(x), 50) + >>> plt.show() + + """ + a = np.asanyarray(a) + log = ma.log if isinstance(a, ma.MaskedArray) else np.log + + return zscore(log(a), axis=axis, ddof=ddof, nan_policy=nan_policy) + + +def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'): + """ + Calculate the relative z-scores. + + Return an array of z-scores, i.e., scores that are standardized to + zero mean and unit variance, where mean and variance are calculated + from the comparison array. + + Parameters + ---------- + scores : array_like + The input for which z-scores are calculated. + compare : array_like + The input from which the mean and standard deviation of the + normalization are taken; assumed to have the same dimension as + `scores`. + axis : int or None, optional + Axis over which mean and variance of `compare` are calculated. + Default is 0. If None, compute over the whole array `scores`. + ddof : int, optional + Degrees of freedom correction in the calculation of the + standard deviation. Default is 0. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle the occurrence of nans in `compare`. + 'propagate' returns nan, 'raise' raises an exception, 'omit' + performs the calculations ignoring nan values. Default is + 'propagate'. 
Note that when the value is 'omit', nans in `scores` + also propagate to the output, but they do not affect the z-scores + computed for the non-nan values. + + Returns + ------- + zscore : array_like + Z-scores, in the same shape as `scores`. + + Notes + ----- + This function preserves ndarray subclasses, and works also with + matrices and masked arrays (it uses `asanyarray` instead of + `asarray` for parameters). + + Examples + -------- + >>> from scipy.stats import zmap + >>> a = [0.5, 2.0, 2.5, 3] + >>> b = [0, 1, 2, 3, 4] + >>> zmap(a, b) + array([-1.06066017, 0. , 0.35355339, 0.70710678]) + + """ + a = np.asanyarray(compare) + + if a.size == 0: + return np.empty(a.shape) + + contains_nan, nan_policy = _contains_nan(a, nan_policy) + + if contains_nan and nan_policy == 'omit': + if axis is None: + mn = _quiet_nanmean(a.ravel()) + std = _quiet_nanstd(a.ravel(), ddof=ddof) + isconst = _isconst(a.ravel()) + else: + mn = np.apply_along_axis(_quiet_nanmean, axis, a) + std = np.apply_along_axis(_quiet_nanstd, axis, a, ddof=ddof) + isconst = np.apply_along_axis(_isconst, axis, a) + else: + mn = a.mean(axis=axis, keepdims=True) + std = a.std(axis=axis, ddof=ddof, keepdims=True) + # The intent is to check whether all elements of `a` along `axis` are + # identical. Due to finite precision arithmetic, comparing elements + # against `mn` doesn't work. Previously, this compared elements to + # `_first`, but that extracts the element at index 0 regardless of + # whether it is masked. As a simple fix, compare against `min`. + a0 = a.min(axis=axis, keepdims=True) + isconst = (a == a0).all(axis=axis, keepdims=True) + + # Set std deviations that are 0 to 1 to avoid division by 0. + std[isconst] = 1.0 + z = (scores - mn) / std + # Set the outputs associated with a constant input to nan. + z[np.broadcast_to(isconst, z.shape)] = np.nan + return z + + +def gstd(a, axis=0, ddof=1): + """ + Calculate the geometric standard deviation of an array. + + The geometric standard deviation describes the spread of a set of numbers + where the geometric mean is preferred. It is a multiplicative factor, and + so a dimensionless quantity. + + It is defined as the exponent of the standard deviation of ``log(a)``. + Mathematically the population geometric standard deviation can be + evaluated as:: + + gstd = exp(std(log(a))) + + .. versionadded:: 1.3.0 + + Parameters + ---------- + a : array_like + An array like object containing the sample data. + axis : int, tuple or None, optional + Axis along which to operate. Default is 0. If None, compute over + the whole array `a`. + ddof : int, optional + Degree of freedom correction in the calculation of the + geometric standard deviation. Default is 1. + + Returns + ------- + gstd : ndarray or float + An array of the geometric standard deviation. If `axis` is None or `a` + is a 1d array a float is returned. + + See Also + -------- + gmean : Geometric mean + numpy.std : Standard deviation + gzscore : Geometric standard score + + Notes + ----- + As the calculation requires the use of logarithms the geometric standard + deviation only supports strictly positive values. Any non-positive or + infinite values will raise a `ValueError`. + The geometric standard deviation is sometimes confused with the exponent of + the standard deviation, ``exp(std(a))``. Instead the geometric standard + deviation is ``exp(std(log(a)))``. + The default value for `ddof` is different to the default value (0) used + by other ddof containing functions, such as ``np.std`` and ``np.nanstd``. 
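+
+    As a minimal sketch of the definition with the default ``ddof=1``, for a
+    1-D array ``a`` of strictly positive values ``gstd(a)`` is equivalent to::
+
+        np.exp(np.std(np.log(a), ddof=1))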
+ + References + ---------- + .. [1] "Geometric standard deviation", *Wikipedia*, + https://en.wikipedia.org/wiki/Geometric_standard_deviation. + .. [2] Kirkwood, T. B., "Geometric means and measures of dispersion", + Biometrics, vol. 35, pp. 908-909, 1979 + + Examples + -------- + Find the geometric standard deviation of a log-normally distributed sample. + Note that the standard deviation of the distribution is one, on a + log scale this evaluates to approximately ``exp(1)``. + + >>> import numpy as np + >>> from scipy.stats import gstd + >>> rng = np.random.default_rng() + >>> sample = rng.lognormal(mean=0, sigma=1, size=1000) + >>> gstd(sample) + 2.810010162475324 + + Compute the geometric standard deviation of a multidimensional array and + of a given axis. + + >>> a = np.arange(1, 25).reshape(2, 3, 4) + >>> gstd(a, axis=None) + 2.2944076136018947 + >>> gstd(a, axis=2) + array([[1.82424757, 1.22436866, 1.13183117], + [1.09348306, 1.07244798, 1.05914985]]) + >>> gstd(a, axis=(1,2)) + array([2.12939215, 1.22120169]) + + The geometric standard deviation further handles masked arrays. + + >>> a = np.arange(1, 25).reshape(2, 3, 4) + >>> ma = np.ma.masked_where(a > 16, a) + >>> ma + masked_array( + data=[[[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]], + [[13, 14, 15, 16], + [--, --, --, --], + [--, --, --, --]]], + mask=[[[False, False, False, False], + [False, False, False, False], + [False, False, False, False]], + [[False, False, False, False], + [ True, True, True, True], + [ True, True, True, True]]], + fill_value=999999) + >>> gstd(ma, axis=2) + masked_array( + data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478], + [1.0934830582350938, --, --]], + mask=[[False, False, False], + [False, True, True]], + fill_value=999999) + + """ + a = np.asanyarray(a) + log = ma.log if isinstance(a, ma.MaskedArray) else np.log + + try: + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + return np.exp(np.std(log(a), axis=axis, ddof=ddof)) + except RuntimeWarning as w: + if np.isinf(a).any(): + raise ValueError( + 'Infinite value encountered. The geometric standard deviation ' + 'is defined for strictly positive values only.' + ) from w + a_nan = np.isnan(a) + a_nan_any = a_nan.any() + # exclude NaN's from negativity check, but + # avoid expensive masking for arrays with no NaN + if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or + (not a_nan_any and np.less_equal(a, 0).any())): + raise ValueError( + 'Non positive value encountered. The geometric standard ' + 'deviation is defined for strictly positive values only.' + ) from w + elif 'Degrees of freedom <= 0 for slice' == str(w): + raise ValueError(w) from w + else: + # Remaining warnings don't need to be exceptions. + return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof)) + except TypeError as e: + raise ValueError( + 'Invalid array input. The inputs could not be ' + 'safely coerced to any supported types') from e + + +# Private dictionary initialized only once at module level +# See https://en.wikipedia.org/wiki/Robust_measures_of_scale +_scale_conversions = {'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)} + + +@_axis_nan_policy_factory( + lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, + default_axis=None, override={'nan_propagation': False} +) +def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate', + interpolation='linear', keepdims=False): + r""" + Compute the interquartile range of the data along the specified axis. 
+ + The interquartile range (IQR) is the difference between the 75th and + 25th percentile of the data. It is a measure of the dispersion + similar to standard deviation or variance, but is much more robust + against outliers [2]_. + + The ``rng`` parameter allows this function to compute other + percentile ranges than the actual IQR. For example, setting + ``rng=(0, 100)`` is equivalent to `numpy.ptp`. + + The IQR of an empty array is `np.nan`. + + .. versionadded:: 0.18.0 + + Parameters + ---------- + x : array_like + Input array or object that can be converted to an array. + axis : int or sequence of int, optional + Axis along which the range is computed. The default is to + compute the IQR for the entire array. + rng : Two-element sequence containing floats in range of [0,100] optional + Percentiles over which to compute the range. Each must be + between 0 and 100, inclusive. The default is the true IQR: + ``(25, 75)``. The order of the elements is not important. + scale : scalar or str or array_like of reals, optional + The numerical value of scale will be divided out of the final + result. The following string value is also recognized: + + * 'normal' : Scale by + :math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`. + + The default is 1.0. + Array-like `scale` of real dtype is also allowed, as long + as it broadcasts correctly to the output such that + ``out / scale`` is a valid operation. The output dimensions + depend on the input array, `x`, the `axis` argument, and the + `keepdims` flag. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + interpolation : str, optional + + Specifies the interpolation method to use when the percentile + boundaries lie between two data points ``i`` and ``j``. + The following options are available (default is 'linear'): + + * 'linear': ``i + (j - i)*fraction``, where ``fraction`` is the + fractional part of the index surrounded by ``i`` and ``j``. + * 'lower': ``i``. + * 'higher': ``j``. + * 'nearest': ``i`` or ``j`` whichever is nearest. + * 'midpoint': ``(i + j)/2``. + + For NumPy >= 1.22.0, the additional options provided by the ``method`` + keyword of `numpy.percentile` are also valid. + + keepdims : bool, optional + If this is set to True, the reduced axes are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the original array `x`. + + Returns + ------- + iqr : scalar or ndarray + If ``axis=None``, a scalar is returned. If the input contains + integers or floats of smaller precision than ``np.float64``, then the + output data-type is ``np.float64``. Otherwise, the output data-type is + the same as that of the input. + + See Also + -------- + numpy.std, numpy.var + + References + ---------- + .. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range + .. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale + .. 
[3] "Quantile" https://en.wikipedia.org/wiki/Quantile + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import iqr + >>> x = np.array([[10, 7, 4], [3, 2, 1]]) + >>> x + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> iqr(x) + 4.0 + >>> iqr(x, axis=0) + array([ 3.5, 2.5, 1.5]) + >>> iqr(x, axis=1) + array([ 3., 1.]) + >>> iqr(x, axis=1, keepdims=True) + array([[ 3.], + [ 1.]]) + + """ + x = asarray(x) + + # This check prevents percentile from raising an error later. Also, it is + # consistent with `np.var` and `np.std`. + if not x.size: + return _get_nan(x) + + # An error may be raised here, so fail-fast, before doing lengthy + # computations, even though `scale` is not used until later + if isinstance(scale, str): + scale_key = scale.lower() + if scale_key not in _scale_conversions: + raise ValueError(f"{scale} not a valid scale for `iqr`") + scale = _scale_conversions[scale_key] + + # Select the percentile function to use based on nans and policy + contains_nan, nan_policy = _contains_nan(x, nan_policy) + + if contains_nan and nan_policy == 'omit': + percentile_func = np.nanpercentile + else: + percentile_func = np.percentile + + if len(rng) != 2: + raise TypeError("quantile range must be two element sequence") + + if np.isnan(rng).any(): + raise ValueError("range must not contain NaNs") + + rng = sorted(rng) + pct = percentile_func(x, rng, axis=axis, method=interpolation, + keepdims=keepdims) + out = np.subtract(pct[1], pct[0]) + + if scale != 1.0: + out /= scale + + return out + + +def _mad_1d(x, center, nan_policy): + # Median absolute deviation for 1-d array x. + # This is a helper function for `median_abs_deviation`; it assumes its + # arguments have been validated already. In particular, x must be a + # 1-d numpy array, center must be callable, and if nan_policy is not + # 'propagate', it is assumed to be 'omit', because 'raise' is handled + # in `median_abs_deviation`. + # No warning is generated if x is empty or all nan. + isnan = np.isnan(x) + if isnan.any(): + if nan_policy == 'propagate': + return np.nan + x = x[~isnan] + if x.size == 0: + # MAD of an empty array is nan. + return np.nan + # Edge cases have been handled, so do the basic MAD calculation. + med = center(x) + mad = np.median(np.abs(x - med)) + return mad + + +def median_abs_deviation(x, axis=0, center=np.median, scale=1.0, + nan_policy='propagate'): + r""" + Compute the median absolute deviation of the data along the given axis. + + The median absolute deviation (MAD, [1]_) computes the median over the + absolute deviations from the median. It is a measure of dispersion + similar to the standard deviation but more robust to outliers [2]_. + + The MAD of an empty array is ``np.nan``. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + x : array_like + Input array or object that can be converted to an array. + axis : int or None, optional + Axis along which the range is computed. Default is 0. If None, compute + the MAD over the entire array. + center : callable, optional + A function that will return the central value. The default is to use + np.median. Any user defined function used will need to have the + function signature ``func(arr, axis)``. + scale : scalar or str, optional + The numerical value of scale will be divided out of the final + result. The default is 1.0. The string "normal" is also accepted, + and results in `scale` being the inverse of the standard normal + quantile function at 0.75, which is approximately 0.67449. 
+ Array-like scale is also allowed, as long as it broadcasts correctly + to the output such that ``out / scale`` is a valid operation. The + output dimensions depend on the input array, `x`, and the `axis` + argument. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + mad : scalar or ndarray + If ``axis=None``, a scalar is returned. If the input contains + integers or floats of smaller precision than ``np.float64``, then the + output data-type is ``np.float64``. Otherwise, the output data-type is + the same as that of the input. + + See Also + -------- + numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean, + scipy.stats.tstd, scipy.stats.tvar + + Notes + ----- + The `center` argument only affects the calculation of the central value + around which the MAD is calculated. That is, passing in ``center=np.mean`` + will calculate the MAD around the mean - it will not calculate the *mean* + absolute deviation. + + The input array may contain `inf`, but if `center` returns `inf`, the + corresponding MAD for that data will be `nan`. + + References + ---------- + .. [1] "Median absolute deviation", + https://en.wikipedia.org/wiki/Median_absolute_deviation + .. [2] "Robust measures of scale", + https://en.wikipedia.org/wiki/Robust_measures_of_scale + + Examples + -------- + When comparing the behavior of `median_abs_deviation` with ``np.std``, + the latter is affected when we change a single value of an array to have an + outlier value while the MAD hardly changes: + + >>> import numpy as np + >>> from scipy import stats + >>> x = stats.norm.rvs(size=100, scale=1, random_state=123456) + >>> x.std() + 0.9973906394005013 + >>> stats.median_abs_deviation(x) + 0.82832610097857 + >>> x[0] = 345.6 + >>> x.std() + 34.42304872314415 + >>> stats.median_abs_deviation(x) + 0.8323442311590675 + + Axis handling example: + + >>> x = np.array([[10, 7, 4], [3, 2, 1]]) + >>> x + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> stats.median_abs_deviation(x) + array([3.5, 2.5, 1.5]) + >>> stats.median_abs_deviation(x, axis=None) + 2.0 + + Scale normal example: + + >>> x = stats.norm.rvs(size=1000000, scale=2, random_state=123456) + >>> stats.median_abs_deviation(x) + 1.3487398527041636 + >>> stats.median_abs_deviation(x, scale='normal') + 1.9996446978061115 + + """ + if not callable(center): + raise TypeError("The argument 'center' must be callable. The given " + f"value {repr(center)} is not callable.") + + # An error may be raised here, so fail-fast, before doing lengthy + # computations, even though `scale` is not used until later + if isinstance(scale, str): + if scale.lower() == 'normal': + scale = 0.6744897501960817 # special.ndtri(0.75) + else: + raise ValueError(f"{scale} is not a valid scale value.") + + x = asarray(x) + + # Consistent with `np.var` and `np.std`. 
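+    # Empty input: return nan, either as a scalar (when the reduction removes
+    # all axes) or as an array of nans with the reduced shape.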
+ if not x.size: + if axis is None: + return np.nan + nan_shape = tuple(item for i, item in enumerate(x.shape) if i != axis) + if nan_shape == (): + # Return nan, not array(nan) + return np.nan + return np.full(nan_shape, np.nan) + + contains_nan, nan_policy = _contains_nan(x, nan_policy) + + if contains_nan: + if axis is None: + mad = _mad_1d(x.ravel(), center, nan_policy) + else: + mad = np.apply_along_axis(_mad_1d, axis, x, center, nan_policy) + else: + if axis is None: + med = center(x, axis=None) + mad = np.median(np.abs(x - med)) + else: + # Wrap the call to center() in expand_dims() so it acts like + # keepdims=True was used. + med = np.expand_dims(center(x, axis=axis), axis) + mad = np.median(np.abs(x - med), axis=axis) + + return mad / scale + + +##################################### +# TRIMMING FUNCTIONS # +##################################### + + +SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper')) + + +def sigmaclip(a, low=4., high=4.): + """Perform iterative sigma-clipping of array elements. + + Starting from the full sample, all elements outside the critical range are + removed, i.e. all elements of the input array `c` that satisfy either of + the following conditions:: + + c < mean(c) - std(c)*low + c > mean(c) + std(c)*high + + The iteration continues with the updated sample until no + elements are outside the (updated) range. + + Parameters + ---------- + a : array_like + Data array, will be raveled if not 1-D. + low : float, optional + Lower bound factor of sigma clipping. Default is 4. + high : float, optional + Upper bound factor of sigma clipping. Default is 4. + + Returns + ------- + clipped : ndarray + Input array with clipped elements removed. + lower : float + Lower threshold value use for clipping. + upper : float + Upper threshold value use for clipping. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import sigmaclip + >>> a = np.concatenate((np.linspace(9.5, 10.5, 31), + ... np.linspace(0, 20, 5))) + >>> fact = 1.5 + >>> c, low, upp = sigmaclip(a, fact, fact) + >>> c + array([ 9.96666667, 10. , 10.03333333, 10. ]) + >>> c.var(), c.std() + (0.00055555555555555165, 0.023570226039551501) + >>> low, c.mean() - fact*c.std(), c.min() + (9.9646446609406727, 9.9646446609406727, 9.9666666666666668) + >>> upp, c.mean() + fact*c.std(), c.max() + (10.035355339059327, 10.035355339059327, 10.033333333333333) + + >>> a = np.concatenate((np.linspace(9.5, 10.5, 11), + ... np.linspace(-100, -50, 3))) + >>> c, low, upp = sigmaclip(a, 1.8, 1.8) + >>> (c == np.linspace(9.5, 10.5, 11)).all() + True + + """ + c = np.asarray(a).ravel() + delta = 1 + while delta: + c_std = c.std() + c_mean = c.mean() + size = c.size + critlower = c_mean - c_std * low + critupper = c_mean + c_std * high + c = c[(c >= critlower) & (c <= critupper)] + delta = size - c.size + + return SigmaclipResult(c, critlower, critupper) + + +def trimboth(a, proportiontocut, axis=0): + """Slice off a proportion of items from both ends of an array. + + Slice off the passed proportion of items from both ends of the passed + array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and** + rightmost 10% of scores). The trimmed values are the lowest and + highest ones. + Slice off less if proportion results in a non-integer slice index (i.e. + conservatively slices off `proportiontocut`). + + Parameters + ---------- + a : array_like + Data to trim. + proportiontocut : float + Proportion (in range 0-1) of total data set to trim of each end. 
+ axis : int or None, optional + Axis along which to trim data. Default is 0. If None, compute over + the whole array `a`. + + Returns + ------- + out : ndarray + Trimmed version of array `a`. The order of the trimmed content + is undefined. + + See Also + -------- + trim_mean + + Examples + -------- + Create an array of 10 values and trim 10% of those values from each end: + + >>> import numpy as np + >>> from scipy import stats + >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> stats.trimboth(a, 0.1) + array([1, 3, 2, 4, 5, 6, 7, 8]) + + Note that the elements of the input array are trimmed by value, but the + output array is not necessarily sorted. + + The proportion to trim is rounded down to the nearest integer. For + instance, trimming 25% of the values from each end of an array of 10 + values will return an array of 6 values: + + >>> b = np.arange(10) + >>> stats.trimboth(b, 1/4).shape + (6,) + + Multidimensional arrays can be trimmed along any axis or across the entire + array: + + >>> c = [2, 4, 6, 8, 0, 1, 3, 5, 7, 9] + >>> d = np.array([a, b, c]) + >>> stats.trimboth(d, 0.4, axis=0).shape + (1, 10) + >>> stats.trimboth(d, 0.4, axis=1).shape + (3, 2) + >>> stats.trimboth(d, 0.4, axis=None).shape + (6,) + + """ + a = np.asarray(a) + + if a.size == 0: + return a + + if axis is None: + a = a.ravel() + axis = 0 + + nobs = a.shape[axis] + lowercut = int(proportiontocut * nobs) + uppercut = nobs - lowercut + if (lowercut >= uppercut): + raise ValueError("Proportion too big.") + + atmp = np.partition(a, (lowercut, uppercut - 1), axis) + + sl = [slice(None)] * atmp.ndim + sl[axis] = slice(lowercut, uppercut) + return atmp[tuple(sl)] + + +def trim1(a, proportiontocut, tail='right', axis=0): + """Slice off a proportion from ONE end of the passed array distribution. + + If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost' + 10% of scores. The lowest or highest values are trimmed (depending on + the tail). + Slice off less if proportion results in a non-integer slice index + (i.e. conservatively slices off `proportiontocut` ). + + Parameters + ---------- + a : array_like + Input array. + proportiontocut : float + Fraction to cut off of 'left' or 'right' of distribution. + tail : {'left', 'right'}, optional + Defaults to 'right'. + axis : int or None, optional + Axis along which to trim data. Default is 0. If None, compute over + the whole array `a`. + + Returns + ------- + trim1 : ndarray + Trimmed version of array `a`. The order of the trimmed content is + undefined. + + Examples + -------- + Create an array of 10 values and trim 20% of its lowest values: + + >>> import numpy as np + >>> from scipy import stats + >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> stats.trim1(a, 0.2, 'left') + array([2, 4, 3, 5, 6, 7, 8, 9]) + + Note that the elements of the input array are trimmed by value, but the + output array is not necessarily sorted. + + The proportion to trim is rounded down to the nearest integer. 
For + instance, trimming 25% of the values from an array of 10 values will + return an array of 8 values: + + >>> b = np.arange(10) + >>> stats.trim1(b, 1/4).shape + (8,) + + Multidimensional arrays can be trimmed along any axis or across the entire + array: + + >>> c = [2, 4, 6, 8, 0, 1, 3, 5, 7, 9] + >>> d = np.array([a, b, c]) + >>> stats.trim1(d, 0.8, axis=0).shape + (1, 10) + >>> stats.trim1(d, 0.8, axis=1).shape + (3, 2) + >>> stats.trim1(d, 0.8, axis=None).shape + (6,) + + """ + a = np.asarray(a) + if axis is None: + a = a.ravel() + axis = 0 + + nobs = a.shape[axis] + + # avoid possible corner case + if proportiontocut >= 1: + return [] + + if tail.lower() == 'right': + lowercut = 0 + uppercut = nobs - int(proportiontocut * nobs) + + elif tail.lower() == 'left': + lowercut = int(proportiontocut * nobs) + uppercut = nobs + + atmp = np.partition(a, (lowercut, uppercut - 1), axis) + + sl = [slice(None)] * atmp.ndim + sl[axis] = slice(lowercut, uppercut) + return atmp[tuple(sl)] + + +def trim_mean(a, proportiontocut, axis=0): + """Return mean of array after trimming a specified fraction of extreme values + + Removes the specified proportion of elements from *each* end of the + sorted array, then computes the mean of the remaining elements. + + Parameters + ---------- + a : array_like + Input array. + proportiontocut : float + Fraction of the most positive and most negative elements to remove. + When the specified proportion does not result in an integer number of + elements, the number of elements to trim is rounded down. + axis : int or None, default: 0 + Axis along which the trimmed means are computed. + If None, compute over the raveled array. + + Returns + ------- + trim_mean : ndarray + Mean of trimmed array. + + See Also + -------- + trimboth : Remove a proportion of elements from each end of an array. + tmean : Compute the mean after trimming values outside specified limits. + + Notes + ----- + For 1-D array `a`, `trim_mean` is approximately equivalent to the following + calculation:: + + import numpy as np + a = np.sort(a) + m = int(proportiontocut * len(a)) + np.mean(a[m: len(a) - m]) + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = [1, 2, 3, 5] + >>> stats.trim_mean(x, 0.25) + 2.5 + + When the specified proportion does not result in an integer number of + elements, the number of elements to trim is rounded down. + + >>> stats.trim_mean(x, 0.24999) == np.mean(x) + True + + Use `axis` to specify the axis along which the calculation is performed. + + >>> x2 = [[1, 2, 3, 5], + ... [10, 20, 30, 50]] + >>> stats.trim_mean(x2, 0.25) + array([ 5.5, 11. , 16.5, 27.5]) + >>> stats.trim_mean(x2, 0.25, axis=1) + array([ 2.5, 25. ]) + + """ + a = np.asarray(a) + + if a.size == 0: + return np.nan + + if axis is None: + a = a.ravel() + axis = 0 + + nobs = a.shape[axis] + lowercut = int(proportiontocut * nobs) + uppercut = nobs - lowercut + if (lowercut > uppercut): + raise ValueError("Proportion too big.") + + atmp = np.partition(a, (lowercut, uppercut - 1), axis) + + sl = [slice(None)] * atmp.ndim + sl[axis] = slice(lowercut, uppercut) + return np.mean(atmp[tuple(sl)], axis=axis) + + +F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue')) + + +def _create_f_oneway_nan_result(shape, axis, samples): + """ + This is a helper function for f_oneway for creating the return values + in certain degenerate conditions. It creates return values that are + all nan with the appropriate shape for the given `shape` and `axis`. 
+ """ + axis = normalize_axis_index(axis, len(shape)) + shp = shape[:axis] + shape[axis+1:] + f = np.full(shp, fill_value=_get_nan(*samples)) + prob = f.copy() + return F_onewayResult(f[()], prob[()]) + + +def _first(arr, axis): + """Return arr[..., 0:1, ...] where 0:1 is in the `axis` position.""" + return np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis) + + +def _f_oneway_is_too_small(samples, kwargs={}, axis=-1): + # Check this after forming alldata, so shape errors are detected + # and reported before checking for 0 length inputs. + if any(sample.shape[axis] == 0 for sample in samples): + msg = 'at least one input has length 0' + warnings.warn(stats.DegenerateDataWarning(msg), stacklevel=2) + return True + + # Must have at least one group with length greater than 1. + if all(sample.shape[axis] == 1 for sample in samples): + msg = ('all input arrays have length 1. f_oneway requires that at ' + 'least one input has length greater than 1.') + warnings.warn(stats.DegenerateDataWarning(msg), stacklevel=2) + return True + + return False + + +@_axis_nan_policy_factory( + F_onewayResult, n_samples=None, too_small=_f_oneway_is_too_small +) +def f_oneway(*samples, axis=0): + """Perform one-way ANOVA. + + The one-way ANOVA tests the null hypothesis that two or more groups have + the same population mean. The test is applied to samples from two or + more groups, possibly with differing sizes. + + Parameters + ---------- + sample1, sample2, ... : array_like + The sample measurements for each group. There must be at least + two arguments. If the arrays are multidimensional, then all the + dimensions of the array must be the same except for `axis`. + axis : int, optional + Axis of the input arrays along which the test is applied. + Default is 0. + + Returns + ------- + statistic : float + The computed F statistic of the test. + pvalue : float + The associated p-value from the F distribution. + + Warns + ----- + `~scipy.stats.ConstantInputWarning` + Raised if all values within each of the input arrays are identical. + In this case the F statistic is either infinite or isn't defined, + so ``np.inf`` or ``np.nan`` is returned. + + `~scipy.stats.DegenerateDataWarning` + Raised if the length of any input array is 0, or if all the input + arrays have length 1. ``np.nan`` is returned for the F statistic + and the p-value in these cases. + + Notes + ----- + The ANOVA test has important assumptions that must be satisfied in order + for the associated p-value to be valid. + + 1. The samples are independent. + 2. Each sample is from a normally distributed population. + 3. The population standard deviations of the groups are all equal. This + property is known as homoscedasticity. + + If these assumptions are not true for a given set of data, it may still + be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) or + the Alexander-Govern test (`scipy.stats.alexandergovern`) although with + some loss of power. + + The length of each group must be at least one, and there must be at + least one group with length greater than one. If these conditions + are not satisfied, a warning is generated and (``np.nan``, ``np.nan``) + is returned. + + If all values in each group are identical, and there exist at least two + groups with different values, the function generates a warning and + returns (``np.inf``, 0). + + If all values in all groups are the same, function generates a warning + and returns (``np.nan``, ``np.nan``). + + The algorithm is from Heiman [2]_, pp.394-7. 
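# A minimal sketch, not taken from the cited reference, of the 1-D
# computation summarized in the Notes above: split the total sum of squares
# into between-group and within-group parts, form F = MSB / MSW, and take
# the upper tail of the F distribution. The group values below are made up
# purely for illustration; the manual result should match `f_oneway`.
import numpy as np
from scipy import stats

groups = [np.array([6.9, 5.4, 5.8, 4.6, 4.0]),
          np.array([8.3, 6.8, 7.8, 9.2, 6.5]),
          np.array([8.0, 10.5, 8.1, 6.9, 9.3])]

grand_mean = np.concatenate(groups).mean()
ssbn = sum(len(g) * (g.mean() - grand_mean)**2 for g in groups)  # between
sswn = sum(((g - g.mean())**2).sum() for g in groups)            # within
dfbn = len(groups) - 1
dfwn = sum(len(g) for g in groups) - len(groups)
f_manual = (ssbn / dfbn) / (sswn / dfwn)
p_manual = stats.f.sf(f_manual, dfbn, dfwn)

print(f_manual, p_manual)
print(stats.f_oneway(*groups))   # should agree with the manual values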
+ + References + ---------- + .. [1] R. Lowry, "Concepts and Applications of Inferential Statistics", + Chapter 14, 2014, http://vassarstats.net/textbook/ + + .. [2] G.W. Heiman, "Understanding research methods and statistics: An + integrated introduction for psychology", Houghton, Mifflin and + Company, 2001. + + .. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA. + http://www.biostathandbook.com/onewayanova.html + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import f_oneway + + Here are some data [3]_ on a shell measurement (the length of the anterior + adductor muscle scar, standardized by dividing by length) in the mussel + Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon; + Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a + much larger data set used in McDonald et al. (1991). + + >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735, + ... 0.0659, 0.0923, 0.0836] + >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835, + ... 0.0725] + >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105] + >>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764, + ... 0.0689] + >>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045] + >>> f_oneway(tillamook, newport, petersburg, magadan, tvarminne) + F_onewayResult(statistic=7.121019471642447, pvalue=0.0002812242314534544) + + `f_oneway` accepts multidimensional input arrays. When the inputs + are multidimensional and `axis` is not given, the test is performed + along the first axis of the input arrays. For the following data, the + test is performed three times, once for each column. + + >>> a = np.array([[9.87, 9.03, 6.81], + ... [7.18, 8.35, 7.00], + ... [8.39, 7.58, 7.68], + ... [7.45, 6.33, 9.35], + ... [6.41, 7.10, 9.33], + ... [8.00, 8.24, 8.44]]) + >>> b = np.array([[6.35, 7.30, 7.16], + ... [6.65, 6.68, 7.63], + ... [5.72, 7.73, 6.72], + ... [7.01, 9.19, 7.41], + ... [7.75, 7.87, 8.30], + ... [6.90, 7.97, 6.97]]) + >>> c = np.array([[3.31, 8.77, 1.01], + ... [8.25, 3.24, 3.62], + ... [6.32, 8.81, 5.19], + ... [7.48, 8.83, 8.91], + ... [8.59, 6.01, 6.07], + ... [3.07, 9.72, 7.48]]) + >>> F, p = f_oneway(a, b, c) + >>> F + array([1.75676344, 0.03701228, 3.76439349]) + >>> p + array([0.20630784, 0.96375203, 0.04733157]) + + """ + if len(samples) < 2: + raise TypeError('at least two inputs are required;' + f' got {len(samples)}.') + + # ANOVA on N groups, each in its own array + num_groups = len(samples) + + # We haven't explicitly validated axis, but if it is bad, this call of + # np.concatenate will raise np.exceptions.AxisError. The call will raise + # ValueError if the dimensions of all the arrays, except the axis + # dimension, are not the same. + alldata = np.concatenate(samples, axis=axis) + bign = alldata.shape[axis] + + # Check if the inputs are too small + if _f_oneway_is_too_small(samples): + return _create_f_oneway_nan_result(alldata.shape, axis, samples) + + # Check if all values within each group are identical, and if the common + # value in at least one group is different from that in another group. + # Based on https://github.com/scipy/scipy/issues/11669 + + # If axis=0, say, and the groups have shape (n0, ...), (n1, ...), ..., + # then is_const is a boolean array with shape (num_groups, ...). + # It is True if the values within the groups along the axis slice are + # identical. 
In the typical case where each input array is 1-d, is_const is + # a 1-d array with length num_groups. + is_const = np.concatenate( + [(_first(sample, axis) == sample).all(axis=axis, + keepdims=True) + for sample in samples], + axis=axis + ) + + # all_const is a boolean array with shape (...) (see previous comment). + # It is True if the values within each group along the axis slice are + # the same (e.g. [[3, 3, 3], [5, 5, 5, 5], [4, 4, 4]]). + all_const = is_const.all(axis=axis) + if all_const.any(): + msg = ("Each of the input arrays is constant; " + "the F statistic is not defined or infinite") + warnings.warn(stats.ConstantInputWarning(msg), stacklevel=2) + + # all_same_const is True if all the values in the groups along the axis=0 + # slice are the same (e.g. [[3, 3, 3], [3, 3, 3, 3], [3, 3, 3]]). + all_same_const = (_first(alldata, axis) == alldata).all(axis=axis) + + # Determine the mean of the data, and subtract that from all inputs to a + # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant + # to a shift in location, and centering all data around zero vastly + # improves numerical stability. + offset = alldata.mean(axis=axis, keepdims=True) + alldata = alldata - offset + + normalized_ss = _square_of_sums(alldata, axis=axis) / bign + + sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss + + ssbn = 0 + for sample in samples: + smo_ss = _square_of_sums(sample - offset, axis=axis) + ssbn = ssbn + smo_ss / sample.shape[axis] + + # Naming: variables ending in bn/b are for "between treatments", wn/w are + # for "within treatments" + ssbn = ssbn - normalized_ss + sswn = sstot - ssbn + dfbn = num_groups - 1 + dfwn = bign - num_groups + msb = ssbn / dfbn + msw = sswn / dfwn + with np.errstate(divide='ignore', invalid='ignore'): + f = msb / msw + + prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf + + # Fix any f values that should be inf or nan because the corresponding + # inputs were constant. + if np.isscalar(f): + if all_same_const: + f = np.nan + prob = np.nan + elif all_const: + f = np.inf + prob = 0.0 + else: + f[all_const] = np.inf + prob[all_const] = 0.0 + f[all_same_const] = np.nan + prob[all_same_const] = np.nan + + return F_onewayResult(f, prob) + + +@dataclass +class AlexanderGovernResult: + statistic: float + pvalue: float + + +@_axis_nan_policy_factory( + AlexanderGovernResult, n_samples=None, + result_to_tuple=lambda x: (x.statistic, x.pvalue), + too_small=1 +) +def alexandergovern(*samples, nan_policy='propagate'): + """Performs the Alexander Govern test. + + The Alexander-Govern approximation tests the equality of k independent + means in the face of heterogeneity of variance. The test is applied to + samples from two or more groups, possibly with differing sizes. + + Parameters + ---------- + sample1, sample2, ... : array_like + The sample measurements for each group. There must be at least + two samples. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + res : AlexanderGovernResult + An object with attributes: + + statistic : float + The computed A statistic of the test. + pvalue : float + The associated p-value from the chi-squared distribution. + + Warns + ----- + `~scipy.stats.ConstantInputWarning` + Raised if an input is a constant array. 
The statistic is not defined + in this case, so ``np.nan`` is returned. + + See Also + -------- + f_oneway : one-way ANOVA + + Notes + ----- + The use of this test relies on several assumptions. + + 1. The samples are independent. + 2. Each sample is from a normally distributed population. + 3. Unlike `f_oneway`, this test does not assume on homoscedasticity, + instead relaxing the assumption of equal variances. + + Input samples must be finite, one dimensional, and with size greater than + one. + + References + ---------- + .. [1] Alexander, Ralph A., and Diane M. Govern. "A New and Simpler + Approximation for ANOVA under Variance Heterogeneity." Journal + of Educational Statistics, vol. 19, no. 2, 1994, pp. 91-101. + JSTOR, www.jstor.org/stable/1165140. Accessed 12 Sept. 2020. + + Examples + -------- + >>> from scipy.stats import alexandergovern + + Here are some data on annual percentage rate of interest charged on + new car loans at nine of the largest banks in four American cities + taken from the National Institute of Standards and Technology's + ANOVA dataset. + + We use `alexandergovern` to test the null hypothesis that all cities + have the same mean APR against the alternative that the cities do not + all have the same mean APR. We decide that a significance level of 5% + is required to reject the null hypothesis in favor of the alternative. + + >>> atlanta = [13.75, 13.75, 13.5, 13.5, 13.0, 13.0, 13.0, 12.75, 12.5] + >>> chicago = [14.25, 13.0, 12.75, 12.5, 12.5, 12.4, 12.3, 11.9, 11.9] + >>> houston = [14.0, 14.0, 13.51, 13.5, 13.5, 13.25, 13.0, 12.5, 12.5] + >>> memphis = [15.0, 14.0, 13.75, 13.59, 13.25, 12.97, 12.5, 12.25, + ... 11.89] + >>> alexandergovern(atlanta, chicago, houston, memphis) + AlexanderGovernResult(statistic=4.65087071883494, + pvalue=0.19922132490385214) + + The p-value is 0.1992, indicating a nearly 20% chance of observing + such an extreme value of the test statistic under the null hypothesis. + This exceeds 5%, so we do not reject the null hypothesis in favor of + the alternative. + + """ + samples = _alexandergovern_input_validation(samples, nan_policy) + + if np.any([(sample == sample[0]).all() for sample in samples]): + msg = "An input array is constant; the statistic is not defined." + warnings.warn(stats.ConstantInputWarning(msg), stacklevel=2) + return AlexanderGovernResult(np.nan, np.nan) + + # The following formula numbers reference the equation described on + # page 92 by Alexander, Govern. Formulas 5, 6, and 7 describe other + # tests that serve as the basis for equation (8) but are not needed + # to perform the test. 
+ + # precalculate mean and length of each sample + lengths = np.array([len(sample) for sample in samples]) + means = np.array([np.mean(sample) for sample in samples]) + + # (1) determine standard error of the mean for each sample + standard_errors = [np.std(sample, ddof=1) / np.sqrt(length) + for sample, length in zip(samples, lengths)] + + # (2) define a weight for each sample + inv_sq_se = 1 / np.square(standard_errors) + weights = inv_sq_se / np.sum(inv_sq_se) + + # (3) determine variance-weighted estimate of the common mean + var_w = np.sum(weights * means) + + # (4) determine one-sample t statistic for each group + t_stats = (means - var_w)/standard_errors + + # calculate parameters to be used in transformation + v = lengths - 1 + a = v - .5 + b = 48 * a**2 + c = (a * np.log(1 + (t_stats ** 2)/v))**.5 + + # (8) perform a normalizing transformation on t statistic + z = (c + ((c**3 + 3*c)/b) - + ((4*c**7 + 33*c**5 + 240*c**3 + 855*c) / + (b**2*10 + 8*b*c**4 + 1000*b))) + + # (9) calculate statistic + A = np.sum(np.square(z)) + + # "[the p value is determined from] central chi-square random deviates + # with k - 1 degrees of freedom". Alexander, Govern (94) + p = distributions.chi2.sf(A, len(samples) - 1) + return AlexanderGovernResult(A, p) + + +def _alexandergovern_input_validation(samples, nan_policy): + if len(samples) < 2: + raise TypeError(f"2 or more inputs required, got {len(samples)}") + + for sample in samples: + if np.size(sample) <= 1: + raise ValueError("Input sample size must be greater than one.") + if np.isinf(sample).any(): + raise ValueError("Input samples must be finite.") + + return samples + + +def _pearsonr_fisher_ci(r, n, confidence_level, alternative): + """ + Compute the confidence interval for Pearson's R. + + Fisher's transformation is used to compute the confidence interval + (https://en.wikipedia.org/wiki/Fisher_transformation). + """ + if r == 1: + zr = np.inf + elif r == -1: + zr = -np.inf + else: + zr = np.arctanh(r) + + if n > 3: + se = np.sqrt(1 / (n - 3)) + if alternative == "two-sided": + h = special.ndtri(0.5 + confidence_level/2) + zlo = zr - h*se + zhi = zr + h*se + rlo = np.tanh(zlo) + rhi = np.tanh(zhi) + elif alternative == "less": + h = special.ndtri(confidence_level) + zhi = zr + h*se + rhi = np.tanh(zhi) + rlo = -1.0 + else: + # alternative == "greater": + h = special.ndtri(confidence_level) + zlo = zr - h*se + rlo = np.tanh(zlo) + rhi = 1.0 + else: + rlo, rhi = -1.0, 1.0 + + return ConfidenceInterval(low=rlo, high=rhi) + + +def _pearsonr_bootstrap_ci(confidence_level, method, x, y, alternative): + """ + Compute the confidence interval for Pearson's R using the bootstrap. + """ + def statistic(x, y): + statistic, _ = pearsonr(x, y) + return statistic + + res = bootstrap((x, y), statistic, confidence_level=confidence_level, + paired=True, alternative=alternative, **method._asdict()) + # for one-sided confidence intervals, bootstrap gives +/- inf on one side + res.confidence_interval = np.clip(res.confidence_interval, -1, 1) + + return ConfidenceInterval(*res.confidence_interval) + + +ConfidenceInterval = namedtuple('ConfidenceInterval', ['low', 'high']) + +PearsonRResultBase = _make_tuple_bunch('PearsonRResultBase', + ['statistic', 'pvalue'], []) + + +class PearsonRResult(PearsonRResultBase): + """ + Result of `scipy.stats.pearsonr` + + Attributes + ---------- + statistic : float + Pearson product-moment correlation coefficient. + pvalue : float + The p-value associated with the chosen alternative. 
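# A minimal sketch, reusing the example data from the `pearsonr` docstring
# below, of the two-sided Fisher-transformation interval implemented in
# `_pearsonr_fisher_ci` above: z = arctanh(r) is treated as approximately
# normal with standard error 1/sqrt(n - 3), and the limits are mapped back
# with tanh. The hand-rolled limits should match the method call.
import numpy as np
from scipy import special, stats

x = np.array([1., 2., 3., 4., 5., 6., 7.])
y = np.array([10., 9., 2.5, 6., 4., 3., 2.])

res = stats.pearsonr(x, y)
r, n, confidence_level = res.statistic, len(x), 0.95

zr = np.arctanh(r)
se = 1 / np.sqrt(n - 3)
h = special.ndtri(0.5 + confidence_level/2)   # two-sided normal quantile
print(np.tanh(zr - h*se), np.tanh(zr + h*se))
print(res.confidence_interval(confidence_level))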
+ + Methods + ------- + confidence_interval + Computes the confidence interval of the correlation + coefficient `statistic` for the given confidence level. + + """ + def __init__(self, statistic, pvalue, alternative, n, x, y): + super().__init__(statistic, pvalue) + self._alternative = alternative + self._n = n + self._x = x + self._y = y + + # add alias for consistency with other correlation functions + self.correlation = statistic + + def confidence_interval(self, confidence_level=0.95, method=None): + """ + The confidence interval for the correlation coefficient. + + Compute the confidence interval for the correlation coefficient + ``statistic`` with the given confidence level. + + If `method` is not provided, + The confidence interval is computed using the Fisher transformation + F(r) = arctanh(r) [1]_. When the sample pairs are drawn from a + bivariate normal distribution, F(r) approximately follows a normal + distribution with standard error ``1/sqrt(n - 3)``, where ``n`` is the + length of the original samples along the calculation axis. When + ``n <= 3``, this approximation does not yield a finite, real standard + error, so we define the confidence interval to be -1 to 1. + + If `method` is an instance of `BootstrapMethod`, the confidence + interval is computed using `scipy.stats.bootstrap` with the provided + configuration options and other appropriate settings. In some cases, + confidence limits may be NaN due to a degenerate resample, and this is + typical for very small samples (~6 observations). + + Parameters + ---------- + confidence_level : float + The confidence level for the calculation of the correlation + coefficient confidence interval. Default is 0.95. + + method : BootstrapMethod, optional + Defines the method used to compute the confidence interval. See + method description for details. + + .. versionadded:: 1.11.0 + + Returns + ------- + ci : namedtuple + The confidence interval is returned in a ``namedtuple`` with + fields `low` and `high`. + + References + ---------- + .. [1] "Pearson correlation coefficient", Wikipedia, + https://en.wikipedia.org/wiki/Pearson_correlation_coefficient + """ + if isinstance(method, BootstrapMethod): + ci = _pearsonr_bootstrap_ci(confidence_level, method, + self._x, self._y, self._alternative) + elif method is None: + ci = _pearsonr_fisher_ci(self.statistic, self._n, confidence_level, + self._alternative) + else: + message = ('`method` must be an instance of `BootstrapMethod` ' + 'or None.') + raise ValueError(message) + return ci + +def pearsonr(x, y, *, alternative='two-sided', method=None): + r""" + Pearson correlation coefficient and p-value for testing non-correlation. + + The Pearson correlation coefficient [1]_ measures the linear relationship + between two datasets. Like other correlation + coefficients, this one varies between -1 and +1 with 0 implying no + correlation. Correlations of -1 or +1 imply an exact linear relationship. + Positive correlations imply that as x increases, so does y. Negative + correlations imply that as x increases, y decreases. + + This function also performs a test of the null hypothesis that the + distributions underlying the samples are uncorrelated and normally + distributed. (See Kowalski [3]_ + for a discussion of the effects of non-normality of the input on the + distribution of the correlation coefficient.) 
+ The p-value roughly indicates the probability of an uncorrelated system + producing datasets that have a Pearson correlation at least as extreme + as the one computed from these datasets. + + Parameters + ---------- + x : (N,) array_like + Input array. + y : (N,) array_like + Input array. + alternative : {'two-sided', 'greater', 'less'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the correlation is nonzero + * 'less': the correlation is negative (less than zero) + * 'greater': the correlation is positive (greater than zero) + + .. versionadded:: 1.9.0 + method : ResamplingMethod, optional + Defines the method used to compute the p-value. If `method` is an + instance of `PermutationMethod`/`MonteCarloMethod`, the p-value is + computed using + `scipy.stats.permutation_test`/`scipy.stats.monte_carlo_test` with the + provided configuration options and other appropriate settings. + Otherwise, the p-value is computed as documented in the notes. + + .. versionadded:: 1.11.0 + + Returns + ------- + result : `~scipy.stats._result_classes.PearsonRResult` + An object with the following attributes: + + statistic : float + Pearson product-moment correlation coefficient. + pvalue : float + The p-value associated with the chosen alternative. + + The object has the following method: + + confidence_interval(confidence_level, method) + This computes the confidence interval of the correlation + coefficient `statistic` for the given confidence level. + The confidence interval is returned in a ``namedtuple`` with + fields `low` and `high`. If `method` is not provided, the + confidence interval is computed using the Fisher transformation + [1]_. If `method` is an instance of `BootstrapMethod`, the + confidence interval is computed using `scipy.stats.bootstrap` with + the provided configuration options and other appropriate settings. + In some cases, confidence limits may be NaN due to a degenerate + resample, and this is typical for very small samples (~6 + observations). + + Warns + ----- + `~scipy.stats.ConstantInputWarning` + Raised if an input is a constant array. The correlation coefficient + is not defined in this case, so ``np.nan`` is returned. + + `~scipy.stats.NearConstantInputWarning` + Raised if an input is "nearly" constant. The array ``x`` is considered + nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``. + Numerical errors in the calculation ``x - mean(x)`` in this case might + result in an inaccurate calculation of r. + + See Also + -------- + spearmanr : Spearman rank-order correlation coefficient. + kendalltau : Kendall's tau, a correlation measure for ordinal data. + + Notes + ----- + The correlation coefficient is calculated as follows: + + .. math:: + + r = \frac{\sum (x - m_x) (y - m_y)} + {\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}} + + where :math:`m_x` is the mean of the vector x and :math:`m_y` is + the mean of the vector y. + + Under the assumption that x and y are drawn from + independent normal distributions (so the population correlation coefficient + is 0), the probability density function of the sample correlation + coefficient r is ([1]_, [2]_): + + .. math:: + f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)} + + where n is the number of samples, and B is the beta function. This + is sometimes referred to as the exact distribution of r. 
This is + the distribution that is used in `pearsonr` to compute the p-value when + the `method` parameter is left at its default value (None). + The distribution is a beta distribution on the interval [-1, 1], + with equal shape parameters a = b = n/2 - 1. In terms of SciPy's + implementation of the beta distribution, the distribution of r is:: + + dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2) + + The default p-value returned by `pearsonr` is a two-sided p-value. For a + given sample with correlation coefficient r, the p-value is + the probability that abs(r') of a random sample x' and y' drawn from + the population with zero correlation would be greater than or equal + to abs(r). In terms of the object ``dist`` shown above, the p-value + for a given r and length n can be computed as:: + + p = 2*dist.cdf(-abs(r)) + + When n is 2, the above continuous distribution is not well-defined. + One can interpret the limit of the beta distribution as the shape + parameters a and b approach a = b = 0 as a discrete distribution with + equal probability masses at r = 1 and r = -1. More directly, one + can observe that, given the data x = [x1, x2] and y = [y1, y2], and + assuming x1 != x2 and y1 != y2, the only possible values for r are 1 + and -1. Because abs(r') for any sample x' and y' with length 2 will + be 1, the two-sided p-value for a sample of length 2 is always 1. + + For backwards compatibility, the object that is returned also behaves + like a tuple of length two that holds the statistic and the p-value. + + References + ---------- + .. [1] "Pearson correlation coefficient", Wikipedia, + https://en.wikipedia.org/wiki/Pearson_correlation_coefficient + .. [2] Student, "Probable error of a correlation coefficient", + Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310. + .. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution + of the Sample Product-Moment Correlation Coefficient" + Journal of the Royal Statistical Society. Series C (Applied + Statistics), Vol. 21, No. 1 (1972), pp. 1-12. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x, y = [1, 2, 3, 4, 5, 6, 7], [10, 9, 2.5, 6, 4, 3, 2] + >>> res = stats.pearsonr(x, y) + >>> res + PearsonRResult(statistic=-0.828503883588428, pvalue=0.021280260007523286) + + To perform an exact permutation version of the test: + + >>> rng = np.random.default_rng(7796654889291491997) + >>> method = stats.PermutationMethod(n_resamples=np.inf, random_state=rng) + >>> stats.pearsonr(x, y, method=method) + PearsonRResult(statistic=-0.828503883588428, pvalue=0.028174603174603175) + + To perform the test under the null hypothesis that the data were drawn from + *uniform* distributions: + + >>> method = stats.MonteCarloMethod(rvs=(rng.uniform, rng.uniform)) + >>> stats.pearsonr(x, y, method=method) + PearsonRResult(statistic=-0.828503883588428, pvalue=0.0188) + + To produce an asymptotic 90% confidence interval: + + >>> res.confidence_interval(confidence_level=0.9) + ConfidenceInterval(low=-0.9644331982722841, high=-0.3460237473272273) + + And for a bootstrap confidence interval: + + >>> method = stats.BootstrapMethod(method='BCa', random_state=rng) + >>> res.confidence_interval(confidence_level=0.9, method=method) + ConfidenceInterval(low=-0.9983163756488651, high=-0.22771001702132443) # may vary + + There is a linear dependence between x and y if y = a + b*x + e, where + a,b are constants and e is a random error term, assumed to be independent + of x. 
For simplicity, assume that x is standard normal, a=0, b=1 and let + e follow a normal distribution with mean zero and standard deviation s>0. + + >>> rng = np.random.default_rng() + >>> s = 0.5 + >>> x = stats.norm.rvs(size=500, random_state=rng) + >>> e = stats.norm.rvs(scale=s, size=500, random_state=rng) + >>> y = x + e + >>> stats.pearsonr(x, y).statistic + 0.9001942438244763 + + This should be close to the exact value given by + + >>> 1/np.sqrt(1 + s**2) + 0.8944271909999159 + + For s=0.5, we observe a high level of correlation. In general, a large + variance of the noise reduces the correlation, while the correlation + approaches one as the variance of the error goes to zero. + + It is important to keep in mind that no correlation does not imply + independence unless (x, y) is jointly normal. Correlation can even be zero + when there is a very simple dependence structure: if X follows a + standard normal distribution, let y = abs(x). Note that the correlation + between x and y is zero. Indeed, since the expectation of x is zero, + cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero + by symmetry. The following lines of code illustrate this observation: + + >>> y = np.abs(x) + >>> stats.pearsonr(x, y) + PearsonRResult(statistic=-0.05444919272687482, pvalue=0.22422294836207743) + + A non-zero correlation coefficient can be misleading. For example, if X has + a standard normal distribution, define y = x if x < 0 and y = 0 otherwise. + A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797..., + implying a high level of correlation: + + >>> y = np.where(x < 0, x, 0) + >>> stats.pearsonr(x, y) + PearsonRResult(statistic=0.861985781588, pvalue=4.813432002751103e-149) + + This is unintuitive since there is no dependence of x and y if x is larger + than zero which happens in about half of the cases if we sample x and y. + + """ + n = len(x) + if n != len(y): + raise ValueError('x and y must have the same length.') + + if n < 2: + raise ValueError('x and y must have length at least 2.') + + x = np.asarray(x) + y = np.asarray(y) + + if (np.issubdtype(x.dtype, np.complexfloating) + or np.issubdtype(y.dtype, np.complexfloating)): + raise ValueError('This function does not support complex data') + + # If an input is constant, the correlation coefficient is not defined. 
+ if (x == x[0]).all() or (y == y[0]).all(): + msg = ("An input array is constant; the correlation coefficient " + "is not defined.") + warnings.warn(stats.ConstantInputWarning(msg), stacklevel=2) + result = PearsonRResult(statistic=np.nan, pvalue=np.nan, n=n, + alternative=alternative, x=x, y=y) + return result + + if isinstance(method, PermutationMethod): + def statistic(y): + statistic, _ = pearsonr(x, y, alternative=alternative) + return statistic + + res = permutation_test((y,), statistic, permutation_type='pairings', + alternative=alternative, **method._asdict()) + + return PearsonRResult(statistic=res.statistic, pvalue=res.pvalue, n=n, + alternative=alternative, x=x, y=y) + elif isinstance(method, MonteCarloMethod): + def statistic(x, y): + statistic, _ = pearsonr(x, y, alternative=alternative) + return statistic + + if method.rvs is None: + rng = np.random.default_rng() + method.rvs = rng.normal, rng.normal + + res = monte_carlo_test((x, y,), statistic=statistic, + alternative=alternative, **method._asdict()) + + return PearsonRResult(statistic=res.statistic, pvalue=res.pvalue, n=n, + alternative=alternative, x=x, y=y) + elif method is not None: + message = ('`method` must be an instance of `PermutationMethod`,' + '`MonteCarloMethod`, or None.') + raise ValueError(message) + + # dtype is the data type for the calculations. This expression ensures + # that the data type is at least 64 bit floating point. It might have + # more precision if the input is, for example, np.longdouble. + dtype = type(1.0 + x[0] + y[0]) + + if n == 2: + r = dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0])) + result = PearsonRResult(statistic=r, pvalue=1.0, n=n, + alternative=alternative, x=x, y=y) + return result + + xmean = x.mean(dtype=dtype) + ymean = y.mean(dtype=dtype) + + # By using `astype(dtype)`, we ensure that the intermediate calculations + # use at least 64 bit floating point. + xm = x.astype(dtype) - xmean + ym = y.astype(dtype) - ymean + + # Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()), + # scipy.linalg.norm(xm) does not overflow if xm is, for example, + # [-5e210, 5e210, 3e200, -3e200] + normxm = linalg.norm(xm) + normym = linalg.norm(ym) + + threshold = 1e-13 + if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean): + # If all the values in x (likewise y) are very close to the mean, + # the loss of precision that occurs in the subtraction xm = x - xmean + # might result in large errors in r. + msg = ("An input array is nearly constant; the computed " + "correlation coefficient may be inaccurate.") + warnings.warn(stats.NearConstantInputWarning(msg), stacklevel=2) + + r = np.dot(xm/normxm, ym/normym) + + # Presumably, if abs(r) > 1, then it is only some small artifact of + # floating point arithmetic. + r = max(min(r, 1.0), -1.0) + + # As explained in the docstring, the distribution of `r` under the null + # hypothesis is the beta distribution on (-1, 1) with a = b = n/2 - 1. + ab = n/2 - 1 + dist = stats.beta(ab, ab, loc=-1, scale=2) + pvalue = _get_pvalue(r, dist, alternative) + + return PearsonRResult(statistic=r, pvalue=pvalue, n=n, + alternative=alternative, x=x, y=y) + + +def fisher_exact(table, alternative='two-sided'): + """Perform a Fisher exact test on a 2x2 contingency table. + + The null hypothesis is that the true odds ratio of the populations + underlying the observations is one, and the observations were sampled + from these populations under a condition: the marginals of the + resulting table must equal those of the observed table. 
The statistic + returned is the unconditional maximum likelihood estimate of the odds + ratio, and the p-value is the probability under the null hypothesis of + obtaining a table at least as extreme as the one that was actually + observed. There are other possible choices of statistic and two-sided + p-value definition associated with Fisher's exact test; please see the + Notes for more information. + + Parameters + ---------- + table : array_like of ints + A 2x2 contingency table. Elements must be non-negative integers. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the odds ratio of the underlying population is not one + * 'less': the odds ratio of the underlying population is less than one + * 'greater': the odds ratio of the underlying population is greater + than one + + See the Notes for more details. + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : float + This is the prior odds ratio, not a posterior estimate. + pvalue : float + The probability under the null hypothesis of obtaining a + table at least as extreme as the one that was actually observed. + + See Also + -------- + chi2_contingency : Chi-square test of independence of variables in a + contingency table. This can be used as an alternative to + `fisher_exact` when the numbers in the table are large. + contingency.odds_ratio : Compute the odds ratio (sample or conditional + MLE) for a 2x2 contingency table. + barnard_exact : Barnard's exact test, which is a more powerful alternative + than Fisher's exact test for 2x2 contingency tables. + boschloo_exact : Boschloo's exact test, which is a more powerful + alternative than Fisher's exact test for 2x2 contingency tables. + + Notes + ----- + *Null hypothesis and p-values* + + The null hypothesis is that the true odds ratio of the populations + underlying the observations is one, and the observations were sampled at + random from these populations under a condition: the marginals of the + resulting table must equal those of the observed table. Equivalently, + the null hypothesis is that the input table is from the hypergeometric + distribution with parameters (as used in `hypergeom`) + ``M = a + b + c + d``, ``n = a + b`` and ``N = a + c``, where the + input table is ``[[a, b], [c, d]]``. This distribution has support + ``max(0, N + n - M) <= x <= min(N, n)``, or, in terms of the values + in the input table, ``min(0, a - d) <= x <= a + min(b, c)``. ``x`` + can be interpreted as the upper-left element of a 2x2 table, so the + tables in the distribution have form:: + + [ x n - x ] + [N - x M - (n + N) + x] + + For example, if:: + + table = [6 2] + [1 4] + + then the support is ``2 <= x <= 7``, and the tables in the distribution + are:: + + [2 6] [3 5] [4 4] [5 3] [6 2] [7 1] + [5 0] [4 1] [3 2] [2 3] [1 4] [0 5] + + The probability of each table is given by the hypergeometric distribution + ``hypergeom.pmf(x, M, n, N)``. 
For this example, these are (rounded to + three significant digits):: + + x 2 3 4 5 6 7 + p 0.0163 0.163 0.408 0.326 0.0816 0.00466 + + These can be computed with:: + + >>> import numpy as np + >>> from scipy.stats import hypergeom + >>> table = np.array([[6, 2], [1, 4]]) + >>> M = table.sum() + >>> n = table[0].sum() + >>> N = table[:, 0].sum() + >>> start, end = hypergeom.support(M, n, N) + >>> hypergeom.pmf(np.arange(start, end+1), M, n, N) + array([0.01631702, 0.16317016, 0.40792541, 0.32634033, 0.08158508, + 0.004662 ]) + + The two-sided p-value is the probability that, under the null hypothesis, + a random table would have a probability equal to or less than the + probability of the input table. For our example, the probability of + the input table (where ``x = 6``) is 0.0816. The x values where the + probability does not exceed this are 2, 6 and 7, so the two-sided p-value + is ``0.0163 + 0.0816 + 0.00466 ~= 0.10256``:: + + >>> from scipy.stats import fisher_exact + >>> res = fisher_exact(table, alternative='two-sided') + >>> res.pvalue + 0.10256410256410257 + + The one-sided p-value for ``alternative='greater'`` is the probability + that a random table has ``x >= a``, which in our example is ``x >= 6``, + or ``0.0816 + 0.00466 ~= 0.08626``:: + + >>> res = fisher_exact(table, alternative='greater') + >>> res.pvalue + 0.08624708624708627 + + This is equivalent to computing the survival function of the + distribution at ``x = 5`` (one less than ``x`` from the input table, + because we want to include the probability of ``x = 6`` in the sum):: + + >>> hypergeom.sf(5, M, n, N) + 0.08624708624708627 + + For ``alternative='less'``, the one-sided p-value is the probability + that a random table has ``x <= a``, (i.e. ``x <= 6`` in our example), + or ``0.0163 + 0.163 + 0.408 + 0.326 + 0.0816 ~= 0.9949``:: + + >>> res = fisher_exact(table, alternative='less') + >>> res.pvalue + 0.9953379953379957 + + This is equivalent to computing the cumulative distribution function + of the distribution at ``x = 6``: + + >>> hypergeom.cdf(6, M, n, N) + 0.9953379953379957 + + *Odds ratio* + + The calculated odds ratio is different from the value computed by the + R function ``fisher.test``. This implementation returns the "sample" + or "unconditional" maximum likelihood estimate, while ``fisher.test`` + in R uses the conditional maximum likelihood estimate. To compute the + conditional maximum likelihood estimate of the odds ratio, use + `scipy.stats.contingency.odds_ratio`. + + References + ---------- + .. [1] Fisher, Sir Ronald A, "The Design of Experiments: + Mathematics of a Lady Tasting Tea." ISBN 978-0-486-41151-4, 1935. + .. [2] "Fisher's exact test", + https://en.wikipedia.org/wiki/Fisher's_exact_test + .. [3] Emma V. Low et al. "Identifying the lowest effective dose of + acetazolamide for the prophylaxis of acute mountain sickness: + systematic review and meta-analysis." + BMJ, 345, :doi:`10.1136/bmj.e6779`, 2012. + + Examples + -------- + In [3]_, the effective dose of acetazolamide for the prophylaxis of acute + mountain sickness was investigated. The study notably concluded: + + Acetazolamide 250 mg, 500 mg, and 750 mg daily were all efficacious for + preventing acute mountain sickness. Acetazolamide 250 mg was the lowest + effective dose with available evidence for this indication. + + The following table summarizes the results of the experiment in which + some participants took a daily dose of acetazolamide 250 mg while others + took a placebo. 
+ Cases of acute mountain sickness were recorded:: + + Acetazolamide Control/Placebo + Acute mountain sickness 7 17 + No 15 5 + + + Is there evidence that the acetazolamide 250 mg reduces the risk of + acute mountain sickness? + We begin by formulating a null hypothesis :math:`H_0`: + + The odds of experiencing acute mountain sickness are the same with + the acetazolamide treatment as they are with placebo. + + Let's assess the plausibility of this hypothesis with + Fisher's test. + + >>> from scipy.stats import fisher_exact + >>> res = fisher_exact([[7, 17], [15, 5]], alternative='less') + >>> res.statistic + 0.13725490196078433 + >>> res.pvalue + 0.0028841933752349743 + + Using a significance level of 5%, we would reject the null hypothesis in + favor of the alternative hypothesis: "The odds of experiencing acute + mountain sickness with acetazolamide treatment are less than the odds of + experiencing acute mountain sickness with placebo." + + .. note:: + + Because the null distribution of Fisher's exact test is formed under + the assumption that both row and column sums are fixed, the result of + the test are conservative when applied to an experiment in which the + row sums are not fixed. + + In this case, the column sums are fixed; there are 22 subjects in each + group. But the number of cases of acute mountain sickness is not + (and cannot be) fixed before conducting the experiment. It is a + consequence. + + Boschloo's test does not depend on the assumption that the row sums + are fixed, and consequently, it provides a more powerful test in this + situation. + + >>> from scipy.stats import boschloo_exact + >>> res = boschloo_exact([[7, 17], [15, 5]], alternative='less') + >>> res.statistic + 0.0028841933752349743 + >>> res.pvalue + 0.0015141406667567101 + + We verify that the p-value is less than with `fisher_exact`. + + """ + hypergeom = distributions.hypergeom + # int32 is not enough for the algorithm + c = np.asarray(table, dtype=np.int64) + if not c.shape == (2, 2): + raise ValueError("The input `table` must be of shape (2, 2).") + + if np.any(c < 0): + raise ValueError("All values in `table` must be nonnegative.") + + if 0 in c.sum(axis=0) or 0 in c.sum(axis=1): + # If both values in a row or column are zero, the p-value is 1 and + # the odds ratio is NaN. + return SignificanceResult(np.nan, 1.0) + + if c[1, 0] > 0 and c[0, 1] > 0: + oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1]) + else: + oddsratio = np.inf + + n1 = c[0, 0] + c[0, 1] + n2 = c[1, 0] + c[1, 1] + n = c[0, 0] + c[1, 0] + + def pmf(x): + return hypergeom.pmf(x, n1 + n2, n1, n) + + if alternative == 'less': + pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n) + elif alternative == 'greater': + # Same formula as the 'less' case, but with the second column. + pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1]) + elif alternative == 'two-sided': + mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2)) + pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n) + pmode = hypergeom.pmf(mode, n1 + n2, n1, n) + + epsilon = 1e-14 + gamma = 1 + epsilon + + if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= epsilon: + return SignificanceResult(oddsratio, 1.) 
+ + elif c[0, 0] < mode: + plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n) + if hypergeom.pmf(n, n1 + n2, n1, n) > pexact * gamma: + return SignificanceResult(oddsratio, plower) + + guess = _binary_search(lambda x: -pmf(x), -pexact * gamma, mode, n) + pvalue = plower + hypergeom.sf(guess, n1 + n2, n1, n) + else: + pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n) + if hypergeom.pmf(0, n1 + n2, n1, n) > pexact * gamma: + return SignificanceResult(oddsratio, pupper) + + guess = _binary_search(pmf, pexact * gamma, 0, mode) + pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n) + else: + msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}" + raise ValueError(msg) + + pvalue = min(pvalue, 1.0) + + return SignificanceResult(oddsratio, pvalue) + + +def spearmanr(a, b=None, axis=0, nan_policy='propagate', + alternative='two-sided'): + r"""Calculate a Spearman correlation coefficient with associated p-value. + + The Spearman rank-order correlation coefficient is a nonparametric measure + of the monotonicity of the relationship between two datasets. + Like other correlation coefficients, + this one varies between -1 and +1 with 0 implying no correlation. + Correlations of -1 or +1 imply an exact monotonic relationship. Positive + correlations imply that as x increases, so does y. Negative correlations + imply that as x increases, y decreases. + + The p-value roughly indicates the probability of an uncorrelated system + producing datasets that have a Spearman correlation at least as extreme + as the one computed from these datasets. Although calculation of the + p-value does not make strong assumptions about the distributions underlying + the samples, it is only accurate for very large samples (>500 + observations). For smaller sample sizes, consider a permutation test (see + Examples section below). + + Parameters + ---------- + a, b : 1D or 2D array_like, b is optional + One or two 1-D or 2-D arrays containing multiple variables and + observations. When these are 1-D, each represents a vector of + observations of a single variable. For the behavior in the 2-D case, + see under ``axis``, below. + Both arrays need to have the same length in the ``axis`` dimension. + axis : int or None, optional + If axis=0 (default), then each column represents a variable, with + observations in the rows. If axis=1, the relationship is transposed: + each row represents a variable, while the columns contain observations. + If axis=None, then both arrays will be raveled. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the correlation is nonzero + * 'less': the correlation is negative (less than zero) + * 'greater': the correlation is positive (greater than zero) + + .. versionadded:: 1.7.0 + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : float or ndarray (2-D square) + Spearman correlation matrix or correlation coefficient (if only 2 + variables are given as parameters). Correlation matrix is square + with length equal to total number of variables (columns or rows) in + ``a`` and ``b`` combined. 
+ pvalue : float + The p-value for a hypothesis test whose null hypothesis + is that two samples have no ordinal correlation. See + `alternative` above for alternative hypotheses. `pvalue` has the + same shape as `statistic`. + + Warns + ----- + `~scipy.stats.ConstantInputWarning` + Raised if an input is a constant array. The correlation coefficient + is not defined in this case, so ``np.nan`` is returned. + + References + ---------- + .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. + Section 14.7 + .. [2] Kendall, M. G. and Stuart, A. (1973). + The Advanced Theory of Statistics, Volume 2: Inference and Relationship. + Griffin. 1973. + Section 31.18 + .. [3] Kershenobich, D., Fierro, F. J., & Rojkind, M. (1970). The + relationship between the free pool of proline and collagen content in + human liver cirrhosis. The Journal of Clinical Investigation, 49(12), + 2246-2249. + .. [4] Hollander, M., Wolfe, D. A., & Chicken, E. (2013). Nonparametric + statistical methods. John Wiley & Sons. + .. [5] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly Drawn." + Statistical Applications in Genetics and Molecular Biology 9.1 (2010). + .. [6] Ludbrook, J., & Dudley, H. (1998). Why permutation tests are + superior to t and F tests in biomedical research. The American + Statistician, 52(2), 127-132. + + Examples + -------- + Consider the following data from [3]_, which studied the relationship + between free proline (an amino acid) and total collagen (a protein often + found in connective tissue) in unhealthy human livers. + + The ``x`` and ``y`` arrays below record measurements of the two compounds. + The observations are paired: each free proline measurement was taken from + the same liver as the total collagen measurement at the same index. + + >>> import numpy as np + >>> # total collagen (mg/g dry weight of liver) + >>> x = np.array([7.1, 7.1, 7.2, 8.3, 9.4, 10.5, 11.4]) + >>> # free proline (μ mole/g dry weight of liver) + >>> y = np.array([2.8, 2.9, 2.8, 2.6, 3.5, 4.6, 5.0]) + + These data were analyzed in [4]_ using Spearman's correlation coefficient, + a statistic sensitive to monotonic correlation between the samples. + + >>> from scipy import stats + >>> res = stats.spearmanr(x, y) + >>> res.statistic + 0.7000000000000001 + + The value of this statistic tends to be high (close to 1) for samples with + a strongly positive ordinal correlation, low (close to -1) for samples with + a strongly negative ordinal correlation, and small in magnitude (close to + zero) for samples with weak ordinal correlation. + + The test is performed by comparing the observed value of the + statistic against the null distribution: the distribution of statistic + values derived under the null hypothesis that total collagen and free + proline measurements are independent. + + For this test, the statistic can be transformed such that the null + distribution for large samples is Student's t distribution with + ``len(x) - 2`` degrees of freedom. + + >>> import matplotlib.pyplot as plt + >>> dof = len(x)-2 # len(x) == len(y) + >>> dist = stats.t(df=dof) + >>> t_vals = np.linspace(-5, 5, 100) + >>> pdf = dist.pdf(t_vals) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def plot(ax): # we'll reuse this + ... ax.plot(t_vals, pdf) + ... ax.set_title("Spearman's Rho Test Null Distribution") + ... ax.set_xlabel("statistic") + ... 
ax.set_ylabel("probability density") + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution as extreme or more extreme than the observed + value of the statistic. In a two-sided test in which the statistic is + positive, elements of the null distribution greater than the transformed + statistic and elements of the null distribution less than the negative of + the observed statistic are both considered "more extreme". + + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> rs = res.statistic # original statistic + >>> transformed = rs * np.sqrt(dof / ((rs+1.0)*(1.0-rs))) + >>> pvalue = dist.cdf(-transformed) + dist.sf(transformed) + >>> annotation = (f'p-value={pvalue:.4f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (2.7, 0.025), (3, 0.03), arrowprops=props) + >>> i = t_vals >= transformed + >>> ax.fill_between(t_vals[i], y1=0, y2=pdf[i], color='C0') + >>> i = t_vals <= -transformed + >>> ax.fill_between(t_vals[i], y1=0, y2=pdf[i], color='C0') + >>> ax.set_xlim(-5, 5) + >>> ax.set_ylim(0, 0.1) + >>> plt.show() + >>> res.pvalue + 0.07991669030889909 # two-sided p-value + + If the p-value is "small" - that is, if there is a low probability of + sampling data from independent distributions that produces such an extreme + value of the statistic - this may be taken as evidence against the null + hypothesis in favor of the alternative: the distribution of total collagen + and free proline are *not* independent. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [5]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + - Small p-values are not evidence for a *large* effect; rather, they can + only provide evidence for a "significant" effect, meaning that they are + unlikely to have occurred under the null hypothesis. + + Suppose that before performing the experiment, the authors had reason + to predict a positive correlation between the total collagen and free + proline measurements, and that they had chosen to assess the plausibility + of the null hypothesis against a one-sided alternative: free proline has a + positive ordinal correlation with total collagen. In this case, only those + values in the null distribution that are as great or greater than the + observed statistic are considered to be more extreme. + + >>> res = stats.spearmanr(x, y, alternative='greater') + >>> res.statistic + 0.7000000000000001 # same statistic + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> pvalue = dist.sf(transformed) + >>> annotation = (f'p-value={pvalue:.6f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (3, 0.018), (3.5, 0.03), arrowprops=props) + >>> i = t_vals >= transformed + >>> ax.fill_between(t_vals[i], y1=0, y2=pdf[i], color='C0') + >>> ax.set_xlim(1, 5) + >>> ax.set_ylim(0, 0.1) + >>> plt.show() + >>> res.pvalue + 0.03995834515444954 # one-sided p-value; half of the two-sided p-value + + Note that the t-distribution provides an asymptotic approximation of the + null distribution; it is only accurate for samples with many observations. 
+ For small samples, it may be more appropriate to perform a permutation + test: Under the null hypothesis that total collagen and free proline are + independent, each of the free proline measurements were equally likely to + have been observed with any of the total collagen measurements. Therefore, + we can form an *exact* null distribution by calculating the statistic under + each possible pairing of elements between ``x`` and ``y``. + + >>> def statistic(x): # explore all possible pairings by permuting `x` + ... rs = stats.spearmanr(x, y).statistic # ignore pvalue + ... transformed = rs * np.sqrt(dof / ((rs+1.0)*(1.0-rs))) + ... return transformed + >>> ref = stats.permutation_test((x,), statistic, alternative='greater', + ... permutation_type='pairings') + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> ax.hist(ref.null_distribution, np.linspace(-5, 5, 26), + ... density=True) + >>> ax.legend(['aymptotic approximation\n(many observations)', + ... f'exact \n({len(ref.null_distribution)} permutations)']) + >>> plt.show() + >>> ref.pvalue + 0.04563492063492063 # exact one-sided p-value + + """ + if axis is not None and axis > 1: + raise ValueError("spearmanr only handles 1-D or 2-D arrays, " + f"supplied axis argument {axis}, please use only " + "values 0, 1 or None for axis") + + a, axisout = _chk_asarray(a, axis) + if a.ndim > 2: + raise ValueError("spearmanr only handles 1-D or 2-D arrays") + + if b is None: + if a.ndim < 2: + raise ValueError("`spearmanr` needs at least 2 " + "variables to compare") + else: + # Concatenate a and b, so that we now only have to handle the case + # of a 2-D `a`. + b, _ = _chk_asarray(b, axis) + if axisout == 0: + a = np.column_stack((a, b)) + else: + a = np.vstack((a, b)) + + n_vars = a.shape[1 - axisout] + n_obs = a.shape[axisout] + if n_obs <= 1: + # Handle empty arrays or single observations. + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + warn_msg = ("An input array is constant; the correlation coefficient " + "is not defined.") + if axisout == 0: + if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all(): + # If an input is constant, the correlation coefficient + # is not defined. + warnings.warn(stats.ConstantInputWarning(warn_msg), stacklevel=2) + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + else: # case when axisout == 1 b/c a is 2 dim only + if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all(): + # If an input is constant, the correlation coefficient + # is not defined. 
+ warnings.warn(stats.ConstantInputWarning(warn_msg), stacklevel=2) + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + a_contains_nan, nan_policy = _contains_nan(a, nan_policy) + variable_has_nan = np.zeros(n_vars, dtype=bool) + if a_contains_nan: + if nan_policy == 'omit': + return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy, + alternative=alternative) + elif nan_policy == 'propagate': + if a.ndim == 1 or n_vars <= 2: + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + else: + # Keep track of variables with NaNs, set the outputs to NaN + # only for those variables + variable_has_nan = np.isnan(a).any(axis=axisout) + + a_ranked = np.apply_along_axis(rankdata, axisout, a) + rs = np.corrcoef(a_ranked, rowvar=axisout) + dof = n_obs - 2 # degrees of freedom + + # rs can have elements equal to 1, so avoid zero division warnings + with np.errstate(divide='ignore'): + # clip the small negative values possibly caused by rounding + # errors before taking the square root + t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0)) + + prob = _get_pvalue(t, distributions.t(dof), alternative) + + # For backwards compatibility, return scalars when comparing 2 columns + if rs.shape == (2, 2): + res = SignificanceResult(rs[1, 0], prob[1, 0]) + res.correlation = rs[1, 0] + return res + else: + rs[variable_has_nan, :] = np.nan + rs[:, variable_has_nan] = np.nan + res = SignificanceResult(rs[()], prob[()]) + res.correlation = rs + return res + + +def pointbiserialr(x, y): + r"""Calculate a point biserial correlation coefficient and its p-value. + + The point biserial correlation is used to measure the relationship + between a binary variable, x, and a continuous variable, y. Like other + correlation coefficients, this one varies between -1 and +1 with 0 + implying no correlation. Correlations of -1 or +1 imply a determinative + relationship. + + This function may be computed using a shortcut formula but produces the + same result as `pearsonr`. + + Parameters + ---------- + x : array_like of bools + Input array. + y : array_like + Input array. + + Returns + ------- + res: SignificanceResult + An object containing attributes: + + statistic : float + The R value. + pvalue : float + The two-sided p-value. + + Notes + ----- + `pointbiserialr` uses a t-test with ``n-1`` degrees of freedom. + It is equivalent to `pearsonr`. + + The value of the point-biserial correlation can be calculated from: + + .. math:: + + r_{pb} = \frac{\overline{Y_1} - \overline{Y_0}} + {s_y} + \sqrt{\frac{N_0 N_1} + {N (N - 1)}} + + Where :math:`\overline{Y_{0}}` and :math:`\overline{Y_{1}}` are means + of the metric observations coded 0 and 1 respectively; :math:`N_{0}` and + :math:`N_{1}` are number of observations coded 0 and 1 respectively; + :math:`N` is the total number of observations and :math:`s_{y}` is the + standard deviation of all the metric observations. + + A value of :math:`r_{pb}` that is significantly different from zero is + completely equivalent to a significant difference in means between the two + groups. Thus, an independent groups t Test with :math:`N-2` degrees of + freedom may be used to test whether :math:`r_{pb}` is nonzero. The + relation between the t-statistic for comparing two independent groups and + :math:`r_{pb}` is given by: + + .. math:: + + t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}} + + References + ---------- + .. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math. + Statist., Vol. 
20, no.1, pp. 125-126, 1949. + + .. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous + Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25, + np. 3, pp. 603-607, 1954. + + .. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef: + Statistics Reference Online (eds N. Balakrishnan, et al.), 2014. + :doi:`10.1002/9781118445112.stat06227` + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> a = np.array([0, 0, 0, 1, 1, 1, 1]) + >>> b = np.arange(7) + >>> stats.pointbiserialr(a, b) + (0.8660254037844386, 0.011724811003954652) + >>> stats.pearsonr(a, b) + (0.86602540378443871, 0.011724811003954626) + >>> np.corrcoef(a, b) + array([[ 1. , 0.8660254], + [ 0.8660254, 1. ]]) + + """ + rpb, prob = pearsonr(x, y) + # create result object with alias for backward compatibility + res = SignificanceResult(rpb, prob) + res.correlation = rpb + return res + + +@_deprecate_positional_args(version="1.14") +def kendalltau(x, y, *, initial_lexsort=_NoValue, nan_policy='propagate', + method='auto', variant='b', alternative='two-sided'): + r"""Calculate Kendall's tau, a correlation measure for ordinal data. + + Kendall's tau is a measure of the correspondence between two rankings. + Values close to 1 indicate strong agreement, and values close to -1 + indicate strong disagreement. This implements two variants of Kendall's + tau: tau-b (the default) and tau-c (also known as Stuart's tau-c). These + differ only in how they are normalized to lie within the range -1 to 1; + the hypothesis tests (their p-values) are identical. Kendall's original + tau-a is not implemented separately because both tau-b and tau-c reduce + to tau-a in the absence of ties. + + Parameters + ---------- + x, y : array_like + Arrays of rankings, of the same shape. If arrays are not 1-D, they + will be flattened to 1-D. + initial_lexsort : bool, optional, deprecated + This argument is unused. + + .. deprecated:: 1.10.0 + `kendalltau` keyword argument `initial_lexsort` is deprecated as it + is unused and will be removed in SciPy 1.14.0. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + method : {'auto', 'asymptotic', 'exact'}, optional + Defines which method is used to calculate the p-value [5]_. + The following options are available (default is 'auto'): + + * 'auto': selects the appropriate method based on a trade-off + between speed and accuracy + * 'asymptotic': uses a normal approximation valid for large samples + * 'exact': computes the exact p-value, but can only be used if no ties + are present. As the sample size increases, the 'exact' computation + time may grow and the result may lose some precision. + variant : {'b', 'c'}, optional + Defines which variant of Kendall's tau is returned. Default is 'b'. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the rank correlation is nonzero + * 'less': the rank correlation is negative (less than zero) + * 'greater': the rank correlation is positive (greater than zero) + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : float + The tau statistic. 
+ pvalue : float + The p-value for a hypothesis test whose null hypothesis is + an absence of association, tau = 0. + + See Also + -------- + spearmanr : Calculates a Spearman rank-order correlation coefficient. + theilslopes : Computes the Theil-Sen estimator for a set of points (x, y). + weightedtau : Computes a weighted version of Kendall's tau. + + Notes + ----- + The definition of Kendall's tau that is used is [2]_:: + + tau_b = (P - Q) / sqrt((P + Q + T) * (P + Q + U)) + + tau_c = 2 (P - Q) / (n**2 * (m - 1) / m) + + where P is the number of concordant pairs, Q the number of discordant + pairs, T the number of ties only in `x`, and U the number of ties only in + `y`. If a tie occurs for the same pair in both `x` and `y`, it is not + added to either T or U. n is the total number of samples, and m is the + number of unique values in either `x` or `y`, whichever is smaller. + + References + ---------- + .. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika + Vol. 30, No. 1/2, pp. 81-93, 1938. + .. [2] Maurice G. Kendall, "The treatment of ties in ranking problems", + Biometrika Vol. 33, No. 3, pp. 239-251. 1945. + .. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John + Wiley & Sons, 1967. + .. [4] Peter M. Fenwick, "A new data structure for cumulative frequency + tables", Software: Practice and Experience, Vol. 24, No. 3, + pp. 327-336, 1994. + .. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), + Charles Griffin & Co., 1970. + .. [6] Kershenobich, D., Fierro, F. J., & Rojkind, M. (1970). The + relationship between the free pool of proline and collagen content + in human liver cirrhosis. The Journal of Clinical Investigation, + 49(12), 2246-2249. + .. [7] Hollander, M., Wolfe, D. A., & Chicken, E. (2013). Nonparametric + statistical methods. John Wiley & Sons. + .. [8] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + + Examples + -------- + Consider the following data from [6]_, which studied the relationship + between free proline (an amino acid) and total collagen (a protein often + found in connective tissue) in unhealthy human livers. + + The ``x`` and ``y`` arrays below record measurements of the two compounds. + The observations are paired: each free proline measurement was taken from + the same liver as the total collagen measurement at the same index. + + >>> import numpy as np + >>> # total collagen (mg/g dry weight of liver) + >>> x = np.array([7.1, 7.1, 7.2, 8.3, 9.4, 10.5, 11.4]) + >>> # free proline (μ mole/g dry weight of liver) + >>> y = np.array([2.8, 2.9, 2.8, 2.6, 3.5, 4.6, 5.0]) + + These data were analyzed in [7]_ using Spearman's correlation coefficient, + a statistic similar to to Kendall's tau in that it is also sensitive to + ordinal correlation between the samples. Let's perform an analogous study + using Kendall's tau. + + >>> from scipy import stats + >>> res = stats.kendalltau(x, y) + >>> res.statistic + 0.5499999999999999 + + The value of this statistic tends to be high (close to 1) for samples with + a strongly positive ordinal correlation, low (close to -1) for samples with + a strongly negative ordinal correlation, and small in magnitude (close to + zero) for samples with weak ordinal correlation. 
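    To connect this value with the definition of tau-b given in the Notes
    above, the statistic can also be computed by brute force over all pairs
    of observations. This is a minimal sketch, practical only for very small
    samples:

    >>> from itertools import combinations
    >>> P = Q = T = U = 0
    >>> for i, j in combinations(range(len(x)), 2):
    ...     dx, dy = x[i] - x[j], y[i] - y[j]
    ...     if dx == 0 and dy == 0:
    ...         continue  # a tie in both x and y counts toward neither T nor U
    ...     elif dx == 0:
    ...         T += 1    # tie in x only
    ...     elif dy == 0:
    ...         U += 1    # tie in y only
    ...     elif dx * dy > 0:
    ...         P += 1    # concordant pair
    ...     else:
    ...         Q += 1    # discordant pair
    >>> (P - Q) / ((P + Q + T) * (P + Q + U)) ** 0.5
    0.55

    The tiny difference from ``res.statistic`` above is floating-point
    rounding; both values represent tau-b = 11/20.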
+ + The test is performed by comparing the observed value of the + statistic against the null distribution: the distribution of statistic + values derived under the null hypothesis that total collagen and free + proline measurements are independent. + + For this test, the null distribution for large samples without ties is + approximated as the normal distribution with variance + ``(2*(2*n + 5))/(9*n*(n - 1))``, where ``n = len(x)``. + + >>> import matplotlib.pyplot as plt + >>> n = len(x) # len(x) == len(y) + >>> var = (2*(2*n + 5))/(9*n*(n - 1)) + >>> dist = stats.norm(scale=np.sqrt(var)) + >>> z_vals = np.linspace(-1.25, 1.25, 100) + >>> pdf = dist.pdf(z_vals) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def plot(ax): # we'll reuse this + ... ax.plot(z_vals, pdf) + ... ax.set_title("Kendall Tau Test Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution as extreme or more extreme than the observed + value of the statistic. In a two-sided test in which the statistic is + positive, elements of the null distribution greater than the transformed + statistic and elements of the null distribution less than the negative of + the observed statistic are both considered "more extreme". + + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> pvalue = dist.cdf(-res.statistic) + dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.4f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (0.65, 0.15), (0.8, 0.3), arrowprops=props) + >>> i = z_vals >= res.statistic + >>> ax.fill_between(z_vals[i], y1=0, y2=pdf[i], color='C0') + >>> i = z_vals <= -res.statistic + >>> ax.fill_between(z_vals[i], y1=0, y2=pdf[i], color='C0') + >>> ax.set_xlim(-1.25, 1.25) + >>> ax.set_ylim(0, 0.5) + >>> plt.show() + >>> res.pvalue + 0.09108705741631495 # approximate p-value + + Note that there is slight disagreement between the shaded area of the curve + and the p-value returned by `kendalltau`. This is because our data has + ties, and we have neglected a tie correction to the null distribution + variance that `kendalltau` performs. For samples without ties, the shaded + areas of our plot and p-value returned by `kendalltau` would match exactly. + + If the p-value is "small" - that is, if there is a low probability of + sampling data from independent distributions that produces such an extreme + value of the statistic - this may be taken as evidence against the null + hypothesis in favor of the alternative: the distribution of total collagen + and free proline are *not* independent. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [8]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + - Small p-values are not evidence for a *large* effect; rather, they can + only provide evidence for a "significant" effect, meaning that they are + unlikely to have occurred under the null hypothesis. + + For samples without ties of moderate size, `kendalltau` can compute the + p-value exactly. 
However, in the presence of ties, `kendalltau` resorts + to an asymptotic approximation. Nonetheles, we can use a permutation test + to compute the null distribution exactly: Under the null hypothesis that + total collagen and free proline are independent, each of the free proline + measurements were equally likely to have been observed with any of the + total collagen measurements. Therefore, we can form an *exact* null + distribution by calculating the statistic under each possible pairing of + elements between ``x`` and ``y``. + + >>> def statistic(x): # explore all possible pairings by permuting `x` + ... return stats.kendalltau(x, y).statistic # ignore pvalue + >>> ref = stats.permutation_test((x,), statistic, + ... permutation_type='pairings') + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> bins = np.linspace(-1.25, 1.25, 25) + >>> ax.hist(ref.null_distribution, bins=bins, density=True) + >>> ax.legend(['aymptotic approximation\n(many observations)', + ... 'exact null distribution']) + >>> plot(ax) + >>> plt.show() + >>> ref.pvalue + 0.12222222222222222 # exact p-value + + Note that there is significant disagreement between the exact p-value + calculated here and the approximation returned by `kendalltau` above. For + small samples with ties, consider performing a permutation test for more + accurate results. + + """ + if initial_lexsort is not _NoValue: + msg = ("'kendalltau' keyword argument 'initial_lexsort' is deprecated" + " as it is unused and will be removed in SciPy 1.12.0.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + x = np.asarray(x).ravel() + y = np.asarray(y).ravel() + + if x.size != y.size: + raise ValueError("All inputs to `kendalltau` must be of the same " + f"size, found x-size {x.size} and y-size {y.size}") + elif not x.size or not y.size: + # Return NaN if arrays are empty + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + # check both x and y + cnx, npx = _contains_nan(x, nan_policy) + cny, npy = _contains_nan(y, nan_policy) + contains_nan = cnx or cny + if npx == 'omit' or npy == 'omit': + nan_policy = 'omit' + + if contains_nan and nan_policy == 'propagate': + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + elif contains_nan and nan_policy == 'omit': + x = ma.masked_invalid(x) + y = ma.masked_invalid(y) + if variant == 'b': + return mstats_basic.kendalltau(x, y, method=method, use_ties=True, + alternative=alternative) + else: + message = ("nan_policy='omit' is currently compatible only with " + "variant='b'.") + raise ValueError(message) + + def count_rank_tie(ranks): + cnt = np.bincount(ranks).astype('int64', copy=False) + cnt = cnt[cnt > 1] + # Python ints to avoid overflow down the line + return (int((cnt * (cnt - 1) // 2).sum()), + int((cnt * (cnt - 1.) * (cnt - 2)).sum()), + int((cnt * (cnt - 1.) 
* (2*cnt + 5)).sum())) + + size = x.size + perm = np.argsort(y) # sort on y and convert y to dense ranks + x, y = x[perm], y[perm] + y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp) + + # stable sort on x and convert x to dense ranks + perm = np.argsort(x, kind='mergesort') + x, y = x[perm], y[perm] + x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp) + + dis = _kendall_dis(x, y) # discordant pairs + + obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True] + cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False) + + ntie = int((cnt * (cnt - 1) // 2).sum()) # joint ties + xtie, x0, x1 = count_rank_tie(x) # ties in x, stats + ytie, y0, y1 = count_rank_tie(y) # ties in y, stats + + tot = (size * (size - 1)) // 2 + + if xtie == tot or ytie == tot: + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie + # = con + dis + xtie + ytie - ntie + con_minus_dis = tot - xtie - ytie + ntie - 2 * dis + if variant == 'b': + tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie) + elif variant == 'c': + minclasses = min(len(set(x)), len(set(y))) + tau = 2*con_minus_dis / (size**2 * (minclasses-1)/minclasses) + else: + raise ValueError(f"Unknown variant of the method chosen: {variant}. " + "variant must be 'b' or 'c'.") + + # Limit range to fix computational errors + tau = np.minimum(1., max(-1., tau)) + + # The p-value calculation is the same for all variants since the p-value + # depends only on con_minus_dis. + if method == 'exact' and (xtie != 0 or ytie != 0): + raise ValueError("Ties found, exact method cannot be used.") + + if method == 'auto': + if (xtie == 0 and ytie == 0) and (size <= 33 or + min(dis, tot-dis) <= 1): + method = 'exact' + else: + method = 'asymptotic' + + if xtie == 0 and ytie == 0 and method == 'exact': + pvalue = mstats_basic._kendall_p_exact(size, tot-dis, alternative) + elif method == 'asymptotic': + # con_minus_dis is approx normally distributed with this variance [3]_ + m = size * (size - 1.) + var = ((m * (2*size + 5) - x1 - y1) / 18 + + (2 * xtie * ytie) / m + x0 * y0 / (9 * m * (size - 2))) + z = con_minus_dis / np.sqrt(var) + pvalue = _get_pvalue(z, distributions.norm, alternative) + else: + raise ValueError(f"Unknown method {method} specified. Use 'auto', " + "'exact' or 'asymptotic'.") + + # create result object with alias for backward compatibility + res = SignificanceResult(tau[()], pvalue[()]) + res.correlation = tau[()] + return res + + +def weightedtau(x, y, rank=True, weigher=None, additive=True): + r"""Compute a weighted version of Kendall's :math:`\tau`. + + The weighted :math:`\tau` is a weighted version of Kendall's + :math:`\tau` in which exchanges of high weight are more influential than + exchanges of low weight. The default parameters compute the additive + hyperbolic version of the index, :math:`\tau_\mathrm h`, which has + been shown to provide the best balance between important and + unimportant elements [1]_. + + The weighting is defined by means of a rank array, which assigns a + nonnegative rank to each element (higher importance ranks being + associated with smaller values, e.g., 0 is the highest possible rank), + and a weigher function, which assigns a weight based on the rank to + each element. The weight of an exchange is then the sum or the product + of the weights of the ranks of the exchanged elements. 
The default + parameters compute :math:`\tau_\mathrm h`: an exchange between + elements with rank :math:`r` and :math:`s` (starting from zero) has + weight :math:`1/(r+1) + 1/(s+1)`. + + Specifying a rank array is meaningful only if you have in mind an + external criterion of importance. If, as it usually happens, you do + not have in mind a specific rank, the weighted :math:`\tau` is + defined by averaging the values obtained using the decreasing + lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the + behavior with default parameters. Note that the convention used + here for ranking (lower values imply higher importance) is opposite + to that used by other SciPy statistical functions. + + Parameters + ---------- + x, y : array_like + Arrays of scores, of the same shape. If arrays are not 1-D, they will + be flattened to 1-D. + rank : array_like of ints or bool, optional + A nonnegative rank assigned to each element. If it is None, the + decreasing lexicographical rank by (`x`, `y`) will be used: elements of + higher rank will be those with larger `x`-values, using `y`-values to + break ties (in particular, swapping `x` and `y` will give a different + result). If it is False, the element indices will be used + directly as ranks. The default is True, in which case this + function returns the average of the values obtained using the + decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`). + weigher : callable, optional + The weigher function. Must map nonnegative integers (zero + representing the most important element) to a nonnegative weight. + The default, None, provides hyperbolic weighing, that is, + rank :math:`r` is mapped to weight :math:`1/(r+1)`. + additive : bool, optional + If True, the weight of an exchange is computed by adding the + weights of the ranks of the exchanged elements; otherwise, the weights + are multiplied. The default is True. + + Returns + ------- + res: SignificanceResult + An object containing attributes: + + statistic : float + The weighted :math:`\tau` correlation index. + pvalue : float + Presently ``np.nan``, as the null distribution of the statistic is + unknown (even in the additive hyperbolic case). + + See Also + -------- + kendalltau : Calculates Kendall's tau. + spearmanr : Calculates a Spearman rank-order correlation coefficient. + theilslopes : Computes the Theil-Sen estimator for a set of points (x, y). + + Notes + ----- + This function uses an :math:`O(n \log n)`, mergesort-based algorithm + [1]_ that is a weighted extension of Knight's algorithm for Kendall's + :math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_ + between rankings without ties (i.e., permutations) by setting + `additive` and `rank` to False, as the definition given in [1]_ is a + generalization of Shieh's. + + NaNs are considered the smallest possible score. + + .. versionadded:: 0.19.0 + + References + ---------- + .. [1] Sebastiano Vigna, "A weighted correlation index for rankings with + ties", Proceedings of the 24th international conference on World + Wide Web, pp. 1166-1176, ACM, 2015. + .. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with + Ungrouped Data", Journal of the American Statistical Association, + Vol. 61, No. 314, Part 1, pp. 436-439, 1966. + .. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics & + Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> x = [12, 2, 1, 12, 2] + >>> y = [1, 4, 7, 1, 0] + >>> res = stats.weightedtau(x, y) + >>> res.statistic + -0.56694968153682723 + >>> res.pvalue + nan + >>> res = stats.weightedtau(x, y, additive=False) + >>> res.statistic + -0.62205716951801038 + + NaNs are considered the smallest possible score: + + >>> x = [12, 2, 1, 12, 2] + >>> y = [1, 4, 7, 1, np.nan] + >>> res = stats.weightedtau(x, y) + >>> res.statistic + -0.56694968153682723 + + This is exactly Kendall's tau: + + >>> x = [12, 2, 1, 12, 2] + >>> y = [1, 4, 7, 1, 0] + >>> res = stats.weightedtau(x, y, weigher=lambda x: 1) + >>> res.statistic + -0.47140452079103173 + + >>> x = [12, 2, 1, 12, 2] + >>> y = [1, 4, 7, 1, 0] + >>> stats.weightedtau(x, y, rank=None) + SignificanceResult(statistic=-0.4157652301037516, pvalue=nan) + >>> stats.weightedtau(y, x, rank=None) + SignificanceResult(statistic=-0.7181341329699028, pvalue=nan) + + """ + x = np.asarray(x).ravel() + y = np.asarray(y).ravel() + + if x.size != y.size: + raise ValueError("All inputs to `weightedtau` must be " + "of the same size, " + f"found x-size {x.size} and y-size {y.size}") + if not x.size: + # Return NaN if arrays are empty + res = SignificanceResult(np.nan, np.nan) + res.correlation = np.nan + return res + + # If there are NaNs we apply _toint64() + if np.isnan(np.sum(x)): + x = _toint64(x) + if np.isnan(np.sum(y)): + y = _toint64(y) + + # Reduce to ranks unsupported types + if x.dtype != y.dtype: + if x.dtype != np.int64: + x = _toint64(x) + if y.dtype != np.int64: + y = _toint64(y) + else: + if x.dtype not in (np.int32, np.int64, np.float32, np.float64): + x = _toint64(x) + y = _toint64(y) + + if rank is True: + tau = ( + _weightedrankedtau(x, y, None, weigher, additive) + + _weightedrankedtau(y, x, None, weigher, additive) + ) / 2 + res = SignificanceResult(tau, np.nan) + res.correlation = tau + return res + + if rank is False: + rank = np.arange(x.size, dtype=np.intp) + elif rank is not None: + rank = np.asarray(rank).ravel() + if rank.size != x.size: + raise ValueError( + "All inputs to `weightedtau` must be of the same size, " + f"found x-size {x.size} and rank-size {rank.size}" + ) + + tau = _weightedrankedtau(x, y, rank, weigher, additive) + res = SignificanceResult(tau, np.nan) + res.correlation = tau + return res + + +# FROM MGCPY: https://github.com/neurodata/mgcpy + + +class _ParallelP: + """Helper function to calculate parallel p-value.""" + + def __init__(self, x, y, random_states): + self.x = x + self.y = y + self.random_states = random_states + + def __call__(self, index): + order = self.random_states[index].permutation(self.y.shape[0]) + permy = self.y[order][:, order] + + # calculate permuted stats, store in null distribution + perm_stat = _mgc_stat(self.x, permy)[0] + + return perm_stat + + +def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None): + r"""Helper function that calculates the p-value. See below for uses. + + Parameters + ---------- + x, y : ndarray + `x` and `y` have shapes `(n, p)` and `(n, q)`. + stat : float + The sample test statistic. + reps : int, optional + The number of replications used to estimate the null when using the + permutation test. The default is 1000 replications. + workers : int or map-like callable, optional + If `workers` is an int the population is subdivided into `workers` + sections and evaluated in parallel (uses + `multiprocessing.Pool `). Supply `-1` to use all cores + available to the Process. 
Alternatively supply a map-like callable, + such as `multiprocessing.Pool.map` for evaluating the population in + parallel. This evaluation is carried out as `workers(func, iterable)`. + Requires that `func` be pickleable. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Returns + ------- + pvalue : float + The sample test p-value. + null_dist : list + The approximated null distribution. + + """ + # generate seeds for each rep (change to new parallel random number + # capabilities in numpy >= 1.17+) + random_state = check_random_state(random_state) + random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32, + size=4, dtype=np.uint32)) for _ in range(reps)] + + # parallelizes with specified workers over number of reps and set seeds + parallelp = _ParallelP(x=x, y=y, random_states=random_states) + with MapWrapper(workers) as mapwrapper: + null_dist = np.array(list(mapwrapper(parallelp, range(reps)))) + + # calculate p-value and significant permutation map through list + pvalue = (1 + (null_dist >= stat).sum()) / (1 + reps) + + return pvalue, null_dist + + +def _euclidean_dist(x): + return cdist(x, x) + + +MGCResult = _make_tuple_bunch('MGCResult', + ['statistic', 'pvalue', 'mgc_dict'], []) + + +def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000, + workers=1, is_twosamp=False, random_state=None): + r"""Computes the Multiscale Graph Correlation (MGC) test statistic. + + Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for + one property (e.g. cloud density), and the :math:`l`-nearest neighbors for + the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is + called the "scale". A priori, however, it is not know which scales will be + most informative. So, MGC computes all distance pairs, and then efficiently + computes the distance correlations for all scales. The local correlations + illustrate which scales are relatively informative about the relationship. + The key, therefore, to successfully discover and decipher relationships + between disparate data modalities is to adaptively determine which scales + are the most informative, and the geometric implication for the most + informative scales. Doing so not only provides an estimate of whether the + modalities are related, but also provides insight into how the + determination was made. This is especially important in high-dimensional + data, where simple visualizations do not reveal relationships to the + unaided human eye. Characterizations of this implementation in particular + have been derived from and benchmarked within in [2]_. + + Parameters + ---------- + x, y : ndarray + If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is + the number of samples and `p` and `q` are the number of dimensions, + then the MGC independence test will be run. Alternatively, ``x`` and + ``y`` can have shapes ``(n, n)`` if they are distance or similarity + matrices, and ``compute_distance`` must be sent to ``None``. If ``x`` + and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired + two-sample MGC test will be run. 
+ compute_distance : callable, optional + A function that computes the distance or similarity among the samples + within each data matrix. Set to ``None`` if ``x`` and ``y`` are + already distance matrices. The default uses the euclidean norm metric. + If you are calling a custom function, either create the distance + matrix before-hand or create a function of the form + ``compute_distance(x)`` where `x` is the data matrix for which + pairwise distances are calculated. + reps : int, optional + The number of replications used to estimate the null when using the + permutation test. The default is ``1000``. + workers : int or map-like callable, optional + If ``workers`` is an int the population is subdivided into ``workers`` + sections and evaluated in parallel (uses ``multiprocessing.Pool + ``). Supply ``-1`` to use all cores available to the + Process. Alternatively supply a map-like callable, such as + ``multiprocessing.Pool.map`` for evaluating the p-value in parallel. + This evaluation is carried out as ``workers(func, iterable)``. + Requires that `func` be pickleable. The default is ``1``. + is_twosamp : bool, optional + If `True`, a two sample test will be run. If ``x`` and ``y`` have + shapes ``(n, p)`` and ``(m, p)``, this optional will be overridden and + set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes + ``(n, p)`` and a two sample test is desired. The default is ``False``. + Note that this will not run if inputs are distance matrices. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Returns + ------- + res : MGCResult + An object containing attributes: + + statistic : float + The sample MGC test statistic within `[-1, 1]`. + pvalue : float + The p-value obtained via permutation. + mgc_dict : dict + Contains additional useful results: + + - mgc_map : ndarray + A 2D representation of the latent geometry of the + relationship. + - opt_scale : (int, int) + The estimated optimal scale as a `(x, y)` pair. + - null_dist : list + The null distribution derived from the permuted matrices. + + See Also + -------- + pearsonr : Pearson correlation coefficient and p-value for testing + non-correlation. + kendalltau : Calculates Kendall's tau. + spearmanr : Calculates a Spearman rank-order correlation coefficient. + + Notes + ----- + A description of the process of MGC and applications on neuroscience data + can be found in [1]_. It is performed using the following steps: + + #. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and + modified to be mean zero columnwise. This results in two + :math:`n \times n` distance matrices :math:`A` and :math:`B` (the + centering and unbiased modification) [3]_. + + #. For all values :math:`k` and :math:`l` from :math:`1, ..., n`, + + * The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs + are calculated for each property. Here, :math:`G_k (i, j)` indicates + the :math:`k`-smallest values of the :math:`i`-th row of :math:`A` + and :math:`H_l (i, j)` indicates the :math:`l` smallested values of + the :math:`i`-th row of :math:`B` + + * Let :math:`\circ` denotes the entry-wise matrix product, then local + correlations are summed and normalized using the following statistic: + + .. 
math:: + + c^{kl} = \frac{\sum_{ij} A G_k B H_l} + {\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}} + + #. The MGC test statistic is the smoothed optimal local correlation of + :math:`\{ c^{kl} \}`. Denote the smoothing operation as :math:`R(\cdot)` + (which essentially set all isolated large correlations) as 0 and + connected large correlations the same as before, see [3]_.) MGC is, + + .. math:: + + MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right) + \right) + + The test statistic returns a value between :math:`(-1, 1)` since it is + normalized. + + The p-value returned is calculated using a permutation test. This process + is completed by first randomly permuting :math:`y` to estimate the null + distribution and then calculating the probability of observing a test + statistic, under the null, at least as extreme as the observed test + statistic. + + MGC requires at least 5 samples to run with reliable results. It can also + handle high-dimensional data sets. + In addition, by manipulating the input data matrices, the two-sample + testing problem can be reduced to the independence testing problem [4]_. + Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n` + :math:`p \times m`, data matrix :math:`X` and :math:`Y` can be created as + follows: + + .. math:: + + X = [U | V] \in \mathcal{R}^{p \times (n + m)} + Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)} + + Then, the MGC statistic can be calculated as normal. This methodology can + be extended to similar tests such as distance correlation [4]_. + + .. versionadded:: 1.4.0 + + References + ---------- + .. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E., + Maggioni, M., & Shen, C. (2019). Discovering and deciphering + relationships across disparate data modalities. ELife. + .. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A., + Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019). + mgcpy: A Comprehensive High Dimensional Independence Testing Python + Package. :arXiv:`1907.02088` + .. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance + correlation to multiscale graph correlation. Journal of the American + Statistical Association. + .. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of + Distance and Kernel Methods for Hypothesis Testing. 
+ :arXiv:`1806.05514` + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import multiscale_graphcorr + >>> x = np.arange(100) + >>> y = x + >>> res = multiscale_graphcorr(x, y) + >>> res.statistic, res.pvalue + (1.0, 0.001) + + To run an unpaired two-sample test, + + >>> x = np.arange(100) + >>> y = np.arange(79) + >>> res = multiscale_graphcorr(x, y) + >>> res.statistic, res.pvalue # doctest: +SKIP + (0.033258146255703246, 0.023) + + or, if shape of the inputs are the same, + + >>> x = np.arange(100) + >>> y = x + >>> res = multiscale_graphcorr(x, y, is_twosamp=True) + >>> res.statistic, res.pvalue # doctest: +SKIP + (-0.008021809890200488, 1.0) + + """ + if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray): + raise ValueError("x and y must be ndarrays") + + # convert arrays of type (n,) to (n, 1) + if x.ndim == 1: + x = x[:, np.newaxis] + elif x.ndim != 2: + raise ValueError(f"Expected a 2-D array `x`, found shape {x.shape}") + if y.ndim == 1: + y = y[:, np.newaxis] + elif y.ndim != 2: + raise ValueError(f"Expected a 2-D array `y`, found shape {y.shape}") + + nx, px = x.shape + ny, py = y.shape + + # check for NaNs + _contains_nan(x, nan_policy='raise') + _contains_nan(y, nan_policy='raise') + + # check for positive or negative infinity and raise error + if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0: + raise ValueError("Inputs contain infinities") + + if nx != ny: + if px == py: + # reshape x and y for two sample testing + is_twosamp = True + else: + raise ValueError("Shape mismatch, x and y must have shape [n, p] " + "and [n, q] or have shape [n, p] and [m, p].") + + if nx < 5 or ny < 5: + raise ValueError("MGC requires at least 5 samples to give reasonable " + "results.") + + # convert x and y to float + x = x.astype(np.float64) + y = y.astype(np.float64) + + # check if compute_distance_matrix if a callable() + if not callable(compute_distance) and compute_distance is not None: + raise ValueError("Compute_distance must be a function.") + + # check if number of reps exists, integer, or > 0 (if under 1000 raises + # warning) + if not isinstance(reps, int) or reps < 0: + raise ValueError("Number of reps must be an integer greater than 0.") + elif reps < 1000: + msg = ("The number of replications is low (under 1000), and p-value " + "calculations may be unreliable. Use the p-value result, with " + "caution!") + warnings.warn(msg, RuntimeWarning, stacklevel=2) + + if is_twosamp: + if compute_distance is None: + raise ValueError("Cannot run if inputs are distance matrices") + x, y = _two_sample_transform(x, y) + + if compute_distance is not None: + # compute distance matrices for x and y + x = compute_distance(x) + y = compute_distance(y) + + # calculate MGC stat + stat, stat_dict = _mgc_stat(x, y) + stat_mgc_map = stat_dict["stat_mgc_map"] + opt_scale = stat_dict["opt_scale"] + + # calculate permutation MGC p-value + pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers, + random_state=random_state) + + # save all stats (other than stat/p-value) in dictionary + mgc_dict = {"mgc_map": stat_mgc_map, + "opt_scale": opt_scale, + "null_dist": null_dist} + + # create result object with alias for backward compatibility + res = MGCResult(stat, pvalue, mgc_dict) + res.stat = stat + return res + + +def _mgc_stat(distx, disty): + r"""Helper function that calculates the MGC stat. See above for use. 
+ + Parameters + ---------- + distx, disty : ndarray + `distx` and `disty` have shapes `(n, p)` and `(n, q)` or + `(n, n)` and `(n, n)` + if distance matrices. + + Returns + ------- + stat : float + The sample MGC test statistic within `[-1, 1]`. + stat_dict : dict + Contains additional useful additional returns containing the following + keys: + + - stat_mgc_map : ndarray + MGC-map of the statistics. + - opt_scale : (float, float) + The estimated optimal scale as a `(x, y)` pair. + + """ + # calculate MGC map and optimal scale + stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc') + + n, m = stat_mgc_map.shape + if m == 1 or n == 1: + # the global scale at is the statistic calculated at maximial nearest + # neighbors. There is not enough local scale to search over, so + # default to global scale + stat = stat_mgc_map[m - 1][n - 1] + opt_scale = m * n + else: + samp_size = len(distx) - 1 + + # threshold to find connected region of significant local correlations + sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size) + + # maximum within the significant region + stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map) + + stat_dict = {"stat_mgc_map": stat_mgc_map, + "opt_scale": opt_scale} + + return stat, stat_dict + + +def _threshold_mgc_map(stat_mgc_map, samp_size): + r""" + Finds a connected region of significance in the MGC-map by thresholding. + + Parameters + ---------- + stat_mgc_map : ndarray + All local correlations within `[-1,1]`. + samp_size : int + The sample size of original data. + + Returns + ------- + sig_connect : ndarray + A binary matrix with 1's indicating the significant region. + + """ + m, n = stat_mgc_map.shape + + # 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05 + # with varying levels of performance. Threshold is based on a beta + # approximation. + per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant + threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation + threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1 + + # the global scale at is the statistic calculated at maximial nearest + # neighbors. Threshold is the maximum on the global and local scales + threshold = max(threshold, stat_mgc_map[m - 1][n - 1]) + + # find the largest connected component of significant correlations + sig_connect = stat_mgc_map > threshold + if np.sum(sig_connect) > 0: + sig_connect, _ = _measurements.label(sig_connect) + _, label_counts = np.unique(sig_connect, return_counts=True) + + # skip the first element in label_counts, as it is count(zeros) + max_label = np.argmax(label_counts[1:]) + 1 + sig_connect = sig_connect == max_label + else: + sig_connect = np.array([[False]]) + + return sig_connect + + +def _smooth_mgc_map(sig_connect, stat_mgc_map): + """Finds the smoothed maximal within the significant region R. + + If area of R is too small it returns the last local correlation. Otherwise, + returns the maximum within significant_connected_region. + + Parameters + ---------- + sig_connect : ndarray + A binary matrix with 1's indicating the significant region. + stat_mgc_map : ndarray + All local correlations within `[-1, 1]`. + + Returns + ------- + stat : float + The sample MGC statistic within `[-1, 1]`. + opt_scale: (float, float) + The estimated optimal scale as an `(x, y)` pair. + + """ + m, n = stat_mgc_map.shape + + # the global scale at is the statistic calculated at maximial nearest + # neighbors. By default, statistic and optimal scale are global. 
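    # stat_mgc_map[m - 1][n - 1] is the local correlation computed with the
    # largest possible neighborhood for both properties, i.e. the global
    # correlation; it is kept unless a sufficiently large significant region
    # below contains a larger local correlation.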
+ stat = stat_mgc_map[m - 1][n - 1] + opt_scale = [m, n] + + if np.linalg.norm(sig_connect) != 0: + # proceed only when the connected region's area is sufficiently large + # 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05 + # with varying levels of performance + if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n): + max_corr = max(stat_mgc_map[sig_connect]) + + # find all scales within significant_connected_region that maximize + # the local correlation + max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect) + + if max_corr >= stat: + stat = max_corr + + k, l = max_corr_index + one_d_indices = k * n + l # 2D to 1D indexing + k = np.max(one_d_indices) // n + l = np.max(one_d_indices) % n + opt_scale = [k+1, l+1] # adding 1s to match R indexing + + return stat, opt_scale + + +def _two_sample_transform(u, v): + """Helper function that concatenates x and y for two sample MGC stat. + + See above for use. + + Parameters + ---------- + u, v : ndarray + `u` and `v` have shapes `(n, p)` and `(m, p)`. + + Returns + ------- + x : ndarray + Concatenate `u` and `v` along the `axis = 0`. `x` thus has shape + `(2n, p)`. + y : ndarray + Label matrix for `x` where 0 refers to samples that comes from `u` and + 1 refers to samples that come from `v`. `y` thus has shape `(2n, 1)`. + + """ + nx = u.shape[0] + ny = v.shape[0] + x = np.concatenate([u, v], axis=0) + y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1) + return x, y + + +##################################### +# INFERENTIAL STATISTICS # +##################################### + +TtestResultBase = _make_tuple_bunch('TtestResultBase', + ['statistic', 'pvalue'], ['df']) + + +class TtestResult(TtestResultBase): + """ + Result of a t-test. + + See the documentation of the particular t-test function for more + information about the definition of the statistic and meaning of + the confidence interval. + + Attributes + ---------- + statistic : float or array + The t-statistic of the sample. + pvalue : float or array + The p-value associated with the given alternative. + df : float or array + The number of degrees of freedom used in calculation of the + t-statistic; this is one less than the size of the sample + (``a.shape[axis]-1`` if there are no masked elements or omitted NaNs). + + Methods + ------- + confidence_interval + Computes a confidence interval around the population statistic + for the given confidence level. + The confidence interval is returned in a ``namedtuple`` with + fields `low` and `high`. + + """ + + def __init__(self, statistic, pvalue, df, # public + alternative, standard_error, estimate): # private + super().__init__(statistic, pvalue, df=df) + self._alternative = alternative + self._standard_error = standard_error # denominator of t-statistic + self._estimate = estimate # point estimate of sample mean + + def confidence_interval(self, confidence_level=0.95): + """ + Parameters + ---------- + confidence_level : float + The confidence level for the calculation of the population mean + confidence interval. Default is 0.95. + + Returns + ------- + ci : namedtuple + The confidence interval is returned in a ``namedtuple`` with + fields `low` and `high`. 
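        Examples
        --------
        A minimal sketch of typical usage; the sample values below are
        arbitrary:

        >>> import numpy as np
        >>> from scipy import stats
        >>> sample = np.array([1.2, 1.9, 2.4, 2.8, 3.1])
        >>> res = stats.ttest_1samp(sample, popmean=2.0)
        >>> ci = res.confidence_interval(confidence_level=0.99)
        >>> bool(ci.low < sample.mean() < ci.high)
        True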
+ + """ + low, high = _t_confidence_interval(self.df, self.statistic, + confidence_level, self._alternative) + low = low * self._standard_error + self._estimate + high = high * self._standard_error + self._estimate + return ConfidenceInterval(low=low, high=high) + + +def pack_TtestResult(statistic, pvalue, df, alternative, standard_error, + estimate): + # this could be any number of dimensions (including 0d), but there is + # at most one unique non-NaN value + alternative = np.atleast_1d(alternative) # can't index 0D object + alternative = alternative[np.isfinite(alternative)] + alternative = alternative[0] if alternative.size else np.nan + return TtestResult(statistic, pvalue, df=df, alternative=alternative, + standard_error=standard_error, estimate=estimate) + + +def unpack_TtestResult(res): + return (res.statistic, res.pvalue, res.df, res._alternative, + res._standard_error, res._estimate) + + +@_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2, + result_to_tuple=unpack_TtestResult, n_outputs=6) +def ttest_1samp(a, popmean, axis=0, nan_policy='propagate', + alternative="two-sided"): + """Calculate the T-test for the mean of ONE group of scores. + + This is a test for the null hypothesis that the expected value + (mean) of a sample of independent observations `a` is equal to the given + population mean, `popmean`. + + Parameters + ---------- + a : array_like + Sample observations. + popmean : float or array_like + Expected value in null hypothesis. If array_like, then its length along + `axis` must equal 1, and it must otherwise be broadcastable with `a`. + axis : int or None, optional + Axis along which to compute test; default is 0. If None, compute over + the whole array `a`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the mean of the underlying distribution of the sample + is different than the given population mean (`popmean`) + * 'less': the mean of the underlying distribution of the sample is + less than the given population mean (`popmean`) + * 'greater': the mean of the underlying distribution of the sample is + greater than the given population mean (`popmean`) + + Returns + ------- + result : `~scipy.stats._result_classes.TtestResult` + An object with the following attributes: + + statistic : float or array + The t-statistic. + pvalue : float or array + The p-value associated with the given alternative. + df : float or array + The number of degrees of freedom used in calculation of the + t-statistic; this is one less than the size of the sample + (``a.shape[axis]``). + + .. versionadded:: 1.10.0 + + The object also has the following method: + + confidence_interval(confidence_level=0.95) + Computes a confidence interval around the population + mean for the given confidence level. + The confidence interval is returned in a ``namedtuple`` with + fields `low` and `high`. + + .. versionadded:: 1.10.0 + + Notes + ----- + The statistic is calculated as ``(np.mean(a) - popmean)/se``, where + ``se`` is the standard error. 
Therefore, the statistic will be positive + when the sample mean is greater than the population mean and negative when + the sample mean is less than the population mean. + + Examples + -------- + Suppose we wish to test the null hypothesis that the mean of a population + is equal to 0.5. We choose a confidence level of 99%; that is, we will + reject the null hypothesis in favor of the alternative if the p-value is + less than 0.01. + + When testing random variates from the standard uniform distribution, which + has a mean of 0.5, we expect the data to be consistent with the null + hypothesis most of the time. + + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> rvs = stats.uniform.rvs(size=50, random_state=rng) + >>> stats.ttest_1samp(rvs, popmean=0.5) + TtestResult(statistic=2.456308468440, pvalue=0.017628209047638, df=49) + + As expected, the p-value of 0.017 is not below our threshold of 0.01, so + we cannot reject the null hypothesis. + + When testing data from the standard *normal* distribution, which has a mean + of 0, we would expect the null hypothesis to be rejected. + + >>> rvs = stats.norm.rvs(size=50, random_state=rng) + >>> stats.ttest_1samp(rvs, popmean=0.5) + TtestResult(statistic=-7.433605518875, pvalue=1.416760157221e-09, df=49) + + Indeed, the p-value is lower than our threshold of 0.01, so we reject the + null hypothesis in favor of the default "two-sided" alternative: the mean + of the population is *not* equal to 0.5. + + However, suppose we were to test the null hypothesis against the + one-sided alternative that the mean of the population is *greater* than + 0.5. Since the mean of the standard normal is less than 0.5, we would not + expect the null hypothesis to be rejected. + + >>> stats.ttest_1samp(rvs, popmean=0.5, alternative='greater') + TtestResult(statistic=-7.433605518875, pvalue=0.99999999929, df=49) + + Unsurprisingly, with a p-value greater than our threshold, we would not + reject the null hypothesis. + + Note that when working with a confidence level of 99%, a true null + hypothesis will be rejected approximately 1% of the time. + + >>> rvs = stats.uniform.rvs(size=(100, 50), random_state=rng) + >>> res = stats.ttest_1samp(rvs, popmean=0.5, axis=1) + >>> np.sum(res.pvalue < 0.01) + 1 + + Indeed, even though all 100 samples above were drawn from the standard + uniform distribution, which *does* have a population mean of 0.5, we would + mistakenly reject the null hypothesis for one of them. + + `ttest_1samp` can also compute a confidence interval around the population + mean. + + >>> rvs = stats.norm.rvs(size=50, random_state=rng) + >>> res = stats.ttest_1samp(rvs, popmean=0) + >>> ci = res.confidence_interval(confidence_level=0.95) + >>> ci + ConfidenceInterval(low=-0.3193887540880017, high=0.2898583388980972) + + The bounds of the 95% confidence interval are the + minimum and maximum values of the parameter `popmean` for which the + p-value of the test would be 0.05. + + >>> res = stats.ttest_1samp(rvs, popmean=ci.low) + >>> np.testing.assert_allclose(res.pvalue, 0.05) + >>> res = stats.ttest_1samp(rvs, popmean=ci.high) + >>> np.testing.assert_allclose(res.pvalue, 0.05) + + Under certain assumptions about the population from which a sample + is drawn, the confidence interval with confidence level 95% is expected + to contain the true population mean in 95% of sample replications. 
+ + >>> rvs = stats.norm.rvs(size=(50, 1000), loc=1, random_state=rng) + >>> res = stats.ttest_1samp(rvs, popmean=0) + >>> ci = res.confidence_interval() + >>> contains_pop_mean = (ci.low < 1) & (ci.high > 1) + >>> contains_pop_mean.sum() + 953 + + """ + a, axis = _chk_asarray(a, axis) + + n = a.shape[axis] + df = n - 1 + + mean = np.mean(a, axis) + try: + popmean = np.squeeze(popmean, axis=axis) + except ValueError as e: + raise ValueError("`popmean.shape[axis]` must equal 1.") from e + d = mean - popmean + v = _var(a, axis, ddof=1) + denom = np.sqrt(v / n) + + with np.errstate(divide='ignore', invalid='ignore'): + t = np.divide(d, denom)[()] + prob = _get_pvalue(t, distributions.t(df), alternative) + + # when nan_policy='omit', `df` can be different for different axis-slices + df = np.broadcast_to(df, t.shape)[()] + # _axis_nan_policy decorator doesn't play well with strings + alternative_num = {"less": -1, "two-sided": 0, "greater": 1}[alternative] + return TtestResult(t, prob, df=df, alternative=alternative_num, + standard_error=denom, estimate=mean) + + +def _t_confidence_interval(df, t, confidence_level, alternative): + # Input validation on `alternative` is already done + # We just need IV on confidence_level + if confidence_level < 0 or confidence_level > 1: + message = "`confidence_level` must be a number between 0 and 1." + raise ValueError(message) + + if alternative < 0: # 'less' + p = confidence_level + low, high = np.broadcast_arrays(-np.inf, special.stdtrit(df, p)) + elif alternative > 0: # 'greater' + p = 1 - confidence_level + low, high = np.broadcast_arrays(special.stdtrit(df, p), np.inf) + elif alternative == 0: # 'two-sided' + tail_probability = (1 - confidence_level)/2 + p = tail_probability, 1-tail_probability + # axis of p must be the zeroth and orthogonal to all the rest + p = np.reshape(p, [2] + [1]*np.asarray(df).ndim) + low, high = special.stdtrit(df, p) + else: # alternative is NaN when input is empty (see _axis_nan_policy) + p, nans = np.broadcast_arrays(t, np.nan) + low, high = nans, nans + + return low[()], high[()] + +def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative): + + d = mean1 - mean2 + with np.errstate(divide='ignore', invalid='ignore'): + t = np.divide(d, denom)[()] + prob = _get_pvalue(t, distributions.t(df), alternative) + + return (t, prob) + + +def _unequal_var_ttest_denom(v1, n1, v2, n2): + vn1 = v1 / n1 + vn2 = v2 / n2 + with np.errstate(divide='ignore', invalid='ignore'): + df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1)) + + # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0). + # Hence it doesn't matter what df is as long as it's not NaN. + df = np.where(np.isnan(df), 1, df) + denom = np.sqrt(vn1 + vn2) + return df, denom + + +def _equal_var_ttest_denom(v1, n1, v2, n2): + # If there is a single observation in one sample, this formula for pooled + # variance breaks down because the variance of that sample is undefined. + # The pooled variance is still defined, though, because the (n-1) in the + # numerator should cancel with the (n-1) in the denominator, leaving only + # the sum of squared differences from the mean: zero. 
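    # The pooled variance and t-statistic denominator computed below are
    #     svar  = ((n1 - 1)*v1 + (n2 - 1)*v2) / (n1 + n2 - 2)
    #     denom = sqrt(svar * (1/n1 + 1/n2))
    # i.e. the standard equal-variance (Student) two-sample formula.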
+ v1 = np.where(n1 == 1, 0, v1)[()] + v2 = np.where(n2 == 1, 0, v2)[()] + + df = n1 + n2 - 2.0 + svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df + denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2)) + return df, denom + + +Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue')) + + +def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2, + equal_var=True, alternative="two-sided"): + r""" + T-test for means of two independent samples from descriptive statistics. + + This is a test for the null hypothesis that two independent + samples have identical average (expected) values. + + Parameters + ---------- + mean1 : array_like + The mean(s) of sample 1. + std1 : array_like + The corrected sample standard deviation of sample 1 (i.e. ``ddof=1``). + nobs1 : array_like + The number(s) of observations of sample 1. + mean2 : array_like + The mean(s) of sample 2. + std2 : array_like + The corrected sample standard deviation of sample 2 (i.e. ``ddof=1``). + nobs2 : array_like + The number(s) of observations of sample 2. + equal_var : bool, optional + If True (default), perform a standard independent 2 sample test + that assumes equal population variances [1]_. + If False, perform Welch's t-test, which does not assume equal + population variance [2]_. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the means of the distributions are unequal. + * 'less': the mean of the first distribution is less than the + mean of the second distribution. + * 'greater': the mean of the first distribution is greater than the + mean of the second distribution. + + .. versionadded:: 1.6.0 + + Returns + ------- + statistic : float or array + The calculated t-statistics. + pvalue : float or array + The two-tailed p-value. + + See Also + -------- + scipy.stats.ttest_ind + + Notes + ----- + The statistic is calculated as ``(mean1 - mean2)/se``, where ``se`` is the + standard error. Therefore, the statistic will be positive when `mean1` is + greater than `mean2` and negative when `mean1` is less than `mean2`. + + This method does not check whether any of the elements of `std1` or `std2` + are negative. If any elements of the `std1` or `std2` parameters are + negative in a call to this method, this method will return the same result + as if it were passed ``numpy.abs(std1)`` and ``numpy.abs(std2)``, + respectively, instead; no exceptions or warnings will be emitted. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test + + .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test + + Examples + -------- + Suppose we have the summary data for two samples, as follows (with the + Sample Variance being the corrected sample variance):: + + Sample Sample + Size Mean Variance + Sample 1 13 15.0 87.5 + Sample 2 11 12.0 39.0 + + Apply the t-test to this data (with the assumption that the population + variances are equal): + + >>> import numpy as np + >>> from scipy.stats import ttest_ind_from_stats + >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13, + ... mean2=12.0, std2=np.sqrt(39.0), nobs2=11) + Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487) + + For comparison, here is the data from which those summary statistics + were taken. 
With this data, we can compute the same result using + `scipy.stats.ttest_ind`: + + >>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26]) + >>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21]) + >>> from scipy.stats import ttest_ind + >>> ttest_ind(a, b) + Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486) + + Suppose we instead have binary data and would like to apply a t-test to + compare the proportion of 1s in two independent groups:: + + Number of Sample Sample + Size ones Mean Variance + Sample 1 150 30 0.2 0.161073 + Sample 2 200 45 0.225 0.175251 + + The sample mean :math:`\hat{p}` is the proportion of ones in the sample + and the variance for a binary observation is estimated by + :math:`\hat{p}(1-\hat{p})`. + + >>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.161073), nobs1=150, + ... mean2=0.225, std2=np.sqrt(0.175251), nobs2=200) + Ttest_indResult(statistic=-0.5627187905196761, pvalue=0.5739887114209541) + + For comparison, we could compute the t statistic and p-value using + arrays of 0s and 1s and `scipy.stat.ttest_ind`, as above. + + >>> group1 = np.array([1]*30 + [0]*(150-30)) + >>> group2 = np.array([1]*45 + [0]*(200-45)) + >>> ttest_ind(group1, group2) + Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258) + + """ + mean1 = np.asarray(mean1) + std1 = np.asarray(std1) + mean2 = np.asarray(mean2) + std2 = np.asarray(std2) + if equal_var: + df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2) + else: + df, denom = _unequal_var_ttest_denom(std1**2, nobs1, + std2**2, nobs2) + + res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative) + return Ttest_indResult(*res) + + +@_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2, + result_to_tuple=unpack_TtestResult, n_outputs=6) +def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate', + permutations=None, random_state=None, alternative="two-sided", + trim=0): + """ + Calculate the T-test for the means of *two independent* samples of scores. + + This is a test for the null hypothesis that 2 independent samples + have identical average (expected) values. This test assumes that the + populations have identical variances by default. + + Parameters + ---------- + a, b : array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int or None, optional + Axis along which to compute test. If None, compute over the whole + arrays, `a`, and `b`. + equal_var : bool, optional + If True (default), perform a standard independent 2 sample test + that assumes equal population variances [1]_. + If False, perform Welch's t-test, which does not assume equal + population variance [2]_. + + .. versionadded:: 0.11.0 + + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + The 'omit' option is not currently available for permutation tests or + one-sided asympyotic tests. + + permutations : non-negative int, np.inf, or None (default), optional + If 0 or None (default), use the t-distribution to calculate p-values. + Otherwise, `permutations` is the number of random permutations that + will be used to estimate p-values using a permutation test. 
If + `permutations` equals or exceeds the number of distinct partitions of + the pooled data, an exact test is performed instead (i.e. each + distinct partition is used exactly once). See Notes for details. + + .. versionadded:: 1.7.0 + + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Pseudorandom number generator state used to generate permutations + (used only when `permutations` is not None). + + .. versionadded:: 1.7.0 + + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the means of the distributions underlying the samples + are unequal. + * 'less': the mean of the distribution underlying the first sample + is less than the mean of the distribution underlying the second + sample. + * 'greater': the mean of the distribution underlying the first + sample is greater than the mean of the distribution underlying + the second sample. + + .. versionadded:: 1.6.0 + + trim : float, optional + If nonzero, performs a trimmed (Yuen's) t-test. + Defines the fraction of elements to be trimmed from each end of the + input samples. If 0 (default), no elements will be trimmed from either + side. The number of trimmed elements from each tail is the floor of the + trim times the number of elements. Valid range is [0, .5). + + .. versionadded:: 1.7 + + Returns + ------- + result : `~scipy.stats._result_classes.TtestResult` + An object with the following attributes: + + statistic : float or ndarray + The t-statistic. + pvalue : float or ndarray + The p-value associated with the given alternative. + df : float or ndarray + The number of degrees of freedom used in calculation of the + t-statistic. This is always NaN for a permutation t-test. + + .. versionadded:: 1.11.0 + + The object also has the following method: + + confidence_interval(confidence_level=0.95) + Computes a confidence interval around the difference in + population means for the given confidence level. + The confidence interval is returned in a ``namedtuple`` with + fields ``low`` and ``high``. + When a permutation t-test is performed, the confidence interval + is not computed, and fields ``low`` and ``high`` contain NaN. + + .. versionadded:: 1.11.0 + + Notes + ----- + Suppose we observe two independent samples, e.g. flower petal lengths, and + we are considering whether the two samples were drawn from the same + population (e.g. the same species of flower or two species with similar + petal characteristics) or two different populations. + + The t-test quantifies the difference between the arithmetic means + of the two samples. The p-value quantifies the probability of observing + as or more extreme values assuming the null hypothesis, that the + samples are drawn from populations with the same population means, is true. + A p-value larger than a chosen threshold (e.g. 5% or 1%) indicates that + our observation is not so unlikely to have occurred by chance. Therefore, + we do not reject the null hypothesis of equal population means. + If the p-value is smaller than our threshold, then we have evidence + against the null hypothesis of equal population means. 
+ + By default, the p-value is determined by comparing the t-statistic of the + observed data against a theoretical t-distribution. + When ``1 < permutations < binom(n, k)``, where + + * ``k`` is the number of observations in `a`, + * ``n`` is the total number of observations in `a` and `b`, and + * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``), + + the data are pooled (concatenated), randomly assigned to either group `a` + or `b`, and the t-statistic is calculated. This process is performed + repeatedly (`permutation` times), generating a distribution of the + t-statistic under the null hypothesis, and the t-statistic of the observed + data is compared to this distribution to determine the p-value. + Specifically, the p-value reported is the "achieved significance level" + (ASL) as defined in 4.4 of [3]_. Note that there are other ways of + estimating p-values using randomized permutation tests; for other + options, see the more general `permutation_test`. + + When ``permutations >= binom(n, k)``, an exact test is performed: the data + are partitioned between the groups in each distinct way exactly once. + + The permutation test can be computationally expensive and not necessarily + more accurate than the analytical test, but it does not make strong + assumptions about the shape of the underlying distribution. + + Use of trimming is commonly referred to as the trimmed t-test. At times + called Yuen's t-test, this is an extension of Welch's t-test, with the + difference being the use of winsorized means in calculation of the variance + and the trimmed sample size in calculation of the statistic. Trimming is + recommended if the underlying distribution is long-tailed or contaminated + with outliers [4]_. + + The statistic is calculated as ``(np.mean(a) - np.mean(b))/se``, where + ``se`` is the standard error. Therefore, the statistic will be positive + when the sample mean of `a` is greater than the sample mean of `b` and + negative when the sample mean of `a` is less than the sample mean of + `b`. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test + + .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test + + .. [3] B. Efron and T. Hastie. Computer Age Statistical Inference. (2016). + + .. [4] Yuen, Karen K. "The Two-Sample Trimmed t for Unequal Population + Variances." Biometrika, vol. 61, no. 1, 1974, pp. 165-170. JSTOR, + www.jstor.org/stable/2334299. Accessed 30 Mar. 2021. + + .. [5] Yuen, Karen K., and W. J. Dixon. "The Approximate Behaviour and + Performance of the Two-Sample Trimmed t." Biometrika, vol. 60, + no. 2, 1973, pp. 369-374. JSTOR, www.jstor.org/stable/2334550. + Accessed 30 Mar. 2021. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + + Test with sample with identical means: + + >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng) + >>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng) + >>> stats.ttest_ind(rvs1, rvs2) + Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952038870015) + >>> stats.ttest_ind(rvs1, rvs2, equal_var=False) + Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952553131064) + + `ttest_ind` underestimates p for unequal variances: + + >>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500, random_state=rng) + >>> stats.ttest_ind(rvs1, rvs3) + Ttest_indResult(statistic=-1.6370984482905417, pvalue=0.1019251574705033) + >>> stats.ttest_ind(rvs1, rvs3, equal_var=False) + Ttest_indResult(statistic=-1.637098448290542, pvalue=0.10202110497954867) + + When ``n1 != n2``, the equal variance t-statistic is no longer equal to the + unequal variance t-statistic: + + >>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100, random_state=rng) + >>> stats.ttest_ind(rvs1, rvs4) + Ttest_indResult(statistic=-1.9481646859513422, pvalue=0.05186270935842703) + >>> stats.ttest_ind(rvs1, rvs4, equal_var=False) + Ttest_indResult(statistic=-1.3146566100751664, pvalue=0.1913495266513811) + + T-test with different means, variance, and n: + + >>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100, random_state=rng) + >>> stats.ttest_ind(rvs1, rvs5) + Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0046418707568707885) + >>> stats.ttest_ind(rvs1, rvs5, equal_var=False) + Ttest_indResult(statistic=-1.8686598649188084, pvalue=0.06434714193919686) + + When performing a permutation test, more permutations typically yields + more accurate results. Use a ``np.random.Generator`` to ensure + reproducibility: + + >>> stats.ttest_ind(rvs1, rvs5, permutations=10000, + ... random_state=rng) + Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0052994700529947) + + Take these two samples, one of which has an extreme tail. + + >>> a = (56, 128.6, 12, 123.8, 64.34, 78, 763.3) + >>> b = (1.1, 2.9, 4.2) + + Use the `trim` keyword to perform a trimmed (Yuen) t-test. For example, + using 20% trimming, ``trim=.2``, the test will reduce the impact of one + (``np.floor(trim*len(a))``) element from each tail of sample `a`. It will + have no effect on sample `b` because ``np.floor(trim*len(b))`` is 0. 
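+
+    As a quick check of those counts (an illustrative aside):
+
+    >>> int(np.floor(.2 * len(a))), int(np.floor(.2 * len(b)))
+    (1, 0)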
+ + >>> stats.ttest_ind(a, b, trim=.2) + Ttest_indResult(statistic=3.4463884028073513, + pvalue=0.01369338726499547) + """ + if not (0 <= trim < .5): + raise ValueError("Trimming percentage should be 0 <= `trim` < .5.") + + NaN = _get_nan(a, b) + + if a.size == 0 or b.size == 0: + # _axis_nan_policy decorator ensures this only happens with 1d input + return TtestResult(NaN, NaN, df=NaN, alternative=NaN, + standard_error=NaN, estimate=NaN) + + if permutations is not None and permutations != 0: + if trim != 0: + raise ValueError("Permutations are currently not supported " + "with trimming.") + if permutations < 0 or (np.isfinite(permutations) and + int(permutations) != permutations): + raise ValueError("Permutations must be a non-negative integer.") + + t, prob = _permutation_ttest(a, b, permutations=permutations, + axis=axis, equal_var=equal_var, + nan_policy=nan_policy, + random_state=random_state, + alternative=alternative) + df, denom, estimate = NaN, NaN, NaN + + else: + n1 = a.shape[axis] + n2 = b.shape[axis] + + if trim == 0: + if equal_var: + old_errstate = np.geterr() + np.seterr(divide='ignore', invalid='ignore') + v1 = _var(a, axis, ddof=1) + v2 = _var(b, axis, ddof=1) + if equal_var: + np.seterr(**old_errstate) + m1 = np.mean(a, axis) + m2 = np.mean(b, axis) + else: + v1, m1, n1 = _ttest_trim_var_mean_len(a, trim, axis) + v2, m2, n2 = _ttest_trim_var_mean_len(b, trim, axis) + + if equal_var: + df, denom = _equal_var_ttest_denom(v1, n1, v2, n2) + else: + df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2) + t, prob = _ttest_ind_from_stats(m1, m2, denom, df, alternative) + + # when nan_policy='omit', `df` can be different for different axis-slices + df = np.broadcast_to(df, t.shape)[()] + estimate = m1-m2 + + # _axis_nan_policy decorator doesn't play well with strings + alternative_num = {"less": -1, "two-sided": 0, "greater": 1}[alternative] + return TtestResult(t, prob, df=df, alternative=alternative_num, + standard_error=denom, estimate=estimate) + + +def _ttest_trim_var_mean_len(a, trim, axis): + """Variance, mean, and length of winsorized input along specified axis""" + # for use with `ttest_ind` when trimming. + # further calculations in this test assume that the inputs are sorted. + # From [4] Section 1 "Let x_1, ..., x_n be n ordered observations..." + a = np.sort(a, axis=axis) + + # `g` is the number of elements to be replaced on each tail, converted + # from a percentage amount of trimming + n = a.shape[axis] + g = int(n * trim) + + # Calculate the Winsorized variance of the input samples according to + # specified `g` + v = _calculate_winsorized_variance(a, g, axis) + + # the total number of elements in the trimmed samples + n -= 2 * g + + # calculate the g-times trimmed mean, as defined in [4] (1-1) + m = trim_mean(a, trim, axis=axis) + return v, m, n + + +def _calculate_winsorized_variance(a, g, axis): + """Calculates g-times winsorized variance along specified axis""" + # it is expected that the input `a` is sorted along the correct axis + if g == 0: + return _var(a, ddof=1, axis=axis) + # move the intended axis to the end that way it is easier to manipulate + a_win = np.moveaxis(a, axis, -1) + + # save where NaNs are for later use. + nans_indices = np.any(np.isnan(a_win), axis=-1) + + # Winsorization and variance calculation are done in one step in [4] + # (1-3), but here winsorization is done first; replace the left and + # right sides with the repeating value. 
This can be see in effect in ( + # 1-3) in [4], where the leftmost and rightmost tails are replaced with + # `(g + 1) * x_{g + 1}` on the left and `(g + 1) * x_{n - g}` on the + # right. Zero-indexing turns `g + 1` to `g`, and `n - g` to `- g - 1` in + # array indexing. + a_win[..., :g] = a_win[..., [g]] + a_win[..., -g:] = a_win[..., [-g - 1]] + + # Determine the variance. In [4], the degrees of freedom is expressed as + # `h - 1`, where `h = n - 2g` (unnumbered equations in Section 1, end of + # page 369, beginning of page 370). This is converted to NumPy's format, + # `n - ddof` for use with `np.var`. The result is converted to an + # array to accommodate indexing later. + var_win = np.asarray(_var(a_win, ddof=(2 * g + 1), axis=-1)) + + # with `nan_policy='propagate'`, NaNs may be completely trimmed out + # because they were sorted into the tail of the array. In these cases, + # replace computed variances with `np.nan`. + var_win[nans_indices] = np.nan + return var_win + + +def _permutation_distribution_t(data, permutations, size_a, equal_var, + random_state=None): + """Generation permutation distribution of t statistic""" + + random_state = check_random_state(random_state) + + # prepare permutation indices + size = data.shape[-1] + # number of distinct combinations + n_max = special.comb(size, size_a) + + if permutations < n_max: + perm_generator = (random_state.permutation(size) + for i in range(permutations)) + else: + permutations = n_max + perm_generator = (np.concatenate(z) + for z in _all_partitions(size_a, size-size_a)) + + t_stat = [] + for indices in _batch_generator(perm_generator, batch=50): + # get one batch from perm_generator at a time as a list + indices = np.array(indices) + # generate permutations + data_perm = data[..., indices] + # move axis indexing permutations to position 0 to broadcast + # nicely with t_stat_observed, which doesn't have this dimension + data_perm = np.moveaxis(data_perm, -2, 0) + + a = data_perm[..., :size_a] + b = data_perm[..., size_a:] + t_stat.append(_calc_t_stat(a, b, equal_var)) + + t_stat = np.concatenate(t_stat, axis=0) + + return t_stat, permutations, n_max + + +def _calc_t_stat(a, b, equal_var, axis=-1): + """Calculate the t statistic along the given dimension.""" + na = a.shape[axis] + nb = b.shape[axis] + avg_a = np.mean(a, axis=axis) + avg_b = np.mean(b, axis=axis) + var_a = _var(a, axis=axis, ddof=1) + var_b = _var(b, axis=axis, ddof=1) + + if not equal_var: + denom = _unequal_var_ttest_denom(var_a, na, var_b, nb)[1] + else: + denom = _equal_var_ttest_denom(var_a, na, var_b, nb)[1] + + return (avg_a-avg_b)/denom + + +def _permutation_ttest(a, b, permutations, axis=0, equal_var=True, + nan_policy='propagate', random_state=None, + alternative="two-sided"): + """ + Calculates the T-test for the means of TWO INDEPENDENT samples of scores + using permutation methods. + + This test is similar to `stats.ttest_ind`, except it doesn't rely on an + approximate normality assumption since it uses a permutation test. + This function is only called from ttest_ind when permutations is not None. + + Parameters + ---------- + a, b : array_like + The arrays must be broadcastable, except along the dimension + corresponding to `axis` (the zeroth, by default). + axis : int, optional + The axis over which to operate on a and b. + permutations : int, optional + Number of permutations used to calculate p-value. If greater than or + equal to the number of distinct permutations, perform an exact test. 
+ equal_var : bool, optional + If False, an equal variance (Welch's) t-test is conducted. Otherwise, + an ordinary t-test is conducted. + random_state : {None, int, `numpy.random.Generator`}, optional + If `seed` is None the `numpy.random.Generator` singleton is used. + If `seed` is an int, a new ``Generator`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` instance then that instance is + used. + Pseudorandom number generator state used for generating random + permutations. + + Returns + ------- + statistic : float or array + The calculated t-statistic. + pvalue : float or array + The p-value. + + """ + random_state = check_random_state(random_state) + + t_stat_observed = _calc_t_stat(a, b, equal_var, axis=axis) + + na = a.shape[axis] + mat = _broadcast_concatenate((a, b), axis=axis) + mat = np.moveaxis(mat, axis, -1) + + t_stat, permutations, n_max = _permutation_distribution_t( + mat, permutations, size_a=na, equal_var=equal_var, + random_state=random_state) + + compare = {"less": np.less_equal, + "greater": np.greater_equal, + "two-sided": lambda x, y: (x <= -np.abs(y)) | (x >= np.abs(y))} + + # Calculate the p-values + cmps = compare[alternative](t_stat, t_stat_observed) + # Randomized test p-value calculation should use biased estimate; see e.g. + # https://www.degruyter.com/document/doi/10.2202/1544-6115.1585/ + adjustment = 1 if n_max > permutations else 0 + pvalues = (cmps.sum(axis=0) + adjustment) / (permutations + adjustment) + + # nans propagate naturally in statistic calculation, but need to be + # propagated manually into pvalues + if nan_policy == 'propagate' and np.isnan(t_stat_observed).any(): + if np.ndim(pvalues) == 0: + pvalues = np.float64(np.nan) + else: + pvalues[np.isnan(t_stat_observed)] = np.nan + + return (t_stat_observed, pvalues) + + +def _get_len(a, axis, msg): + try: + n = a.shape[axis] + except IndexError: + raise AxisError(axis, a.ndim, msg) from None + return n + + +@_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2, + result_to_tuple=unpack_TtestResult, n_outputs=6, + paired=True) +def ttest_rel(a, b, axis=0, nan_policy='propagate', alternative="two-sided"): + """Calculate the t-test on TWO RELATED samples of scores, a and b. + + This is a test for the null hypothesis that two related or + repeated samples have identical average (expected) values. + + Parameters + ---------- + a, b : array_like + The arrays must have the same shape. + axis : int or None, optional + Axis along which to compute test. If None, compute over the whole + arrays, `a`, and `b`. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the means of the distributions underlying the samples + are unequal. + * 'less': the mean of the distribution underlying the first sample + is less than the mean of the distribution underlying the second + sample. + * 'greater': the mean of the distribution underlying the first + sample is greater than the mean of the distribution underlying + the second sample. + + .. 
versionadded:: 1.6.0 + + Returns + ------- + result : `~scipy.stats._result_classes.TtestResult` + An object with the following attributes: + + statistic : float or array + The t-statistic. + pvalue : float or array + The p-value associated with the given alternative. + df : float or array + The number of degrees of freedom used in calculation of the + t-statistic; this is one less than the size of the sample + (``a.shape[axis]``). + + .. versionadded:: 1.10.0 + + The object also has the following method: + + confidence_interval(confidence_level=0.95) + Computes a confidence interval around the difference in + population means for the given confidence level. + The confidence interval is returned in a ``namedtuple`` with + fields `low` and `high`. + + .. versionadded:: 1.10.0 + + Notes + ----- + Examples for use are scores of the same set of student in + different exams, or repeated sampling from the same units. The + test measures whether the average score differs significantly + across samples (e.g. exams). If we observe a large p-value, for + example greater than 0.05 or 0.1 then we cannot reject the null + hypothesis of identical average scores. If the p-value is smaller + than the threshold, e.g. 1%, 5% or 10%, then we reject the null + hypothesis of equal averages. Small p-values are associated with + large t-statistics. + + The t-statistic is calculated as ``np.mean(a - b)/se``, where ``se`` is the + standard error. Therefore, the t-statistic will be positive when the sample + mean of ``a - b`` is greater than zero and negative when the sample mean of + ``a - b`` is less than zero. + + References + ---------- + https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + + >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng) + >>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng) + ... + stats.norm.rvs(scale=0.2, size=500, random_state=rng)) + >>> stats.ttest_rel(rvs1, rvs2) + TtestResult(statistic=-0.4549717054410304, pvalue=0.6493274702088672, df=499) + >>> rvs3 = (stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng) + ... + stats.norm.rvs(scale=0.2, size=500, random_state=rng)) + >>> stats.ttest_rel(rvs1, rvs3) + TtestResult(statistic=-5.879467544540889, pvalue=7.540777129099917e-09, df=499) + + """ + a, b, axis = _chk2_asarray(a, b, axis) + + na = _get_len(a, axis, "first argument") + nb = _get_len(b, axis, "second argument") + if na != nb: + raise ValueError('unequal length arrays') + + if na == 0 or nb == 0: + # _axis_nan_policy decorator ensures this only happens with 1d input + NaN = _get_nan(a, b) + return TtestResult(NaN, NaN, df=NaN, alternative=NaN, + standard_error=NaN, estimate=NaN) + + n = a.shape[axis] + df = n - 1 + + d = (a - b).astype(np.float64) + v = _var(d, axis, ddof=1) + dm = np.mean(d, axis) + denom = np.sqrt(v / n) + + with np.errstate(divide='ignore', invalid='ignore'): + t = np.divide(dm, denom)[()] + prob = _get_pvalue(t, distributions.t(df), alternative) + + # when nan_policy='omit', `df` can be different for different axis-slices + df = np.broadcast_to(df, t.shape)[()] + + # _axis_nan_policy decorator doesn't play well with strings + alternative_num = {"less": -1, "two-sided": 0, "greater": 1}[alternative] + return TtestResult(t, prob, df=df, alternative=alternative_num, + standard_error=denom, estimate=dm) + + +# Map from names to lambda_ values used in power_divergence(). 
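+# The names follow Cressie & Read (1984).  lambda_ values 0 and -1 are
+# special-cased in power_divergence() because the general statistic divides
+# by lambda_*(lambda_ + 1); lambda_ == 1 (Pearson) also has a dedicated branch.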
+_power_div_lambda_names = { + "pearson": 1, + "log-likelihood": 0, + "freeman-tukey": -0.5, + "mod-log-likelihood": -1, + "neyman": -2, + "cressie-read": 2/3, +} + + +def _count(a, axis=None): + """Count the number of non-masked elements of an array. + + This function behaves like `np.ma.count`, but is much faster + for ndarrays. + """ + if hasattr(a, 'count'): + num = a.count(axis=axis) + if isinstance(num, np.ndarray) and num.ndim == 0: + # In some cases, the `count` method returns a scalar array (e.g. + # np.array(3)), but we want a plain integer. + num = int(num) + else: + if axis is None: + num = a.size + else: + num = a.shape[axis] + return num + + +def _m_broadcast_to(a, shape): + if np.ma.isMaskedArray(a): + return np.ma.masked_array(np.broadcast_to(a, shape), + mask=np.broadcast_to(a.mask, shape)) + return np.broadcast_to(a, shape, subok=True) + + +Power_divergenceResult = namedtuple('Power_divergenceResult', + ('statistic', 'pvalue')) + + +def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None): + """Cressie-Read power divergence statistic and goodness of fit test. + + This function tests the null hypothesis that the categorical data + has the given frequencies, using the Cressie-Read power divergence + statistic. + + Parameters + ---------- + f_obs : array_like + Observed frequencies in each category. + f_exp : array_like, optional + Expected frequencies in each category. By default the categories are + assumed to be equally likely. + ddof : int, optional + "Delta degrees of freedom": adjustment to the degrees of freedom + for the p-value. The p-value is computed using a chi-squared + distribution with ``k - 1 - ddof`` degrees of freedom, where `k` + is the number of observed frequencies. The default value of `ddof` + is 0. + axis : int or None, optional + The axis of the broadcast result of `f_obs` and `f_exp` along which to + apply the test. If axis is None, all values in `f_obs` are treated + as a single data set. Default is 0. + lambda_ : float or str, optional + The power in the Cressie-Read power divergence statistic. The default + is 1. For convenience, `lambda_` may be assigned one of the following + strings, in which case the corresponding numerical value is used: + + * ``"pearson"`` (value 1) + Pearson's chi-squared statistic. In this case, the function is + equivalent to `chisquare`. + * ``"log-likelihood"`` (value 0) + Log-likelihood ratio. Also known as the G-test [3]_. + * ``"freeman-tukey"`` (value -1/2) + Freeman-Tukey statistic. + * ``"mod-log-likelihood"`` (value -1) + Modified log-likelihood ratio. + * ``"neyman"`` (value -2) + Neyman's statistic. + * ``"cressie-read"`` (value 2/3) + The power recommended in [5]_. + + Returns + ------- + res: Power_divergenceResult + An object containing attributes: + + statistic : float or ndarray + The Cressie-Read power divergence test statistic. The value is + a float if `axis` is None or if` `f_obs` and `f_exp` are 1-D. + pvalue : float or ndarray + The p-value of the test. The value is a float if `ddof` and the + return value `stat` are scalars. + + See Also + -------- + chisquare + + Notes + ----- + This test is invalid when the observed or expected frequencies in each + category are too small. A typical rule is that all of the observed + and expected frequencies should be at least 5. + + Also, the sum of the observed and expected frequencies must be the same + for the test to be valid; `power_divergence` raises an error if the sums + do not agree within a relative tolerance of ``1e-8``. 
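+
+    As a sketch of the computation, the general form of the statistic is
+    ``2 * sum(f_obs * ((f_obs / f_exp)**lambda_ - 1)) / (lambda_ * (lambda_ + 1))``;
+    the ``lambda_ = 0`` and ``lambda_ = -1`` cases are evaluated through their
+    log-likelihood limits.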
+ + When `lambda_` is less than zero, the formula for the statistic involves + dividing by `f_obs`, so a warning or error may be generated if any value + in `f_obs` is 0. + + Similarly, a warning or error may be generated if any value in `f_exp` is + zero when `lambda_` >= 0. + + The default degrees of freedom, k-1, are for the case when no parameters + of the distribution are estimated. If p parameters are estimated by + efficient maximum likelihood then the correct degrees of freedom are + k-1-p. If the parameters are estimated in a different way, then the + dof can be between k-1-p and k-1. However, it is also possible that + the asymptotic distribution is not a chisquare, in which case this + test is not appropriate. + + This function handles masked arrays. If an element of `f_obs` or `f_exp` + is masked, then data at that position is ignored, and does not count + towards the size of the data set. + + .. versionadded:: 0.13.0 + + References + ---------- + .. [1] Lowry, Richard. "Concepts and Applications of Inferential + Statistics". Chapter 8. + https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html + .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test + .. [3] "G-test", https://en.wikipedia.org/wiki/G-test + .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and + practice of statistics in biological research", New York: Freeman + (1981) + .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit + Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984), + pp. 440-464. + + Examples + -------- + (See `chisquare` for more examples.) + + When just `f_obs` is given, it is assumed that the expected frequencies + are uniform and given by the mean of the observed frequencies. Here we + perform a G-test (i.e. use the log-likelihood ratio statistic): + + >>> import numpy as np + >>> from scipy.stats import power_divergence + >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood') + (2.006573162632538, 0.84823476779463769) + + The expected frequencies can be given with the `f_exp` argument: + + >>> power_divergence([16, 18, 16, 14, 12, 12], + ... f_exp=[16, 16, 16, 16, 16, 8], + ... lambda_='log-likelihood') + (3.3281031458963746, 0.6495419288047497) + + When `f_obs` is 2-D, by default the test is applied to each column. + + >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T + >>> obs.shape + (6, 2) + >>> power_divergence(obs, lambda_="log-likelihood") + (array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225])) + + By setting ``axis=None``, the test is applied to all data in the array, + which is equivalent to applying the test to the flattened array. + + >>> power_divergence(obs, axis=None) + (23.31034482758621, 0.015975692534127565) + >>> power_divergence(obs.ravel()) + (23.31034482758621, 0.015975692534127565) + + `ddof` is the change to make to the default degrees of freedom. + + >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1) + (2.0, 0.73575888234288467) + + The calculation of the p-values is done by broadcasting the + test statistic with `ddof`. + + >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2]) + (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ])) + + `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has + shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting + `f_obs` and `f_exp` has shape (2, 6). 
To compute the desired chi-squared + statistics, we must use ``axis=1``: + + >>> power_divergence([16, 18, 16, 14, 12, 12], + ... f_exp=[[16, 16, 16, 16, 16, 8], + ... [8, 20, 20, 16, 12, 12]], + ... axis=1) + (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846])) + + """ + # Convert the input argument `lambda_` to a numerical value. + if isinstance(lambda_, str): + if lambda_ not in _power_div_lambda_names: + names = repr(list(_power_div_lambda_names.keys()))[1:-1] + raise ValueError(f"invalid string for lambda_: {lambda_!r}. " + f"Valid strings are {names}") + lambda_ = _power_div_lambda_names[lambda_] + elif lambda_ is None: + lambda_ = 1 + + f_obs = np.asanyarray(f_obs) + f_obs_float = f_obs.astype(np.float64) + + if f_exp is not None: + f_exp = np.asanyarray(f_exp) + bshape = np.broadcast_shapes(f_obs_float.shape, f_exp.shape) + f_obs_float = _m_broadcast_to(f_obs_float, bshape) + f_exp = _m_broadcast_to(f_exp, bshape) + rtol = 1e-8 # to pass existing tests + with np.errstate(invalid='ignore'): + f_obs_sum = f_obs_float.sum(axis=axis) + f_exp_sum = f_exp.sum(axis=axis) + relative_diff = (np.abs(f_obs_sum - f_exp_sum) / + np.minimum(f_obs_sum, f_exp_sum)) + diff_gt_tol = (relative_diff > rtol).any() + if diff_gt_tol: + msg = (f"For each axis slice, the sum of the observed " + f"frequencies must agree with the sum of the " + f"expected frequencies to a relative tolerance " + f"of {rtol}, but the percent differences are:\n" + f"{relative_diff}") + raise ValueError(msg) + + else: + # Ignore 'invalid' errors so the edge case of a data set with length 0 + # is handled without spurious warnings. + with np.errstate(invalid='ignore'): + f_exp = f_obs.mean(axis=axis, keepdims=True) + + # `terms` is the array of terms that are summed along `axis` to create + # the test statistic. We use some specialized code for a few special + # cases of lambda_. + if lambda_ == 1: + # Pearson's chi-squared statistic + terms = (f_obs_float - f_exp)**2 / f_exp + elif lambda_ == 0: + # Log-likelihood ratio (i.e. G-test) + terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp) + elif lambda_ == -1: + # Modified log-likelihood ratio + terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs) + else: + # General Cressie-Read power divergence. + terms = f_obs * ((f_obs / f_exp)**lambda_ - 1) + terms /= 0.5 * lambda_ * (lambda_ + 1) + + stat = terms.sum(axis=axis) + + num_obs = _count(terms, axis=axis) + ddof = asarray(ddof) + p = distributions.chi2.sf(stat, num_obs - 1 - ddof) + + return Power_divergenceResult(stat, p) + + +def chisquare(f_obs, f_exp=None, ddof=0, axis=0): + """Calculate a one-way chi-square test. + + The chi-square test tests the null hypothesis that the categorical data + has the given frequencies. + + Parameters + ---------- + f_obs : array_like + Observed frequencies in each category. + f_exp : array_like, optional + Expected frequencies in each category. By default the categories are + assumed to be equally likely. + ddof : int, optional + "Delta degrees of freedom": adjustment to the degrees of freedom + for the p-value. The p-value is computed using a chi-squared + distribution with ``k - 1 - ddof`` degrees of freedom, where `k` + is the number of observed frequencies. The default value of `ddof` + is 0. + axis : int or None, optional + The axis of the broadcast result of `f_obs` and `f_exp` along which to + apply the test. If axis is None, all values in `f_obs` are treated + as a single data set. Default is 0. 
+ + Returns + ------- + res: Power_divergenceResult + An object containing attributes: + + statistic : float or ndarray + The chi-squared test statistic. The value is a float if `axis` is + None or `f_obs` and `f_exp` are 1-D. + pvalue : float or ndarray + The p-value of the test. The value is a float if `ddof` and the + result attribute `statistic` are scalars. + + See Also + -------- + scipy.stats.power_divergence + scipy.stats.fisher_exact : Fisher exact test on a 2x2 contingency table. + scipy.stats.barnard_exact : An unconditional exact test. An alternative + to chi-squared test for small sample sizes. + + Notes + ----- + This test is invalid when the observed or expected frequencies in each + category are too small. A typical rule is that all of the observed + and expected frequencies should be at least 5. According to [3]_, the + total number of samples is recommended to be greater than 13, + otherwise exact tests (such as Barnard's Exact test) should be used + because they do not overreject. + + Also, the sum of the observed and expected frequencies must be the same + for the test to be valid; `chisquare` raises an error if the sums do not + agree within a relative tolerance of ``1e-8``. + + The default degrees of freedom, k-1, are for the case when no parameters + of the distribution are estimated. If p parameters are estimated by + efficient maximum likelihood then the correct degrees of freedom are + k-1-p. If the parameters are estimated in a different way, then the + dof can be between k-1-p and k-1. However, it is also possible that + the asymptotic distribution is not chi-square, in which case this test + is not appropriate. + + References + ---------- + .. [1] Lowry, Richard. "Concepts and Applications of Inferential + Statistics". Chapter 8. + https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html + .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test + .. [3] Pearson, Karl. "On the criterion that a given system of deviations from the probable + in the case of a correlated system of variables is such that it can be reasonably + supposed to have arisen from random sampling", Philosophical Magazine. Series 5. 50 + (1900), pp. 157-175. + .. [4] Mannan, R. William and E. Charles. Meslow. "Bird populations and + vegetation characteristics in managed and old-growth forests, + northeastern Oregon." Journal of Wildlife Management + 48, 1219-1238, :doi:`10.2307/3801783`, 1984. + + Examples + -------- + In [4]_, bird foraging behavior was investigated in an old-growth forest + of Oregon. + In the forest, 44% of the canopy volume was Douglas fir, + 24% was ponderosa pine, 29% was grand fir, and 3% was western larch. + The authors observed the behavior of several species of birds, one of + which was the red-breasted nuthatch. They made 189 observations of this + species foraging, recording 43 ("23%") of observations in Douglas fir, + 52 ("28%") in ponderosa pine, 54 ("29%") in grand fir, and 40 ("21%") in + western larch. + + Using a chi-square test, we can test the null hypothesis that the + proportions of foraging events are equal to the proportions of canopy + volume. The authors of the paper considered a p-value less than 1% to be + significant. + + Using the above proportions of canopy volume and observed events, we can + infer expected frequencies. 
+ + >>> import numpy as np + >>> f_exp = np.array([44, 24, 29, 3]) / 100 * 189 + + The observed frequencies of foraging were: + + >>> f_obs = np.array([43, 52, 54, 40]) + + We can now compare the observed frequencies with the expected frequencies. + + >>> from scipy.stats import chisquare + >>> chisquare(f_obs=f_obs, f_exp=f_exp) + Power_divergenceResult(statistic=228.23515947653874, pvalue=3.3295585338846486e-49) + + The p-value is well below the chosen significance level. Hence, the + authors considered the difference to be significant and concluded + that the relative proportions of foraging events were not the same + as the relative proportions of tree canopy volume. + + Following are other generic examples to demonstrate how the other + parameters can be used. + + When just `f_obs` is given, it is assumed that the expected frequencies + are uniform and given by the mean of the observed frequencies. + + >>> chisquare([16, 18, 16, 14, 12, 12]) + Power_divergenceResult(statistic=2.0, pvalue=0.84914503608460956) + + With `f_exp` the expected frequencies can be given. + + >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8]) + Power_divergenceResult(statistic=3.5, pvalue=0.62338762774958223) + + When `f_obs` is 2-D, by default the test is applied to each column. + + >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T + >>> obs.shape + (6, 2) + >>> chisquare(obs) + Power_divergenceResult(statistic=array([2. , 6.66666667]), pvalue=array([0.84914504, 0.24663415])) + + By setting ``axis=None``, the test is applied to all data in the array, + which is equivalent to applying the test to the flattened array. + + >>> chisquare(obs, axis=None) + Power_divergenceResult(statistic=23.31034482758621, pvalue=0.015975692534127565) + >>> chisquare(obs.ravel()) + Power_divergenceResult(statistic=23.310344827586206, pvalue=0.01597569253412758) + + `ddof` is the change to make to the default degrees of freedom. + + >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1) + Power_divergenceResult(statistic=2.0, pvalue=0.7357588823428847) + + The calculation of the p-values is done by broadcasting the + chi-squared statistic with `ddof`. + + >>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2]) + Power_divergenceResult(statistic=2.0, pvalue=array([0.84914504, 0.73575888, 0.5724067 ])) + + `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has + shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting + `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared + statistics, we use ``axis=1``: + + >>> chisquare([16, 18, 16, 14, 12, 12], + ... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]], + ... axis=1) + Power_divergenceResult(statistic=array([3.5 , 9.25]), pvalue=array([0.62338763, 0.09949846])) + + """ # noqa: E501 + return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis, + lambda_="pearson") + + +KstestResult = _make_tuple_bunch('KstestResult', ['statistic', 'pvalue'], + ['statistic_location', 'statistic_sign']) + + +def _compute_dplus(cdfvals, x): + """Computes D+ as used in the Kolmogorov-Smirnov test. + + Parameters + ---------- + cdfvals : array_like + Sorted array of CDF values between 0 and 1 + x: array_like + Sorted array of the stochastic variable itself + + Returns + ------- + res: Pair with the following elements: + - The maximum distance of the CDF values below Uniform(0, 1). + - The location at which the maximum is reached. 
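+
+    Notes
+    -----
+    With `cdfvals` sorted and of length ``n``, this is
+    ``max(np.arange(1, n + 1)/n - cdfvals)``, reported together with the
+    element of `x` at which the maximum occurs.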
+ + """ + n = len(cdfvals) + dplus = (np.arange(1.0, n + 1) / n - cdfvals) + amax = dplus.argmax() + loc_max = x[amax] + return (dplus[amax], loc_max) + + +def _compute_dminus(cdfvals, x): + """Computes D- as used in the Kolmogorov-Smirnov test. + + Parameters + ---------- + cdfvals : array_like + Sorted array of CDF values between 0 and 1 + x: array_like + Sorted array of the stochastic variable itself + + Returns + ------- + res: Pair with the following elements: + - Maximum distance of the CDF values above Uniform(0, 1) + - The location at which the maximum is reached. + """ + n = len(cdfvals) + dminus = (cdfvals - np.arange(0.0, n)/n) + amax = dminus.argmax() + loc_max = x[amax] + return (dminus[amax], loc_max) + + +def _tuple_to_KstestResult(statistic, pvalue, + statistic_location, statistic_sign): + return KstestResult(statistic, pvalue, + statistic_location=statistic_location, + statistic_sign=statistic_sign) + + +def _KstestResult_to_tuple(res): + return *res, res.statistic_location, res.statistic_sign + + +@_axis_nan_policy_factory(_tuple_to_KstestResult, n_samples=1, n_outputs=4, + result_to_tuple=_KstestResult_to_tuple) +@_rename_parameter("mode", "method") +def ks_1samp(x, cdf, args=(), alternative='two-sided', method='auto'): + """ + Performs the one-sample Kolmogorov-Smirnov test for goodness of fit. + + This test compares the underlying distribution F(x) of a sample + against a given continuous distribution G(x). See Notes for a description + of the available null and alternative hypotheses. + + Parameters + ---------- + x : array_like + a 1-D array of observations of iid random variables. + cdf : callable + callable used to calculate the cdf. + args : tuple, sequence, optional + Distribution parameters, used with `cdf`. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the null and alternative hypotheses. Default is 'two-sided'. + Please see explanations in the Notes below. + method : {'auto', 'exact', 'approx', 'asymp'}, optional + Defines the distribution used for calculating the p-value. + The following options are available (default is 'auto'): + + * 'auto' : selects one of the other options. + * 'exact' : uses the exact distribution of test statistic. + * 'approx' : approximates the two-sided probability with twice + the one-sided probability + * 'asymp': uses asymptotic distribution of test statistic + + Returns + ------- + res: KstestResult + An object containing attributes: + + statistic : float + KS test statistic, either D+, D-, or D (the maximum of the two) + pvalue : float + One-tailed or two-tailed p-value. + statistic_location : float + Value of `x` corresponding with the KS statistic; i.e., the + distance between the empirical distribution function and the + hypothesized cumulative distribution function is measured at this + observation. + statistic_sign : int + +1 if the KS statistic is the maximum positive difference between + the empirical distribution function and the hypothesized cumulative + distribution function (D+); -1 if the KS statistic is the maximum + negative difference (D-). + + + See Also + -------- + ks_2samp, kstest + + Notes + ----- + There are three options for the null and corresponding alternative + hypothesis that can be selected using the `alternative` parameter. + + - `two-sided`: The null hypothesis is that the two distributions are + identical, F(x)=G(x) for all x; the alternative is that they are not + identical. 
+ + - `less`: The null hypothesis is that F(x) >= G(x) for all x; the + alternative is that F(x) < G(x) for at least one x. + + - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the + alternative is that F(x) > G(x) for at least one x. + + Note that the alternative hypotheses describe the *CDFs* of the + underlying distributions, not the observed values. For example, + suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in + x1 tend to be less than those in x2. + + Examples + -------- + Suppose we wish to test the null hypothesis that a sample is distributed + according to the standard normal. + We choose a confidence level of 95%; that is, we will reject the null + hypothesis in favor of the alternative if the p-value is less than 0.05. + + When testing uniformly distributed data, we would expect the + null hypothesis to be rejected. + + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> stats.ks_1samp(stats.uniform.rvs(size=100, random_state=rng), + ... stats.norm.cdf) + KstestResult(statistic=0.5001899973268688, pvalue=1.1616392184763533e-23) + + Indeed, the p-value is lower than our threshold of 0.05, so we reject the + null hypothesis in favor of the default "two-sided" alternative: the data + are *not* distributed according to the standard normal. + + When testing random variates from the standard normal distribution, we + expect the data to be consistent with the null hypothesis most of the time. + + >>> x = stats.norm.rvs(size=100, random_state=rng) + >>> stats.ks_1samp(x, stats.norm.cdf) + KstestResult(statistic=0.05345882212970396, pvalue=0.9227159037744717) + + As expected, the p-value of 0.92 is not below our threshold of 0.05, so + we cannot reject the null hypothesis. + + Suppose, however, that the random variates are distributed according to + a normal distribution that is shifted toward greater values. In this case, + the cumulative density function (CDF) of the underlying distribution tends + to be *less* than the CDF of the standard normal. Therefore, we would + expect the null hypothesis to be rejected with ``alternative='less'``: + + >>> x = stats.norm.rvs(size=100, loc=0.5, random_state=rng) + >>> stats.ks_1samp(x, stats.norm.cdf, alternative='less') + KstestResult(statistic=0.17482387821055168, pvalue=0.001913921057766743) + + and indeed, with p-value smaller than our threshold, we reject the null + hypothesis in favor of the alternative. 
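+
+    For illustration only, the two-sided statistic can be reproduced directly
+    from the empirical CDF of the sorted sample (a sketch of the definition
+    used above, not an additional test):
+
+    >>> x = np.sort(x)
+    >>> n = len(x)
+    >>> cdfvals = stats.norm.cdf(x)
+    >>> dplus = (np.arange(1, n + 1)/n - cdfvals).max()
+    >>> dminus = (cdfvals - np.arange(0, n)/n).max()
+    >>> bool(np.isclose(max(dplus, dminus),
+    ...                 stats.ks_1samp(x, stats.norm.cdf).statistic))
+    True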
+ + """ + mode = method + + alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get( + alternative.lower()[0], alternative) + if alternative not in ['two-sided', 'greater', 'less']: + raise ValueError("Unexpected alternative %s" % alternative) + + N = len(x) + x = np.sort(x) + cdfvals = cdf(x, *args) + np_one = np.int8(1) + + if alternative == 'greater': + Dplus, d_location = _compute_dplus(cdfvals, x) + return KstestResult(Dplus, distributions.ksone.sf(Dplus, N), + statistic_location=d_location, + statistic_sign=np_one) + + if alternative == 'less': + Dminus, d_location = _compute_dminus(cdfvals, x) + return KstestResult(Dminus, distributions.ksone.sf(Dminus, N), + statistic_location=d_location, + statistic_sign=-np_one) + + # alternative == 'two-sided': + Dplus, dplus_location = _compute_dplus(cdfvals, x) + Dminus, dminus_location = _compute_dminus(cdfvals, x) + if Dplus > Dminus: + D = Dplus + d_location = dplus_location + d_sign = np_one + else: + D = Dminus + d_location = dminus_location + d_sign = -np_one + + if mode == 'auto': # Always select exact + mode = 'exact' + if mode == 'exact': + prob = distributions.kstwo.sf(D, N) + elif mode == 'asymp': + prob = distributions.kstwobign.sf(D * np.sqrt(N)) + else: + # mode == 'approx' + prob = 2 * distributions.ksone.sf(D, N) + prob = np.clip(prob, 0, 1) + return KstestResult(D, prob, + statistic_location=d_location, + statistic_sign=d_sign) + + +Ks_2sampResult = KstestResult + + +def _compute_prob_outside_square(n, h): + """ + Compute the proportion of paths that pass outside the two diagonal lines. + + Parameters + ---------- + n : integer + n > 0 + h : integer + 0 <= h <= n + + Returns + ------- + p : float + The proportion of paths that pass outside the lines x-y = +/-h. + + """ + # Compute Pr(D_{n,n} >= h/n) + # Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2a) + binom(2n, n-3a) - ... ) + # / binom(2n, n) + # This formulation exhibits subtractive cancellation. + # Instead divide each term by binom(2n, n), then factor common terms + # and use a Horner-like algorithm + # P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...))))) + + P = 0.0 + k = int(np.floor(n / h)) + while k >= 0: + p1 = 1.0 + # Each of the Ai terms has numerator and denominator with + # h simple terms. + for j in range(h): + p1 = (n - k * h - j) * p1 / (n + k * h + j + 1) + P = p1 * (1.0 - P) + k -= 1 + return 2 * P + + +def _count_paths_outside_method(m, n, g, h): + """Count the number of paths that pass outside the specified diagonal. + + Parameters + ---------- + m : integer + m > 0 + n : integer + n > 0 + g : integer + g is greatest common divisor of m and n + h : integer + 0 <= h <= lcm(m,n) + + Returns + ------- + p : float + The number of paths that go low. + The calculation may overflow - check for a finite answer. + + Notes + ----- + Count the integer lattice paths from (0, 0) to (m, n), which at some + point (x, y) along the path, satisfy: + m*y <= n*x - h*g + The paths make steps of size +1 in either positive x or positive y + directions. + + We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk. + Hodges, J.L. Jr., + "The Significance Probability of the Smirnov Two-Sample Test," + Arkiv fiur Matematik, 3, No. 43 (1958), 469-86. + + """ + # Compute #paths which stay lower than x/m-y/n = h/lcm(m,n) + # B(x, y) = #{paths from (0,0) to (x,y) without + # previously crossing the boundary} + # = binom(x, y) - #{paths which already reached the boundary} + # Multiply by the number of path extensions going from (x, y) to (m, n) + # Sum. 
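+    # Summing over the first boundary touch (x_j, j) counts each offending
+    # path exactly once, because B(x_j, j) excludes paths that reached the
+    # boundary earlier.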
+ + # Probability is symmetrical in m, n. Computation below assumes m >= n. + if m < n: + m, n = n, m + mg = m // g + ng = n // g + + # Not every x needs to be considered. + # xj holds the list of x values to be checked. + # Wherever n*x/m + ng*h crosses an integer + lxj = n + (mg-h)//mg + xj = [(h + mg * j + ng-1)//ng for j in range(lxj)] + # B is an array just holding a few values of B(x,y), the ones needed. + # B[j] == B(x_j, j) + if lxj == 0: + return special.binom(m + n, n) + B = np.zeros(lxj) + B[0] = 1 + # Compute the B(x, y) terms + for j in range(1, lxj): + Bj = special.binom(xj[j] + j, j) + for i in range(j): + bin = special.binom(xj[j] - xj[i] + j - i, j-i) + Bj -= bin * B[i] + B[j] = Bj + # Compute the number of path extensions... + num_paths = 0 + for j in range(lxj): + bin = special.binom((m-xj[j]) + (n - j), n-j) + term = B[j] * bin + num_paths += term + return num_paths + + +def _attempt_exact_2kssamp(n1, n2, g, d, alternative): + """Attempts to compute the exact 2sample probability. + + n1, n2 are the sample sizes + g is the gcd(n1, n2) + d is the computed max difference in ECDFs + + Returns (success, d, probability) + """ + lcm = (n1 // g) * n2 + h = int(np.round(d * lcm)) + d = h * 1.0 / lcm + if h == 0: + return True, d, 1.0 + saw_fp_error, prob = False, np.nan + try: + with np.errstate(invalid="raise", over="raise"): + if alternative == 'two-sided': + if n1 == n2: + prob = _compute_prob_outside_square(n1, h) + else: + prob = _compute_outer_prob_inside_method(n1, n2, g, h) + else: + if n1 == n2: + # prob = binom(2n, n-h) / binom(2n, n) + # Evaluating in that form incurs roundoff errors + # from special.binom. Instead calculate directly + jrange = np.arange(h) + prob = np.prod((n1 - jrange) / (n1 + jrange + 1.0)) + else: + with np.errstate(over='raise'): + num_paths = _count_paths_outside_method(n1, n2, g, h) + bin = special.binom(n1 + n2, n1) + if num_paths > bin or np.isinf(bin): + saw_fp_error = True + else: + prob = num_paths / bin + + except (FloatingPointError, OverflowError): + saw_fp_error = True + + if saw_fp_error: + return False, d, np.nan + if not (0 <= prob <= 1): + return False, d, prob + return True, d, prob + + +@_axis_nan_policy_factory(_tuple_to_KstestResult, n_samples=2, n_outputs=4, + result_to_tuple=_KstestResult_to_tuple) +@_rename_parameter("mode", "method") +def ks_2samp(data1, data2, alternative='two-sided', method='auto'): + """ + Performs the two-sample Kolmogorov-Smirnov test for goodness of fit. + + This test compares the underlying continuous distributions F(x) and G(x) + of two independent samples. See Notes for a description of the available + null and alternative hypotheses. + + Parameters + ---------- + data1, data2 : array_like, 1-Dimensional + Two arrays of sample observations assumed to be drawn from a continuous + distribution, sample sizes can be different. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the null and alternative hypotheses. Default is 'two-sided'. + Please see explanations in the Notes below. + method : {'auto', 'exact', 'asymp'}, optional + Defines the method used for calculating the p-value. + The following options are available (default is 'auto'): + + * 'auto' : use 'exact' for small size arrays, 'asymp' for large + * 'exact' : use exact distribution of test statistic + * 'asymp' : use asymptotic distribution of test statistic + + Returns + ------- + res: KstestResult + An object containing attributes: + + statistic : float + KS test statistic. 
+ pvalue : float + One-tailed or two-tailed p-value. + statistic_location : float + Value from `data1` or `data2` corresponding with the KS statistic; + i.e., the distance between the empirical distribution functions is + measured at this observation. + statistic_sign : int + +1 if the empirical distribution function of `data1` exceeds + the empirical distribution function of `data2` at + `statistic_location`, otherwise -1. + + See Also + -------- + kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp + + Notes + ----- + There are three options for the null and corresponding alternative + hypothesis that can be selected using the `alternative` parameter. + + - `less`: The null hypothesis is that F(x) >= G(x) for all x; the + alternative is that F(x) < G(x) for at least one x. The statistic + is the magnitude of the minimum (most negative) difference between the + empirical distribution functions of the samples. + + - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the + alternative is that F(x) > G(x) for at least one x. The statistic + is the maximum (most positive) difference between the empirical + distribution functions of the samples. + + - `two-sided`: The null hypothesis is that the two distributions are + identical, F(x)=G(x) for all x; the alternative is that they are not + identical. The statistic is the maximum absolute difference between the + empirical distribution functions of the samples. + + Note that the alternative hypotheses describe the *CDFs* of the + underlying distributions, not the observed values of the data. For example, + suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in + x1 tend to be less than those in x2. + + If the KS statistic is large, then the p-value will be small, and this may + be taken as evidence against the null hypothesis in favor of the + alternative. + + If ``method='exact'``, `ks_2samp` attempts to compute an exact p-value, + that is, the probability under the null hypothesis of obtaining a test + statistic value as extreme as the value computed from the data. + If ``method='asymp'``, the asymptotic Kolmogorov-Smirnov distribution is + used to compute an approximate p-value. + If ``method='auto'``, an exact p-value computation is attempted if both + sample sizes are less than 10000; otherwise, the asymptotic method is used. + In any case, if an exact p-value calculation is attempted and fails, a + warning will be emitted, and the asymptotic p-value will be returned. + + The 'two-sided' 'exact' computation computes the complementary probability + and then subtracts from 1. As such, the minimum probability it can return + is about 1e-16. While the algorithm itself is exact, numerical + errors may accumulate for large sample sizes. It is most suited to + situations in which one of the sample sizes is only a few thousand. + + We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_. + + References + ---------- + .. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov + Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-486. + + Examples + -------- + Suppose we wish to test the null hypothesis that two samples were drawn + from the same distribution. + We choose a confidence level of 95%; that is, we will reject the null + hypothesis in favor of the alternative if the p-value is less than 0.05. + + If the first sample were drawn from a uniform distribution and the second + were drawn from the standard normal, we would expect the null hypothesis + to be rejected. 
+ + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> sample1 = stats.uniform.rvs(size=100, random_state=rng) + >>> sample2 = stats.norm.rvs(size=110, random_state=rng) + >>> stats.ks_2samp(sample1, sample2) + KstestResult(statistic=0.5454545454545454, pvalue=7.37417839555191e-15) + + Indeed, the p-value is lower than our threshold of 0.05, so we reject the + null hypothesis in favor of the default "two-sided" alternative: the data + were *not* drawn from the same distribution. + + When both samples are drawn from the same distribution, we expect the data + to be consistent with the null hypothesis most of the time. + + >>> sample1 = stats.norm.rvs(size=105, random_state=rng) + >>> sample2 = stats.norm.rvs(size=95, random_state=rng) + >>> stats.ks_2samp(sample1, sample2) + KstestResult(statistic=0.10927318295739348, pvalue=0.5438289009927495) + + As expected, the p-value of 0.54 is not below our threshold of 0.05, so + we cannot reject the null hypothesis. + + Suppose, however, that the first sample were drawn from + a normal distribution shifted toward greater values. In this case, + the cumulative density function (CDF) of the underlying distribution tends + to be *less* than the CDF underlying the second sample. Therefore, we would + expect the null hypothesis to be rejected with ``alternative='less'``: + + >>> sample1 = stats.norm.rvs(size=105, loc=0.5, random_state=rng) + >>> stats.ks_2samp(sample1, sample2, alternative='less') + KstestResult(statistic=0.4055137844611529, pvalue=3.5474563068855554e-08) + + and indeed, with p-value smaller than our threshold, we reject the null + hypothesis in favor of the alternative. + + """ + mode = method + + if mode not in ['auto', 'exact', 'asymp']: + raise ValueError(f'Invalid value for mode: {mode}') + alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get( + alternative.lower()[0], alternative) + if alternative not in ['two-sided', 'less', 'greater']: + raise ValueError(f'Invalid value for alternative: {alternative}') + MAX_AUTO_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N + if np.ma.is_masked(data1): + data1 = data1.compressed() + if np.ma.is_masked(data2): + data2 = data2.compressed() + data1 = np.sort(data1) + data2 = np.sort(data2) + n1 = data1.shape[0] + n2 = data2.shape[0] + if min(n1, n2) == 0: + raise ValueError('Data passed to ks_2samp must not be empty') + + data_all = np.concatenate([data1, data2]) + # using searchsorted solves equal data problem + cdf1 = np.searchsorted(data1, data_all, side='right') / n1 + cdf2 = np.searchsorted(data2, data_all, side='right') / n2 + cddiffs = cdf1 - cdf2 + + # Identify the location of the statistic + argminS = np.argmin(cddiffs) + argmaxS = np.argmax(cddiffs) + loc_minS = data_all[argminS] + loc_maxS = data_all[argmaxS] + + # Ensure sign of minS is not negative. + minS = np.clip(-cddiffs[argminS], 0, 1) + maxS = cddiffs[argmaxS] + + if alternative == 'less' or (alternative == 'two-sided' and minS > maxS): + d = minS + d_location = loc_minS + d_sign = -1 + else: + d = maxS + d_location = loc_maxS + d_sign = 1 + g = gcd(n1, n2) + n1g = n1 // g + n2g = n2 // g + prob = -np.inf + if mode == 'auto': + mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp' + elif mode == 'exact': + # If lcm(n1, n2) is too big, switch from exact to asymp + if n1g >= np.iinfo(np.int32).max / n2g: + mode = 'asymp' + warnings.warn( + f"Exact ks_2samp calculation not possible with samples sizes " + f"{n1} and {n2}. 
Switching to 'asymp'.", RuntimeWarning, + stacklevel=3) + + if mode == 'exact': + success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative) + if not success: + mode = 'asymp' + warnings.warn(f"ks_2samp: Exact calculation unsuccessful. " + f"Switching to method={mode}.", RuntimeWarning, + stacklevel=3) + + if mode == 'asymp': + # The product n1*n2 is large. Use Smirnov's asymptoptic formula. + # Ensure float to avoid overflow in multiplication + # sorted because the one-sided formula is not symmetric in n1, n2 + m, n = sorted([float(n1), float(n2)], reverse=True) + en = m * n / (m + n) + if alternative == 'two-sided': + prob = distributions.kstwo.sf(d, np.round(en)) + else: + z = np.sqrt(en) * d + # Use Hodges' suggested approximation Eqn 5.3 + # Requires m to be the larger of (n1, n2) + expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0 + prob = np.exp(expt) + + prob = np.clip(prob, 0, 1) + # Currently, `d` is a Python float. We want it to be a NumPy type, so + # float64 is appropriate. An enhancement would be for `d` to respect the + # dtype of the input. + return KstestResult(np.float64(d), prob, statistic_location=d_location, + statistic_sign=np.int8(d_sign)) + + +def _parse_kstest_args(data1, data2, args, N): + # kstest allows many different variations of arguments. + # Pull out the parsing into a separate function + # (xvals, yvals, ) # 2sample + # (xvals, cdf function,..) + # (xvals, name of distribution, ...) + # (name of distribution, name of distribution, ...) + + # Returns xvals, yvals, cdf + # where cdf is a cdf function, or None + # and yvals is either an array_like of values, or None + # and xvals is array_like. + rvsfunc, cdf = None, None + if isinstance(data1, str): + rvsfunc = getattr(distributions, data1).rvs + elif callable(data1): + rvsfunc = data1 + + if isinstance(data2, str): + cdf = getattr(distributions, data2).cdf + data2 = None + elif callable(data2): + cdf = data2 + data2 = None + + data1 = np.sort(rvsfunc(*args, size=N) if rvsfunc else data1) + return data1, data2, cdf + + +def _kstest_n_samples(kwargs): + cdf = kwargs['cdf'] + return 1 if (isinstance(cdf, str) or callable(cdf)) else 2 + + +@_axis_nan_policy_factory(_tuple_to_KstestResult, n_samples=_kstest_n_samples, + n_outputs=4, result_to_tuple=_KstestResult_to_tuple) +@_rename_parameter("mode", "method") +def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', method='auto'): + """ + Performs the (one-sample or two-sample) Kolmogorov-Smirnov test for + goodness of fit. + + The one-sample test compares the underlying distribution F(x) of a sample + against a given distribution G(x). The two-sample test compares the + underlying distributions of two independent samples. Both tests are valid + only for continuous distributions. + + Parameters + ---------- + rvs : str, array_like, or callable + If an array, it should be a 1-D array of observations of random + variables. + If a callable, it should be a function to generate random variables; + it is required to have a keyword argument `size`. + If a string, it should be the name of a distribution in `scipy.stats`, + which will be used to generate random variables. + cdf : str, array_like or callable + If array_like, it should be a 1-D array of observations of random + variables, and the two-sample test is performed + (and rvs must be array_like). + If a callable, that callable is used to calculate the cdf. + If a string, it should be the name of a distribution in `scipy.stats`, + which will be used as the cdf function. 
+ args : tuple, sequence, optional + Distribution parameters, used if `rvs` or `cdf` are strings or + callables. + N : int, optional + Sample size if `rvs` is string or callable. Default is 20. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the null and alternative hypotheses. Default is 'two-sided'. + Please see explanations in the Notes below. + method : {'auto', 'exact', 'approx', 'asymp'}, optional + Defines the distribution used for calculating the p-value. + The following options are available (default is 'auto'): + + * 'auto' : selects one of the other options. + * 'exact' : uses the exact distribution of test statistic. + * 'approx' : approximates the two-sided probability with twice the + one-sided probability + * 'asymp': uses asymptotic distribution of test statistic + + Returns + ------- + res: KstestResult + An object containing attributes: + + statistic : float + KS test statistic, either D+, D-, or D (the maximum of the two) + pvalue : float + One-tailed or two-tailed p-value. + statistic_location : float + In a one-sample test, this is the value of `rvs` + corresponding with the KS statistic; i.e., the distance between + the empirical distribution function and the hypothesized cumulative + distribution function is measured at this observation. + + In a two-sample test, this is the value from `rvs` or `cdf` + corresponding with the KS statistic; i.e., the distance between + the empirical distribution functions is measured at this + observation. + statistic_sign : int + In a one-sample test, this is +1 if the KS statistic is the + maximum positive difference between the empirical distribution + function and the hypothesized cumulative distribution function + (D+); it is -1 if the KS statistic is the maximum negative + difference (D-). + + In a two-sample test, this is +1 if the empirical distribution + function of `rvs` exceeds the empirical distribution + function of `cdf` at `statistic_location`, otherwise -1. + + See Also + -------- + ks_1samp, ks_2samp + + Notes + ----- + There are three options for the null and corresponding alternative + hypothesis that can be selected using the `alternative` parameter. + + - `two-sided`: The null hypothesis is that the two distributions are + identical, F(x)=G(x) for all x; the alternative is that they are not + identical. + + - `less`: The null hypothesis is that F(x) >= G(x) for all x; the + alternative is that F(x) < G(x) for at least one x. + + - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the + alternative is that F(x) > G(x) for at least one x. + + Note that the alternative hypotheses describe the *CDFs* of the + underlying distributions, not the observed values. For example, + suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in + x1 tend to be less than those in x2. + + + Examples + -------- + Suppose we wish to test the null hypothesis that a sample is distributed + according to the standard normal. + We choose a confidence level of 95%; that is, we will reject the null + hypothesis in favor of the alternative if the p-value is less than 0.05. + + When testing uniformly distributed data, we would expect the + null hypothesis to be rejected. + + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> stats.kstest(stats.uniform.rvs(size=100, random_state=rng), + ... 
stats.norm.cdf) + KstestResult(statistic=0.5001899973268688, pvalue=1.1616392184763533e-23) + + Indeed, the p-value is lower than our threshold of 0.05, so we reject the + null hypothesis in favor of the default "two-sided" alternative: the data + are *not* distributed according to the standard normal. + + When testing random variates from the standard normal distribution, we + expect the data to be consistent with the null hypothesis most of the time. + + >>> x = stats.norm.rvs(size=100, random_state=rng) + >>> stats.kstest(x, stats.norm.cdf) + KstestResult(statistic=0.05345882212970396, pvalue=0.9227159037744717) + + As expected, the p-value of 0.92 is not below our threshold of 0.05, so + we cannot reject the null hypothesis. + + Suppose, however, that the random variates are distributed according to + a normal distribution that is shifted toward greater values. In this case, + the cumulative density function (CDF) of the underlying distribution tends + to be *less* than the CDF of the standard normal. Therefore, we would + expect the null hypothesis to be rejected with ``alternative='less'``: + + >>> x = stats.norm.rvs(size=100, loc=0.5, random_state=rng) + >>> stats.kstest(x, stats.norm.cdf, alternative='less') + KstestResult(statistic=0.17482387821055168, pvalue=0.001913921057766743) + + and indeed, with p-value smaller than our threshold, we reject the null + hypothesis in favor of the alternative. + + For convenience, the previous test can be performed using the name of the + distribution as the second argument. + + >>> stats.kstest(x, "norm", alternative='less') + KstestResult(statistic=0.17482387821055168, pvalue=0.001913921057766743) + + The examples above have all been one-sample tests identical to those + performed by `ks_1samp`. Note that `kstest` can also perform two-sample + tests identical to those performed by `ks_2samp`. For example, when two + samples are drawn from the same distribution, we expect the data to be + consistent with the null hypothesis most of the time. + + >>> sample1 = stats.laplace.rvs(size=105, random_state=rng) + >>> sample2 = stats.laplace.rvs(size=95, random_state=rng) + >>> stats.kstest(sample1, sample2) + KstestResult(statistic=0.11779448621553884, pvalue=0.4494256912629795) + + As expected, the p-value of 0.45 is not below our threshold of 0.05, so + we cannot reject the null hypothesis. + + """ + # to not break compatibility with existing code + if alternative == 'two_sided': + alternative = 'two-sided' + if alternative not in ['two-sided', 'greater', 'less']: + raise ValueError("Unexpected alternative %s" % alternative) + xvals, yvals, cdf = _parse_kstest_args(rvs, cdf, args, N) + if cdf: + return ks_1samp(xvals, cdf, args=args, alternative=alternative, + method=method, _no_deco=True) + return ks_2samp(xvals, yvals, alternative=alternative, method=method, + _no_deco=True) + + +def tiecorrect(rankvals): + """Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests. + + Parameters + ---------- + rankvals : array_like + A 1-D sequence of ranks. Typically this will be the array + returned by `~scipy.stats.rankdata`. + + Returns + ------- + factor : float + Correction factor for U or H. + + See Also + -------- + rankdata : Assign ranks to the data + mannwhitneyu : Mann-Whitney rank test + kruskal : Kruskal-Wallis H test + + References + ---------- + .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral + Sciences. New York: McGraw-Hill. 
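+
+ Notes
+ -----
+ As implemented below, the correction factor is
+ ``1 - sum(t**3 - t) / (n**3 - n)``, where ``t`` runs over the sizes of
+ the groups of tied ranks and ``n`` is the total number of observations;
+ it equals 1 when there are no ties.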
+ + Examples + -------- + >>> from scipy.stats import tiecorrect, rankdata + >>> tiecorrect([1, 2.5, 2.5, 4]) + 0.9 + >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4]) + >>> ranks + array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5]) + >>> tiecorrect(ranks) + 0.9833333333333333 + + """ + arr = np.sort(rankvals) + idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0] + cnt = np.diff(idx).astype(np.float64) + + size = np.float64(arr.size) + return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size) + + +RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(RanksumsResult, n_samples=2) +def ranksums(x, y, alternative='two-sided'): + """Compute the Wilcoxon rank-sum statistic for two samples. + + The Wilcoxon rank-sum test tests the null hypothesis that two sets + of measurements are drawn from the same distribution. The alternative + hypothesis is that values in one sample are more likely to be + larger than the values in the other sample. + + This test should be used to compare two samples from continuous + distributions. It does not handle ties between measurements + in x and y. For tie-handling and an optional continuity correction + see `scipy.stats.mannwhitneyu`. + + Parameters + ---------- + x,y : array_like + The data from the two samples. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': one of the distributions (underlying `x` or `y`) is + stochastically greater than the other. + * 'less': the distribution underlying `x` is stochastically less + than the distribution underlying `y`. + * 'greater': the distribution underlying `x` is stochastically greater + than the distribution underlying `y`. + + .. versionadded:: 1.7.0 + + Returns + ------- + statistic : float + The test statistic under the large-sample approximation that the + rank sum statistic is normally distributed. + pvalue : float + The p-value of the test. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test + + Examples + -------- + We can test the hypothesis that two independent unequal-sized samples are + drawn from the same distribution with computing the Wilcoxon rank-sum + statistic. + + >>> import numpy as np + >>> from scipy.stats import ranksums + >>> rng = np.random.default_rng() + >>> sample1 = rng.uniform(-1, 1, 200) + >>> sample2 = rng.uniform(-0.5, 1.5, 300) # a shifted distribution + >>> ranksums(sample1, sample2) + RanksumsResult(statistic=-7.887059, + pvalue=3.09390448e-15) # may vary + >>> ranksums(sample1, sample2, alternative='less') + RanksumsResult(statistic=-7.750585297581713, + pvalue=4.573497606342543e-15) # may vary + >>> ranksums(sample1, sample2, alternative='greater') + RanksumsResult(statistic=-7.750585297581713, + pvalue=0.9999999999999954) # may vary + + The p-value of less than ``0.05`` indicates that this test rejects the + hypothesis at the 5% significance level. 
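+
+ Since the statistic is asymptotically standard normal under the null
+ hypothesis, the two-sided p-value above is equivalent to
+ ``2 * scipy.stats.norm.sf(abs(statistic))``; this is only a consistency
+ check on the reported values, not an additional feature of the function.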
+ + """ + x, y = map(np.asarray, (x, y)) + n1 = len(x) + n2 = len(y) + alldata = np.concatenate((x, y)) + ranked = rankdata(alldata) + x = ranked[:n1] + s = np.sum(x, axis=0) + expected = n1 * (n1+n2+1) / 2.0 + z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0) + pvalue = _get_pvalue(z, distributions.norm, alternative) + + return RanksumsResult(z[()], pvalue[()]) + + +KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(KruskalResult, n_samples=None) +def kruskal(*samples, nan_policy='propagate'): + """Compute the Kruskal-Wallis H-test for independent samples. + + The Kruskal-Wallis H-test tests the null hypothesis that the population + median of all of the groups are equal. It is a non-parametric version of + ANOVA. The test works on 2 or more independent samples, which may have + different sizes. Note that rejecting the null hypothesis does not + indicate which of the groups differs. Post hoc comparisons between + groups are required to determine which groups are different. + + Parameters + ---------- + sample1, sample2, ... : array_like + Two or more arrays with the sample measurements can be given as + arguments. Samples must be one-dimensional. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + statistic : float + The Kruskal-Wallis H statistic, corrected for ties. + pvalue : float + The p-value for the test using the assumption that H has a chi + square distribution. The p-value returned is the survival function of + the chi square distribution evaluated at H. + + See Also + -------- + f_oneway : 1-way ANOVA. + mannwhitneyu : Mann-Whitney rank test on two samples. + friedmanchisquare : Friedman test for repeated measurements. + + Notes + ----- + Due to the assumption that H has a chi square distribution, the number + of samples in each group must not be too small. A typical rule is + that each sample must have at least 5 measurements. + + References + ---------- + .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in + One-Criterion Variance Analysis", Journal of the American Statistical + Association, Vol. 47, Issue 260, pp. 583-621, 1952. + .. 
[2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance + + Examples + -------- + >>> from scipy import stats + >>> x = [1, 3, 5, 7, 9] + >>> y = [2, 4, 6, 8, 10] + >>> stats.kruskal(x, y) + KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895) + + >>> x = [1, 1, 1] + >>> y = [2, 2, 2] + >>> z = [2, 2] + >>> stats.kruskal(x, y, z) + KruskalResult(statistic=7.0, pvalue=0.0301973834223185) + + """ + samples = list(map(np.asarray, samples)) + + num_groups = len(samples) + if num_groups < 2: + raise ValueError("Need at least two groups in stats.kruskal()") + + for sample in samples: + if sample.size == 0: + NaN = _get_nan(*samples) + return KruskalResult(NaN, NaN) + elif sample.ndim != 1: + raise ValueError("Samples must be one-dimensional.") + + n = np.asarray(list(map(len, samples))) + + if nan_policy not in ('propagate', 'raise', 'omit'): + raise ValueError("nan_policy must be 'propagate', 'raise' or 'omit'") + + contains_nan = False + for sample in samples: + cn = _contains_nan(sample, nan_policy) + if cn[0]: + contains_nan = True + break + + if contains_nan and nan_policy == 'omit': + for sample in samples: + sample = ma.masked_invalid(sample) + return mstats_basic.kruskal(*samples) + + if contains_nan and nan_policy == 'propagate': + return KruskalResult(np.nan, np.nan) + + alldata = np.concatenate(samples) + ranked = rankdata(alldata) + ties = tiecorrect(ranked) + if ties == 0: + raise ValueError('All numbers are identical in kruskal') + + # Compute sum^2/n for each group and sum + j = np.insert(np.cumsum(n), 0, 0) + ssbn = 0 + for i in range(num_groups): + ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i] + + totaln = np.sum(n, dtype=float) + h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1) + df = num_groups - 1 + h /= ties + + return KruskalResult(h, distributions.chi2.sf(h, df)) + + +FriedmanchisquareResult = namedtuple('FriedmanchisquareResult', + ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(FriedmanchisquareResult, n_samples=None, paired=True) +def friedmanchisquare(*samples): + """Compute the Friedman test for repeated samples. + + The Friedman test tests the null hypothesis that repeated samples of + the same individuals have the same distribution. It is often used + to test for consistency among samples obtained in different ways. + For example, if two sampling techniques are used on the same set of + individuals, the Friedman test can be used to determine if the two + sampling techniques are consistent. + + Parameters + ---------- + sample1, sample2, sample3... : array_like + Arrays of observations. All of the arrays must have the same number + of elements. At least three samples must be given. + + Returns + ------- + statistic : float + The test statistic, correcting for ties. + pvalue : float + The associated p-value assuming that the test statistic has a chi + squared distribution. + + Notes + ----- + Due to the assumption that the test statistic has a chi squared + distribution, the p-value is only reliable for n > 10 and more than + 6 repeated samples. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Friedman_test + .. [2] P. Sprent and N.C. Smeeton, "Applied Nonparametric Statistical + Methods, Third Edition". Chapter 6, Section 6.3.2. + + Examples + -------- + In [2]_, the pulse rate (per minute) of a group of seven students was + measured before exercise, immediately after exercise and 5 minutes + after exercise. 
Is there evidence to suggest that the pulse rates on + these three occasions are similar? + + We begin by formulating a null hypothesis :math:`H_0`: + + The pulse rates are identical on these three occasions. + + Let's assess the plausibility of this hypothesis with a Friedman test. + + >>> from scipy.stats import friedmanchisquare + >>> before = [72, 96, 88, 92, 74, 76, 82] + >>> immediately_after = [120, 120, 132, 120, 101, 96, 112] + >>> five_min_after = [76, 95, 104, 96, 84, 72, 76] + >>> res = friedmanchisquare(before, immediately_after, five_min_after) + >>> res.statistic + 10.57142857142857 + >>> res.pvalue + 0.005063414171757498 + + Using a significance level of 5%, we would reject the null hypothesis in + favor of the alternative hypothesis: "the pulse rates are different on + these three occasions". + + """ + k = len(samples) + if k < 3: + raise ValueError('At least 3 sets of samples must be given ' + f'for Friedman test, got {k}.') + + n = len(samples[0]) + for i in range(1, k): + if len(samples[i]) != n: + raise ValueError('Unequal N in friedmanchisquare. Aborting.') + + # Rank data + data = np.vstack(samples).T + data = data.astype(float) + for i in range(len(data)): + data[i] = rankdata(data[i]) + + # Handle ties + ties = 0 + for d in data: + replist, repnum = find_repeats(array(d)) + for t in repnum: + ties += t * (t*t - 1) + c = 1 - ties / (k*(k*k - 1)*n) + + ssbn = np.sum(data.sum(axis=0)**2) + chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c + + return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1)) + + +BrunnerMunzelResult = namedtuple('BrunnerMunzelResult', + ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(BrunnerMunzelResult, n_samples=2) +def brunnermunzel(x, y, alternative="two-sided", distribution="t", + nan_policy='propagate'): + """Compute the Brunner-Munzel test on samples x and y. + + The Brunner-Munzel test is a nonparametric test of the null hypothesis that + when values are taken one by one from each group, the probabilities of + getting large values in both groups are equal. + Unlike the Wilcoxon-Mann-Whitney's U test, this does not require the + assumption of equivariance of two groups. Note that this does not assume + the distributions are same. This test works on two independent samples, + which may have different sizes. + + Parameters + ---------- + x, y : array_like + Array of samples, should be one-dimensional. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided' + * 'less': one-sided + * 'greater': one-sided + distribution : {'t', 'normal'}, optional + Defines how to get the p-value. + The following options are available (default is 't'): + + * 't': get the p-value by t-distribution + * 'normal': get the p-value by standard normal distribution. + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': returns nan + * 'raise': throws an error + * 'omit': performs the calculations ignoring nan values + + Returns + ------- + statistic : float + The Brunner-Munzer W statistic. + pvalue : float + p-value assuming an t distribution. One-sided or + two-sided, depending on the choice of `alternative` and `distribution`. + + See Also + -------- + mannwhitneyu : Mann-Whitney rank test on two samples. 
+ + Notes + ----- + Brunner and Munzel recommended to estimate the p-value by t-distribution + when the size of data is 50 or less. If the size is lower than 10, it would + be better to use permuted Brunner Munzel test (see [2]_). + + References + ---------- + .. [1] Brunner, E. and Munzel, U. "The nonparametric Benhrens-Fisher + problem: Asymptotic theory and a small-sample approximation". + Biometrical Journal. Vol. 42(2000): 17-25. + .. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the + non-parametric Behrens-Fisher problem". Computational Statistics and + Data Analysis. Vol. 51(2007): 5192-5204. + + Examples + -------- + >>> from scipy import stats + >>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1] + >>> x2 = [3,3,4,3,1,2,3,1,1,5,4] + >>> w, p_value = stats.brunnermunzel(x1, x2) + >>> w + 3.1374674823029505 + >>> p_value + 0.0057862086661515377 + + """ + + nx = len(x) + ny = len(y) + if nx == 0 or ny == 0: + NaN = _get_nan(x, y) + return BrunnerMunzelResult(NaN, NaN) + rankc = rankdata(np.concatenate((x, y))) + rankcx = rankc[0:nx] + rankcy = rankc[nx:nx+ny] + rankcx_mean = np.mean(rankcx) + rankcy_mean = np.mean(rankcy) + rankx = rankdata(x) + ranky = rankdata(y) + rankx_mean = np.mean(rankx) + ranky_mean = np.mean(ranky) + + Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0)) + Sx /= nx - 1 + Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0)) + Sy /= ny - 1 + + wbfn = nx * ny * (rankcy_mean - rankcx_mean) + wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy) + + if distribution == "t": + df_numer = np.power(nx * Sx + ny * Sy, 2.0) + df_denom = np.power(nx * Sx, 2.0) / (nx - 1) + df_denom += np.power(ny * Sy, 2.0) / (ny - 1) + df = df_numer / df_denom + + if (df_numer == 0) and (df_denom == 0): + message = ("p-value cannot be estimated with `distribution='t' " + "because degrees of freedom parameter is undefined " + "(0/0). Try using `distribution='normal'") + warnings.warn(message, RuntimeWarning, stacklevel=2) + + distribution = distributions.t(df) + elif distribution == "normal": + distribution = distributions.norm() + else: + raise ValueError( + "distribution should be 't' or 'normal'") + + p = _get_pvalue(-wbfn, distribution, alternative) + + return BrunnerMunzelResult(wbfn, p) + + +@_axis_nan_policy_factory(SignificanceResult, kwd_samples=['weights'], paired=True) +def combine_pvalues(pvalues, method='fisher', weights=None): + """ + Combine p-values from independent tests that bear upon the same hypothesis. + + These methods are intended only for combining p-values from hypothesis + tests based upon continuous distributions. + + Each method assumes that under the null hypothesis, the p-values are + sampled independently and uniformly from the interval [0, 1]. A test + statistic (different for each method) is computed and a combined + p-value is calculated based upon the distribution of this test statistic + under the null hypothesis. + + Parameters + ---------- + pvalues : array_like + Array of p-values assumed to come from independent tests based on + continuous distributions. + method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'} + + Name of method to use to combine p-values. 
+ + The available methods are (see Notes for details): + + * 'fisher': Fisher's method (Fisher's combined probability test) + * 'pearson': Pearson's method + * 'mudholkar_george': Mudholkar's and George's method + * 'tippett': Tippett's method + * 'stouffer': Stouffer's Z-score method + weights : array_like, optional + Optional array of weights used only for Stouffer's Z-score method. + Ignored by other methods. + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : float + The statistic calculated by the specified method. + pvalue : float + The combined p-value. + + Examples + -------- + Suppose we wish to combine p-values from four independent tests + of the same null hypothesis using Fisher's method (default). + + >>> from scipy.stats import combine_pvalues + >>> pvalues = [0.1, 0.05, 0.02, 0.3] + >>> combine_pvalues(pvalues) + SignificanceResult(statistic=20.828626352604235, pvalue=0.007616871850449092) + + When the individual p-values carry different weights, consider Stouffer's + method. + + >>> weights = [1, 2, 3, 4] + >>> res = combine_pvalues(pvalues, method='stouffer', weights=weights) + >>> res.pvalue + 0.009578891494533616 + + Notes + ----- + If this function is applied to tests with a discrete statistics such as + any rank test or contingency-table test, it will yield systematically + wrong results, e.g. Fisher's method will systematically overestimate the + p-value [1]_. This problem becomes less severe for large sample sizes + when the discrete distributions become approximately continuous. + + The differences between the methods can be best illustrated by their + statistics and what aspects of a combination of p-values they emphasise + when considering significance [2]_. For example, methods emphasising large + p-values are more sensitive to strong false and true negatives; conversely + methods focussing on small p-values are sensitive to positives. + + * The statistics of Fisher's method (also known as Fisher's combined + probability test) [3]_ is :math:`-2\\sum_i \\log(p_i)`, which is + equivalent (as a test statistics) to the product of individual p-values: + :math:`\\prod_i p_i`. Under the null hypothesis, this statistics follows + a :math:`\\chi^2` distribution. This method emphasises small p-values. + * Pearson's method uses :math:`-2\\sum_i\\log(1-p_i)`, which is equivalent + to :math:`\\prod_i \\frac{1}{1-p_i}` [2]_. + It thus emphasises large p-values. + * Mudholkar and George compromise between Fisher's and Pearson's method by + averaging their statistics [4]_. Their method emphasises extreme + p-values, both close to 1 and 0. + * Stouffer's method [5]_ uses Z-scores and the statistic: + :math:`\\sum_i \\Phi^{-1} (p_i)`, where :math:`\\Phi` is the CDF of the + standard normal distribution. The advantage of this method is that it is + straightforward to introduce weights, which can make Stouffer's method + more powerful than Fisher's method when the p-values are from studies + of different size [6]_ [7]_. + * Tippett's method uses the smallest p-value as a statistic. + (Mind that this minimum is not the combined p-value.) + + Fisher's method may be extended to combine p-values from dependent tests + [8]_. Extensions such as Brown's method and Kost's method are not currently + implemented. + + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Kincaid, W. M., "The Combination of Tests Based on Discrete + Distributions." Journal of the American Statistical Association 57, + no. 297 (1962), 10-19. + .. 
[2] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of + combining p-values." Biometrika 105.1 (2018): 239-246. + .. [3] https://en.wikipedia.org/wiki/Fisher%27s_method + .. [4] George, E. O., and G. S. Mudholkar. "On the convolution of logistic + random variables." Metrika 30.1 (1983): 1-13. + .. [5] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method + .. [6] Whitlock, M. C. "Combining probability from independent tests: the + weighted Z-method is superior to Fisher's approach." Journal of + Evolutionary Biology 18, no. 5 (2005): 1368-1373. + .. [7] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method + for combining probabilities in meta-analysis." Journal of + Evolutionary Biology 24, no. 8 (2011): 1836-1841. + .. [8] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method + + """ + if pvalues.size == 0: + NaN = _get_nan(pvalues) + return SignificanceResult(NaN, NaN) + + if method == 'fisher': + statistic = -2 * np.sum(np.log(pvalues)) + pval = distributions.chi2.sf(statistic, 2 * len(pvalues)) + elif method == 'pearson': + statistic = 2 * np.sum(np.log1p(-pvalues)) + pval = distributions.chi2.cdf(-statistic, 2 * len(pvalues)) + elif method == 'mudholkar_george': + normalizing_factor = np.sqrt(3/len(pvalues))/np.pi + statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues)) + nu = 5 * len(pvalues) + 4 + approx_factor = np.sqrt(nu / (nu - 2)) + pval = distributions.t.sf(statistic * normalizing_factor + * approx_factor, nu) + elif method == 'tippett': + statistic = np.min(pvalues) + pval = distributions.beta.cdf(statistic, 1, len(pvalues)) + elif method == 'stouffer': + if weights is None: + weights = np.ones_like(pvalues) + elif len(weights) != len(pvalues): + raise ValueError("pvalues and weights must be of the same size.") + + Zi = distributions.norm.isf(pvalues) + statistic = np.dot(weights, Zi) / np.linalg.norm(weights) + pval = distributions.norm.sf(statistic) + + else: + raise ValueError( + f"Invalid method {method!r}. Valid methods are 'fisher', " + "'pearson', 'mudholkar_george', 'tippett', and 'stouffer'" + ) + + return SignificanceResult(statistic, pval) + + +@dataclass +class QuantileTestResult: + r""" + Result of `scipy.stats.quantile_test`. + + Attributes + ---------- + statistic: float + The statistic used to calculate the p-value; either ``T1``, the + number of observations less than or equal to the hypothesized quantile, + or ``T2``, the number of observations strictly less than the + hypothesized quantile. Two test statistics are required to handle the + possibility the data was generated from a discrete or mixed + distribution. + + statistic_type : int + ``1`` or ``2`` depending on which of ``T1`` or ``T2`` was used to + calculate the p-value respectively. ``T1`` corresponds to the + ``"greater"`` alternative hypothesis and ``T2`` to the ``"less"``. For + the ``"two-sided"`` case, the statistic type that leads to smallest + p-value is used. For significant tests, ``statistic_type = 1`` means + there is evidence that the population quantile is significantly greater + than the hypothesized value and ``statistic_type = 2`` means there is + evidence that it is significantly less than the hypothesized value. + + pvalue : float + The p-value of the hypothesis test. 
+ """ + statistic: float + statistic_type: int + pvalue: float + _alternative: list[str] = field(repr=False) + _x : np.ndarray = field(repr=False) + _p : float = field(repr=False) + + def confidence_interval(self, confidence_level=0.95): + """ + Compute the confidence interval of the quantile. + + Parameters + ---------- + confidence_level : float, default: 0.95 + Confidence level for the computed confidence interval + of the quantile. Default is 0.95. + + Returns + ------- + ci : ``ConfidenceInterval`` object + The object has attributes ``low`` and ``high`` that hold the + lower and upper bounds of the confidence interval. + + Examples + -------- + >>> import numpy as np + >>> import scipy.stats as stats + >>> p = 0.75 # quantile of interest + >>> q = 0 # hypothesized value of the quantile + >>> x = np.exp(np.arange(0, 1.01, 0.01)) + >>> res = stats.quantile_test(x, q=q, p=p, alternative='less') + >>> lb, ub = res.confidence_interval() + >>> lb, ub + (-inf, 2.293318740264183) + >>> res = stats.quantile_test(x, q=q, p=p, alternative='two-sided') + >>> lb, ub = res.confidence_interval(0.9) + >>> lb, ub + (1.9542373206359396, 2.293318740264183) + """ + + alternative = self._alternative + p = self._p + x = np.sort(self._x) + n = len(x) + bd = stats.binom(n, p) + + if confidence_level <= 0 or confidence_level >= 1: + message = "`confidence_level` must be a number between 0 and 1." + raise ValueError(message) + + low_index = np.nan + high_index = np.nan + + if alternative == 'less': + p = 1 - confidence_level + low = -np.inf + high_index = int(bd.isf(p)) + high = x[high_index] if high_index < n else np.nan + elif alternative == 'greater': + p = 1 - confidence_level + low_index = int(bd.ppf(p)) - 1 + low = x[low_index] if low_index >= 0 else np.nan + high = np.inf + elif alternative == 'two-sided': + p = (1 - confidence_level) / 2 + low_index = int(bd.ppf(p)) - 1 + low = x[low_index] if low_index >= 0 else np.nan + high_index = int(bd.isf(p)) + high = x[high_index] if high_index < n else np.nan + + return ConfidenceInterval(low, high) + + +def quantile_test_iv(x, q, p, alternative): + + x = np.atleast_1d(x) + message = '`x` must be a one-dimensional array of numbers.' + if x.ndim != 1 or not np.issubdtype(x.dtype, np.number): + raise ValueError(message) + + q = np.array(q)[()] + message = "`q` must be a scalar." + if q.ndim != 0 or not np.issubdtype(q.dtype, np.number): + raise ValueError(message) + + p = np.array(p)[()] + message = "`p` must be a float strictly between 0 and 1." + if p.ndim != 0 or p >= 1 or p <= 0: + raise ValueError(message) + + alternatives = {'two-sided', 'less', 'greater'} + message = f"`alternative` must be one of {alternatives}" + if alternative not in alternatives: + raise ValueError(message) + + return x, q, p, alternative + + +def quantile_test(x, *, q=0, p=0.5, alternative='two-sided'): + r""" + Perform a quantile test and compute a confidence interval of the quantile. + + This function tests the null hypothesis that `q` is the value of the + quantile associated with probability `p` of the population underlying + sample `x`. For example, with default parameters, it tests that the + median of the population underlying `x` is zero. The function returns an + object including the test statistic, a p-value, and a method for computing + the confidence interval around the quantile. + + Parameters + ---------- + x : array_like + A one-dimensional sample. + q : float, default: 0 + The hypothesized value of the quantile. 
+ p : float, default: 0.5 + The probability associated with the quantile; i.e. the proportion of + the population less than `q` is `p`. Must be strictly between 0 and + 1. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + The following options are available (default is 'two-sided'): + + * 'two-sided': the quantile associated with the probability `p` + is not `q`. + * 'less': the quantile associated with the probability `p` is less + than `q`. + * 'greater': the quantile associated with the probability `p` is + greater than `q`. + + Returns + ------- + result : QuantileTestResult + An object with the following attributes: + + statistic : float + One of two test statistics that may be used in the quantile test. + The first test statistic, ``T1``, is the proportion of samples in + `x` that are less than or equal to the hypothesized quantile + `q`. The second test statistic, ``T2``, is the proportion of + samples in `x` that are strictly less than the hypothesized + quantile `q`. + + When ``alternative = 'greater'``, ``T1`` is used to calculate the + p-value and ``statistic`` is set to ``T1``. + + When ``alternative = 'less'``, ``T2`` is used to calculate the + p-value and ``statistic`` is set to ``T2``. + + When ``alternative = 'two-sided'``, both ``T1`` and ``T2`` are + considered, and the one that leads to the smallest p-value is used. + + statistic_type : int + Either `1` or `2` depending on which of ``T1`` or ``T2`` was + used to calculate the p-value. + + pvalue : float + The p-value associated with the given alternative. + + The object also has the following method: + + confidence_interval(confidence_level=0.95) + Computes a confidence interval around the the + population quantile associated with the probability `p`. The + confidence interval is returned in a ``namedtuple`` with + fields `low` and `high`. Values are `nan` when there are + not enough observations to compute the confidence interval at + the desired confidence. + + Notes + ----- + This test and its method for computing confidence intervals are + non-parametric. They are valid if and only if the observations are i.i.d. + + The implementation of the test follows Conover [1]_. Two test statistics + are considered. + + ``T1``: The number of observations in `x` less than or equal to `q`. + + ``T1 = (x <= q).sum()`` + + ``T2``: The number of observations in `x` strictly less than `q`. + + ``T2 = (x < q).sum()`` + + The use of two test statistics is necessary to handle the possibility that + `x` was generated from a discrete or mixed distribution. + + The null hypothesis for the test is: + + H0: The :math:`p^{\mathrm{th}}` population quantile is `q`. + + and the null distribution for each test statistic is + :math:`\mathrm{binom}\left(n, p\right)`. When ``alternative='less'``, + the alternative hypothesis is: + + H1: The :math:`p^{\mathrm{th}}` population quantile is less than `q`. + + and the p-value is the probability that the binomial random variable + + .. math:: + Y \sim \mathrm{binom}\left(n, p\right) + + is greater than or equal to the observed value ``T2``. + + When ``alternative='greater'``, the alternative hypothesis is: + + H1: The :math:`p^{\mathrm{th}}` population quantile is greater than `q` + + and the p-value is the probability that the binomial random variable Y + is less than or equal to the observed value ``T1``. + + When ``alternative='two-sided'``, the alternative hypothesis is + + H1: `q` is not the :math:`p^{\mathrm{th}}` population quantile. 
+ + and the p-value is twice the smaller of the p-values for the ``'less'`` + and ``'greater'`` cases. Both of these p-values can exceed 0.5 for the same + data, so the value is clipped into the interval :math:`[0, 1]`. + + The approach for confidence intervals is attributed to Thompson [2]_ and + later proven to be applicable to any set of i.i.d. samples [3]_. The + computation is based on the observation that the probability of a quantile + :math:`q` to be larger than any observations :math:`x_m (1\leq m \leq N)` + can be computed as + + .. math:: + + \mathbb{P}(x_m \leq q) = 1 - \sum_{k=0}^{m-1} \binom{N}{k} + q^k(1-q)^{N-k} + + By default, confidence intervals are computed for a 95% confidence level. + A common interpretation of a 95% confidence intervals is that if i.i.d. + samples are drawn repeatedly from the same population and confidence + intervals are formed each time, the confidence interval will contain the + true value of the specified quantile in approximately 95% of trials. + + A similar function is available in the QuantileNPCI R package [4]_. The + foundation is the same, but it computes the confidence interval bounds by + doing interpolations between the sample values, whereas this function uses + only sample values as bounds. Thus, ``quantile_test.confidence_interval`` + returns more conservative intervals (i.e., larger). + + The same computation of confidence intervals for quantiles is included in + the confintr package [5]_. + + Two-sided confidence intervals are not guaranteed to be optimal; i.e., + there may exist a tighter interval that may contain the quantile of + interest with probability larger than the confidence level. + Without further assumption on the samples (e.g., the nature of the + underlying distribution), the one-sided intervals are optimally tight. + + References + ---------- + .. [1] W. J. Conover. Practical Nonparametric Statistics, 3rd Ed. 1999. + .. [2] W. R. Thompson, "On Confidence Ranges for the Median and Other + Expectation Distributions for Populations of Unknown Distribution + Form," The Annals of Mathematical Statistics, vol. 7, no. 3, + pp. 122-128, 1936, Accessed: Sep. 18, 2019. [Online]. Available: + https://www.jstor.org/stable/2957563. + .. [3] H. A. David and H. N. Nagaraja, "Order Statistics in Nonparametric + Inference" in Order Statistics, John Wiley & Sons, Ltd, 2005, pp. + 159-170. Available: + https://onlinelibrary.wiley.com/doi/10.1002/0471722162.ch7. + .. [4] N. Hutson, A. Hutson, L. Yan, "QuantileNPCI: Nonparametric + Confidence Intervals for Quantiles," R package, + https://cran.r-project.org/package=QuantileNPCI + .. [5] M. Mayer, "confintr: Confidence Intervals," R package, + https://cran.r-project.org/package=confintr + + + Examples + -------- + + Suppose we wish to test the null hypothesis that the median of a population + is equal to 0.5. We choose a confidence level of 99%; that is, we will + reject the null hypothesis in favor of the alternative if the p-value is + less than 0.01. + + When testing random variates from the standard uniform distribution, which + has a median of 0.5, we expect the data to be consistent with the null + hypothesis most of the time. 
+ + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng(6981396440634228121) + >>> rvs = stats.uniform.rvs(size=100, random_state=rng) + >>> stats.quantile_test(rvs, q=0.5, p=0.5) + QuantileTestResult(statistic=45, statistic_type=1, pvalue=0.36820161732669576) + + As expected, the p-value is not below our threshold of 0.01, so + we cannot reject the null hypothesis. + + When testing data from the standard *normal* distribution, which has a + median of 0, we would expect the null hypothesis to be rejected. + + >>> rvs = stats.norm.rvs(size=100, random_state=rng) + >>> stats.quantile_test(rvs, q=0.5, p=0.5) + QuantileTestResult(statistic=67, statistic_type=2, pvalue=0.0008737198369123724) + + Indeed, the p-value is lower than our threshold of 0.01, so we reject the + null hypothesis in favor of the default "two-sided" alternative: the median + of the population is *not* equal to 0.5. + + However, suppose we were to test the null hypothesis against the + one-sided alternative that the median of the population is *greater* than + 0.5. Since the median of the standard normal is less than 0.5, we would not + expect the null hypothesis to be rejected. + + >>> stats.quantile_test(rvs, q=0.5, p=0.5, alternative='greater') + QuantileTestResult(statistic=67, statistic_type=1, pvalue=0.9997956114162866) + + Unsurprisingly, with a p-value greater than our threshold, we would not + reject the null hypothesis in favor of the chosen alternative. + + The quantile test can be used for any quantile, not only the median. For + example, we can test whether the third quartile of the distribution + underlying the sample is greater than 0.6. + + >>> rvs = stats.uniform.rvs(size=100, random_state=rng) + >>> stats.quantile_test(rvs, q=0.6, p=0.75, alternative='greater') + QuantileTestResult(statistic=64, statistic_type=1, pvalue=0.00940696592998271) + + The p-value is lower than the threshold. We reject the null hypothesis in + favor of the alternative: the third quartile of the distribution underlying + our sample is greater than 0.6. + + `quantile_test` can also compute confidence intervals for any quantile. + + >>> rvs = stats.norm.rvs(size=100, random_state=rng) + >>> res = stats.quantile_test(rvs, q=0.6, p=0.75) + >>> ci = res.confidence_interval(confidence_level=0.95) + >>> ci + ConfidenceInterval(low=0.284491604437432, high=0.8912531024914844) + + When testing a one-sided alternative, the confidence interval contains + all observations such that if passed as `q`, the p-value of the + test would be greater than 0.05, and therefore the null hypothesis + would not be rejected. For example: + + >>> rvs.sort() + >>> q, p, alpha = 0.6, 0.75, 0.95 + >>> res = stats.quantile_test(rvs, q=q, p=p, alternative='less') + >>> ci = res.confidence_interval(confidence_level=alpha) + >>> for x in rvs[rvs <= ci.high]: + ... res = stats.quantile_test(rvs, q=x, p=p, alternative='less') + ... assert res.pvalue > 1-alpha + >>> for x in rvs[rvs > ci.high]: + ... res = stats.quantile_test(rvs, q=x, p=p, alternative='less') + ... assert res.pvalue < 1-alpha + + Also, if a 95% confidence interval is repeatedly generated for random + samples, the confidence interval will contain the true quantile value in + approximately 95% of replications. + + >>> dist = stats.rayleigh() # our "unknown" distribution + >>> p = 0.2 + >>> true_stat = dist.ppf(p) # the true value of the statistic + >>> n_trials = 1000 + >>> quantile_ci_contains_true_stat = 0 + >>> for i in range(n_trials): + ... 
data = dist.rvs(size=100, random_state=rng) + ... res = stats.quantile_test(data, p=p) + ... ci = res.confidence_interval(0.95) + ... if ci[0] < true_stat < ci[1]: + ... quantile_ci_contains_true_stat += 1 + >>> quantile_ci_contains_true_stat >= 950 + True + + This works with any distribution and any quantile, as long as the samples + are i.i.d. + """ + # Implementation carefully follows [1] 3.2 + # "H0: the p*th quantile of X is x*" + # To facilitate comparison with [1], we'll use variable names that + # best match Conover's notation + X, x_star, p_star, H1 = quantile_test_iv(x, q, p, alternative) + + # "We will use two test statistics in this test. Let T1 equal " + # "the number of observations less than or equal to x*, and " + # "let T2 equal the number of observations less than x*." + T1 = (X <= x_star).sum() + T2 = (X < x_star).sum() + + # "The null distribution of the test statistics T1 and T2 is " + # "the binomial distribution, with parameters n = sample size, and " + # "p = p* as given in the null hypothesis.... Y has the binomial " + # "distribution with parameters n and p*." + n = len(X) + Y = stats.binom(n=n, p=p_star) + + # "H1: the p* population quantile is less than x*" + if H1 == 'less': + # "The p-value is the probability that a binomial random variable Y " + # "is greater than *or equal to* the observed value of T2...using p=p*" + pvalue = Y.sf(T2-1) # Y.pmf(T2) + Y.sf(T2) + statistic = T2 + statistic_type = 2 + # "H1: the p* population quantile is greater than x*" + elif H1 == 'greater': + # "The p-value is the probability that a binomial random variable Y " + # "is less than or equal to the observed value of T1... using p = p*" + pvalue = Y.cdf(T1) + statistic = T1 + statistic_type = 1 + # "H1: x* is not the p*th population quantile" + elif H1 == 'two-sided': + # "The p-value is twice the smaller of the probabilities that a + # binomial random variable Y is less than or equal to the observed + # value of T1 or greater than or equal to the observed value of T2 + # using p=p*." + # Note: both one-sided p-values can exceed 0.5 for the same data, so + # `clip` + pvalues = [Y.cdf(T1), Y.sf(T2 - 1)] # [greater, less] + sorted_idx = np.argsort(pvalues) + pvalue = np.clip(2*pvalues[sorted_idx[0]], 0, 1) + if sorted_idx[0]: + statistic, statistic_type = T2, 2 + else: + statistic, statistic_type = T1, 1 + + return QuantileTestResult( + statistic=statistic, + statistic_type=statistic_type, + pvalue=pvalue, + _alternative=H1, + _x=X, + _p=p_star + ) + + +##################################### +# STATISTICAL DISTANCES # +##################################### + + +def wasserstein_distance_nd(u_values, v_values, u_weights=None, v_weights=None): + r""" + Compute the Wasserstein-1 distance between two N-D discrete distributions. + + The Wasserstein distance, also called the Earth mover's distance or the + optimal transport distance, is a similarity metric between two probability + distributions [1]_. In the discrete case, the Wasserstein distance can be + understood as the cost of an optimal transport plan to convert one + distribution into the other. The cost is calculated as the product of the + amount of probability mass being moved and the distance it is being moved. + A brief and intuitive introduction can be found at [2]_. + + .. versionadded:: 1.13.0 + + Parameters + ---------- + u_values : 2d array_like + A sample from a probability distribution or the support (set of all + possible values) of a probability distribution. 
Each element along + axis 0 is an observation or possible value, and axis 1 represents the + dimensionality of the distribution; i.e., each row is a vector + observation or possible value. + + v_values : 2d array_like + A sample from or the support of a second distribution. + + u_weights, v_weights : 1d array_like, optional + Weights or counts corresponding with the sample or probability masses + corresponding with the support values. Sum of elements must be positive + and finite. If unspecified, each value is assigned the same weight. + + Returns + ------- + distance : float + The computed distance between the distributions. + + Notes + ----- + Given two probability mass functions, :math:`u` + and :math:`v`, the first Wasserstein distance between the distributions + using the Euclidean norm is: + + .. math:: + + l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int \| x-y \|_2 \mathrm{d} \pi (x, y) + + where :math:`\Gamma (u, v)` is the set of (probability) distributions on + :math:`\mathbb{R}^n \times \mathbb{R}^n` whose marginals are :math:`u` and + :math:`v` on the first and second factors respectively. For a given value + :math:`x`, :math:`u(x)` gives the probabilty of :math:`u` at position + :math:`x`, and the same for :math:`v(x)`. + + This is also called the optimal transport problem or the Monge problem. + Let the finite point sets :math:`\{x_i\}` and :math:`\{y_j\}` denote + the support set of probability mass function :math:`u` and :math:`v` + respectively. The Monge problem can be expressed as follows, + + Let :math:`\Gamma` denote the transport plan, :math:`D` denote the + distance matrix and, + + .. math:: + + x = \text{vec}(\Gamma) \\ + c = \text{vec}(D) \\ + b = \begin{bmatrix} + u\\ + v\\ + \end{bmatrix} + + The :math:`\text{vec}()` function denotes the Vectorization function + that transforms a matrix into a column vector by vertically stacking + the columns of the matrix. + The tranport plan :math:`\Gamma` is a matrix :math:`[\gamma_{ij}]` in + which :math:`\gamma_{ij}` is a positive value representing the amount of + probability mass transported from :math:`u(x_i)` to :math:`v(y_i)`. + Summing over the rows of :math:`\Gamma` should give the source distribution + :math:`u` : :math:`\sum_j \gamma_{ij} = u(x_i)` holds for all :math:`i` + and summing over the columns of :math:`\Gamma` should give the target + distribution :math:`v`: :math:`\sum_i \gamma_{ij} = v(y_j)` holds for all + :math:`j`. + The distance matrix :math:`D` is a matrix :math:`[d_{ij}]`, in which + :math:`d_{ij} = d(x_i, y_j)`. + + Given :math:`\Gamma`, :math:`D`, :math:`b`, the Monge problem can be + tranformed into a linear programming problem by + taking :math:`A x = b` as constraints and :math:`z = c^T x` as minimization + target (sum of costs) , where matrix :math:`A` has the form + + .. 
math:: + + \begin{array} {rrrr|rrrr|r|rrrr} + 1 & 1 & \dots & 1 & 0 & 0 & \dots & 0 & \dots & 0 & 0 & \dots & + 0 \cr + 0 & 0 & \dots & 0 & 1 & 1 & \dots & 1 & \dots & 0 & 0 &\dots & + 0 \cr + \vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots + & \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \cr + 0 & 0 & \dots & 0 & 0 & 0 & \dots & 0 & \dots & 1 & 1 & \dots & + 1 \cr \hline + + 1 & 0 & \dots & 0 & 1 & 0 & \dots & \dots & \dots & 1 & 0 & \dots & + 0 \cr + 0 & 1 & \dots & 0 & 0 & 1 & \dots & \dots & \dots & 0 & 1 & \dots & + 0 \cr + \vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots & + \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \cr + 0 & 0 & \dots & 1 & 0 & 0 & \dots & 1 & \dots & 0 & 0 & \dots & 1 + \end{array} + + By solving the dual form of the above linear programming problem (with + solution :math:`y^*`), the Wasserstein distance :math:`l_1 (u, v)` can + be computed as :math:`b^T y^*`. + + The above solution is inspired by Vincent Herrmann's blog [3]_ . For a + more thorough explanation, see [4]_ . + + The input distributions can be empirical, therefore coming from samples + whose values are effectively inputs of the function, or they can be seen as + generalized functions, in which case they are weighted sums of Dirac delta + functions located at the specified values. + + References + ---------- + .. [1] "Wasserstein metric", + https://en.wikipedia.org/wiki/Wasserstein_metric + .. [2] Lili Weng, "What is Wasserstein distance?", Lil'log, + https://lilianweng.github.io/posts/2017-08-20-gan/#what-is-wasserstein-distance. + .. [3] Hermann, Vincent. "Wasserstein GAN and the Kantorovich-Rubinstein + Duality". https://vincentherrmann.github.io/blog/wasserstein/. + .. [4] Peyré, Gabriel, and Marco Cuturi. "Computational optimal + transport." Center for Research in Economics and Statistics + Working Papers 2017-86 (2017). + + See Also + -------- + wasserstein_distance: Compute the Wasserstein-1 distance between two + 1D discrete distributions. + + Examples + -------- + Compute the Wasserstein distance between two three-dimensional samples, + each with two observations. + + >>> from scipy.stats import wasserstein_distance_nd + >>> wasserstein_distance_nd([[0, 2, 3], [1, 2, 5]], [[3, 2, 3], [4, 2, 5]]) + 3.0 + + Compute the Wasserstein distance between two two-dimensional distributions + with three and two weighted observations, respectively. + + >>> wasserstein_distance_nd([[0, 2.75], [2, 209.3], [0, 0]], + ... [[0.2, 0.322], [4.5, 25.1808]], + ... [0.4, 5.2, 0.114], [0.8, 1.5]) + 174.15840245217169 + """ + m, n = len(u_values), len(v_values) + u_values = asarray(u_values) + v_values = asarray(v_values) + + if u_values.ndim > 2 or v_values.ndim > 2: + raise ValueError('Invalid input values. The inputs must have either ' + 'one or two dimensions.') + # if dimensions are not equal throw error + if u_values.ndim != v_values.ndim: + raise ValueError('Invalid input values. Dimensions of inputs must be ' + 'equal.') + # if data is 1D then call the cdf_distance function + if u_values.ndim == 1 and v_values.ndim == 1: + return _cdf_distance(1, u_values, v_values, u_weights, v_weights) + + u_values, u_weights = _validate_distribution(u_values, u_weights) + v_values, v_weights = _validate_distribution(v_values, v_weights) + # if number of columns is not equal throw error + if u_values.shape[1] != v_values.shape[1]: + raise ValueError('Invalid input values. 
If two-dimensional, ' + '`u_values` and `v_values` must have the same ' + 'number of columns.') + + # if data contains np.inf then return inf or nan + if np.any(np.isinf(u_values)) ^ np.any(np.isinf(v_values)): + return np.inf + elif np.any(np.isinf(u_values)) and np.any(np.isinf(v_values)): + return np.nan + + # create constraints + A_upper_part = sparse.block_diag((np.ones((1, n)), ) * m) + A_lower_part = sparse.hstack((sparse.eye(n), ) * m) + # sparse constraint matrix of size (m + n)*(m * n) + A = sparse.vstack((A_upper_part, A_lower_part)) + A = sparse.coo_array(A) + + # get cost matrix + D = distance_matrix(u_values, v_values, p=2) + cost = D.ravel() + + # create the minimization target + p_u = np.full(m, 1/m) if u_weights is None else u_weights/np.sum(u_weights) + p_v = np.full(n, 1/n) if v_weights is None else v_weights/np.sum(v_weights) + b = np.concatenate((p_u, p_v), axis=0) + + # solving LP + constraints = LinearConstraint(A=A.T, ub=cost) + opt_res = milp(c=-b, constraints=constraints, bounds=(-np.inf, np.inf)) + return -opt_res.fun + + +def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None): + r""" + Compute the Wasserstein-1 distance between two 1D discrete distributions. + + The Wasserstein distance, also called the Earth mover's distance or the + optimal transport distance, is a similarity metric between two probability + distributions [1]_. In the discrete case, the Wasserstein distance can be + understood as the cost of an optimal transport plan to convert one + distribution into the other. The cost is calculated as the product of the + amount of probability mass being moved and the distance it is being moved. + A brief and intuitive introduction can be found at [2]_. + + .. versionadded:: 1.0.0 + + Parameters + ---------- + u_values : 1d array_like + A sample from a probability distribution or the support (set of all + possible values) of a probability distribution. Each element is an + observation or possible value. + + v_values : 1d array_like + A sample from or the support of a second distribution. + + u_weights, v_weights : 1d array_like, optional + Weights or counts corresponding with the sample or probability masses + corresponding with the support values. Sum of elements must be positive + and finite. If unspecified, each value is assigned the same weight. + + Returns + ------- + distance : float + The computed distance between the distributions. + + Notes + ----- + Given two 1D probability mass functions, :math:`u` and :math:`v`, the first + Wasserstein distance between the distributions is: + + .. math:: + + l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times + \mathbb{R}} |x-y| \mathrm{d} \pi (x, y) + + where :math:`\Gamma (u, v)` is the set of (probability) distributions on + :math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and + :math:`v` on the first and second factors respectively. For a given value + :math:`x`, :math:`u(x)` gives the probabilty of :math:`u` at position + :math:`x`, and the same for :math:`v(x)`. + + If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and + :math:`v`, this distance also equals to: + + .. math:: + + l_1(u, v) = \int_{-\infty}^{+\infty} |U-V| + + See [3]_ for a proof of the equivalence of both definitions. 
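+
+    A minimal numerical sketch of the CDF identity above (the names ``pts``,
+    ``U``, and ``V`` are illustrative only, not part of SciPy):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> u, v = np.array([0., 1., 3.]), np.array([5., 6., 8.])
+    >>> pts = np.sort(np.concatenate((u, v)))     # pooled values
+    >>> deltas = np.diff(pts)                     # widths of the intervals
+    >>> U = np.searchsorted(np.sort(u), pts[:-1], side='right') / u.size
+    >>> V = np.searchsorted(np.sort(v), pts[:-1], side='right') / v.size
+    >>> integral = np.sum(np.abs(U - V) * deltas)  # integral of |U - V|
+    >>> bool(np.isclose(integral, stats.wasserstein_distance(u, v)))
+    True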
+ + The input distributions can be empirical, therefore coming from samples + whose values are effectively inputs of the function, or they can be seen as + generalized functions, in which case they are weighted sums of Dirac delta + functions located at the specified values. + + References + ---------- + .. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric + .. [2] Lili Weng, "What is Wasserstein distance?", Lil'log, + https://lilianweng.github.io/posts/2017-08-20-gan/#what-is-wasserstein-distance. + .. [3] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related + Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`. + + See Also + -------- + wasserstein_distance_nd: Compute the Wasserstein-1 distance between two N-D + discrete distributions. + + Examples + -------- + >>> from scipy.stats import wasserstein_distance + >>> wasserstein_distance([0, 1, 3], [5, 6, 8]) + 5.0 + >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2]) + 0.25 + >>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4], + ... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5]) + 4.0781331438047861 + + """ + return _cdf_distance(1, u_values, v_values, u_weights, v_weights) + + +def energy_distance(u_values, v_values, u_weights=None, v_weights=None): + r"""Compute the energy distance between two 1D distributions. + + .. versionadded:: 1.0.0 + + Parameters + ---------- + u_values, v_values : array_like + Values observed in the (empirical) distribution. + u_weights, v_weights : array_like, optional + Weight for each value. If unspecified, each value is assigned the same + weight. + `u_weights` (resp. `v_weights`) must have the same length as + `u_values` (resp. `v_values`). If the weight sum differs from 1, it + must still be positive and finite so that the weights can be normalized + to sum to 1. + + Returns + ------- + distance : float + The computed distance between the distributions. + + Notes + ----- + The energy distance between two distributions :math:`u` and :math:`v`, whose + respective CDFs are :math:`U` and :math:`V`, equals to: + + .. math:: + + D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| - + \mathbb E|Y - Y'| \right)^{1/2} + + where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are + independent random variables whose probability distribution is :math:`u` + (resp. :math:`v`). + + Sometimes the square of this quantity is referred to as the "energy + distance" (e.g. in [2]_, [4]_), but as noted in [1]_ and [3]_, only the + definition above satisfies the axioms of a distance function (metric). + + As shown in [2]_, for one-dimensional real-valued variables, the energy + distance is linked to the non-distribution-free version of the Cramér-von + Mises distance: + + .. math:: + + D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2 + \right)^{1/2} + + Note that the common Cramér-von Mises criterion uses the distribution-free + version of the distance. See [2]_ (section 2), for more details about both + versions of the distance. + + The input distributions can be empirical, therefore coming from samples + whose values are effectively inputs of the function, or they can be seen as + generalized functions, in which case they are weighted sums of Dirac delta + functions located at the specified values. + + References + ---------- + .. [1] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews: + Computational Statistics, 8(1):27-38 (2015). + .. [2] Szekely "E-statistics: The energy of statistical samples." 
Bowling + Green State University, Department of Mathematics and Statistics, + Technical Report 02-16 (2002). + .. [3] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance + .. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer, + Munos "The Cramer Distance as a Solution to Biased Wasserstein + Gradients" (2017). :arXiv:`1705.10743`. + + Examples + -------- + >>> from scipy.stats import energy_distance + >>> energy_distance([0], [2]) + 2.0000000000000004 + >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2]) + 1.0000000000000002 + >>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ], + ... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8]) + 0.88003340976158217 + + """ + return np.sqrt(2) * _cdf_distance(2, u_values, v_values, + u_weights, v_weights) + + +def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None): + r""" + Compute, between two one-dimensional distributions :math:`u` and + :math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the + statistical distance that is defined as: + + .. math:: + + l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p} + + p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2 + gives the energy distance. + + Parameters + ---------- + u_values, v_values : array_like + Values observed in the (empirical) distribution. + u_weights, v_weights : array_like, optional + Weight for each value. If unspecified, each value is assigned the same + weight. + `u_weights` (resp. `v_weights`) must have the same length as + `u_values` (resp. `v_values`). If the weight sum differs from 1, it + must still be positive and finite so that the weights can be normalized + to sum to 1. + + Returns + ------- + distance : float + The computed distance between the distributions. + + Notes + ----- + The input distributions can be empirical, therefore coming from samples + whose values are effectively inputs of the function, or they can be seen as + generalized functions, in which case they are weighted sums of Dirac delta + functions located at the specified values. + + References + ---------- + .. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer, + Munos "The Cramer Distance as a Solution to Biased Wasserstein + Gradients" (2017). :arXiv:`1705.10743`. + + """ + u_values, u_weights = _validate_distribution(u_values, u_weights) + v_values, v_weights = _validate_distribution(v_values, v_weights) + + u_sorter = np.argsort(u_values) + v_sorter = np.argsort(v_values) + + all_values = np.concatenate((u_values, v_values)) + all_values.sort(kind='mergesort') + + # Compute the differences between pairs of successive values of u and v. + deltas = np.diff(all_values) + + # Get the respective positions of the values of u and v among the values of + # both distributions. + u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right') + v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right') + + # Calculate the CDFs of u and v using their weights, if specified. + if u_weights is None: + u_cdf = u_cdf_indices / u_values.size + else: + u_sorted_cumweights = np.concatenate(([0], + np.cumsum(u_weights[u_sorter]))) + u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1] + + if v_weights is None: + v_cdf = v_cdf_indices / v_values.size + else: + v_sorted_cumweights = np.concatenate(([0], + np.cumsum(v_weights[v_sorter]))) + v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1] + + # Compute the value of the integral based on the CDFs. 
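+    # Worked example of the quantity computed below (p=1, equal weights):
+    # for u_values = [0, 1, 3] and v_values = [5, 6, 8], all_values is
+    # [0, 1, 3, 5, 6, 8], deltas is [1, 2, 2, 1, 2], and
+    # sum(|u_cdf - v_cdf| * deltas) = 1/3*1 + 2/3*2 + 1*2 + 2/3*1 + 1/3*2 = 5,
+    # which matches the `wasserstein_distance` docstring example.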
+ # If p = 1 or p = 2, we avoid using np.power, which introduces an overhead + # of about 15%. + if p == 1: + return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas)) + if p == 2: + return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas))) + return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p), + deltas)), 1/p) + + +def _validate_distribution(values, weights): + """ + Validate the values and weights from a distribution input of `cdf_distance` + and return them as ndarray objects. + + Parameters + ---------- + values : array_like + Values observed in the (empirical) distribution. + weights : array_like + Weight for each value. + + Returns + ------- + values : ndarray + Values as ndarray. + weights : ndarray + Weights as ndarray. + + """ + # Validate the value array. + values = np.asarray(values, dtype=float) + if len(values) == 0: + raise ValueError("Distribution can't be empty.") + + # Validate the weight array, if specified. + if weights is not None: + weights = np.asarray(weights, dtype=float) + if len(weights) != len(values): + raise ValueError('Value and weight array-likes for the same ' + 'empirical distribution must be of the same size.') + if np.any(weights < 0): + raise ValueError('All weights must be non-negative.') + if not 0 < np.sum(weights) < np.inf: + raise ValueError('Weight array-like sum must be positive and ' + 'finite. Set as None for an equal distribution of ' + 'weight.') + + return values, weights + + return values, None + + +##################################### +# SUPPORT FUNCTIONS # +##################################### + +RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts')) + + +def find_repeats(arr): + """Find repeats and repeat counts. + + Parameters + ---------- + arr : array_like + Input array. This is cast to float64. + + Returns + ------- + values : ndarray + The unique values from the (flattened) input that are repeated. + + counts : ndarray + Number of times the corresponding 'value' is repeated. + + Notes + ----- + In numpy >= 1.9 `numpy.unique` provides similar functionality. The main + difference is that `find_repeats` only returns repeated values. + + Examples + -------- + >>> from scipy import stats + >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5]) + RepeatedResults(values=array([2.]), counts=array([4])) + + >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]]) + RepeatedResults(values=array([4., 5.]), counts=array([2, 2])) + + """ + # Note: always copies. + return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64))) + + +def _sum_of_squares(a, axis=0): + """Square each element of the input array, and return the sum(s) of that. + + Parameters + ---------- + a : array_like + Input array. + axis : int or None, optional + Axis along which to calculate. Default is 0. If None, compute over + the whole array `a`. + + Returns + ------- + sum_of_squares : ndarray + The sum along the given axis for (a**2). + + See Also + -------- + _square_of_sums : The square(s) of the sum(s) (the opposite of + `_sum_of_squares`). + + """ + a, axis = _chk_asarray(a, axis) + return np.sum(a*a, axis) + + +def _square_of_sums(a, axis=0): + """Sum elements of the input array, and return the square(s) of that sum. + + Parameters + ---------- + a : array_like + Input array. + axis : int or None, optional + Axis along which to calculate. Default is 0. If None, compute over + the whole array `a`. + + Returns + ------- + square_of_sums : float or ndarray + The square of the sum over `axis`. 
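+
+    A tiny doctest-style contrast with `_sum_of_squares`, written in plain
+    NumPy (shown only to make the two private helpers concrete):
+
+    >>> import numpy as np
+    >>> a = np.array([1., 2., 3.])
+    >>> float(np.sum(a * a))    # what _sum_of_squares computes: 1 + 4 + 9
+    14.0
+    >>> float(np.sum(a)) ** 2   # what _square_of_sums computes: 6 ** 2
+    36.0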
+ + See Also + -------- + _sum_of_squares : The sum of squares (the opposite of `square_of_sums`). + + """ + a, axis = _chk_asarray(a, axis) + s = np.sum(a, axis) + if not np.isscalar(s): + return s.astype(float) * s + else: + return float(s) * s + + +def rankdata(a, method='average', *, axis=None, nan_policy='propagate'): + """Assign ranks to data, dealing with ties appropriately. + + By default (``axis=None``), the data array is first flattened, and a flat + array of ranks is returned. Separately reshape the rank array to the + shape of the data array if desired (see Examples). + + Ranks begin at 1. The `method` argument controls how ranks are assigned + to equal values. See [1]_ for further discussion of ranking methods. + + Parameters + ---------- + a : array_like + The array of values to be ranked. + method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional + The method used to assign ranks to tied elements. + The following methods are available (default is 'average'): + + * 'average': The average of the ranks that would have been assigned to + all the tied values is assigned to each value. + * 'min': The minimum of the ranks that would have been assigned to all + the tied values is assigned to each value. (This is also + referred to as "competition" ranking.) + * 'max': The maximum of the ranks that would have been assigned to all + the tied values is assigned to each value. + * 'dense': Like 'min', but the rank of the next highest element is + assigned the rank immediately after those assigned to the tied + elements. + * 'ordinal': All values are given a distinct rank, corresponding to + the order that the values occur in `a`. + axis : {None, int}, optional + Axis along which to perform the ranking. If ``None``, the data array + is first flattened. + nan_policy : {'propagate', 'omit', 'raise'}, optional + Defines how to handle when input contains nan. + The following options are available (default is 'propagate'): + + * 'propagate': propagates nans through the rank calculation + * 'omit': performs the calculations ignoring nan values + * 'raise': raises an error + + .. note:: + + When `nan_policy` is 'propagate', the output is an array of *all* + nans because ranks relative to nans in the input are undefined. + When `nan_policy` is 'omit', nans in `a` are ignored when ranking + the other values, and the corresponding locations of the output + are nan. + + .. versionadded:: 1.10 + + Returns + ------- + ranks : ndarray + An array of size equal to the size of `a`, containing rank + scores. + + References + ---------- + .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import rankdata + >>> rankdata([0, 2, 3, 2]) + array([ 1. , 2.5, 4. , 2.5]) + >>> rankdata([0, 2, 3, 2], method='min') + array([ 1, 2, 4, 2]) + >>> rankdata([0, 2, 3, 2], method='max') + array([ 1, 3, 4, 3]) + >>> rankdata([0, 2, 3, 2], method='dense') + array([ 1, 2, 3, 2]) + >>> rankdata([0, 2, 3, 2], method='ordinal') + array([ 1, 2, 4, 3]) + >>> rankdata([[0, 2], [3, 2]]).reshape(2,2) + array([[1. , 2.5], + [4. , 2.5]]) + >>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1) + array([[1. , 2.5, 2.5], + [2. , 1. , 3. 
]]) + >>> rankdata([0, 2, 3, np.nan, -2, np.nan], nan_policy="propagate") + array([nan, nan, nan, nan, nan, nan]) + >>> rankdata([0, 2, 3, np.nan, -2, np.nan], nan_policy="omit") + array([ 2., 3., 4., nan, 1., nan]) + + """ + methods = ('average', 'min', 'max', 'dense', 'ordinal') + if method not in methods: + raise ValueError(f'unknown method "{method}"') + + x = np.asarray(a) + + if axis is None: + x = x.ravel() + axis = -1 + + if x.size == 0: + dtype = float if method == 'average' else np.dtype("long") + return np.empty(x.shape, dtype=dtype) + + contains_nan, nan_policy = _contains_nan(x, nan_policy) + + x = np.swapaxes(x, axis, -1) + ranks = _rankdata(x, method) + + if contains_nan: + i_nan = (np.isnan(x) if nan_policy == 'omit' + else np.isnan(x).any(axis=-1)) + ranks = ranks.astype(float, copy=False) + ranks[i_nan] = np.nan + + ranks = np.swapaxes(ranks, axis, -1) + return ranks + + +def _order_ranks(ranks, j): + # Reorder ascending order `ranks` according to `j` + ordered_ranks = np.empty(j.shape, dtype=ranks.dtype) + np.put_along_axis(ordered_ranks, j, ranks, axis=-1) + return ordered_ranks + + +def _rankdata(x, method, return_ties=False): + # Rank data `x` by desired `method`; `return_ties` if desired + shape = x.shape + + # Get sort order + kind = 'mergesort' if method == 'ordinal' else 'quicksort' + j = np.argsort(x, axis=-1, kind=kind) + ordinal_ranks = np.broadcast_to(np.arange(1, shape[-1]+1, dtype=int), shape) + + # Ordinal ranks is very easy because ties don't matter. We're done. + if method == 'ordinal': + return _order_ranks(ordinal_ranks, j) # never return ties + + # Sort array + y = np.take_along_axis(x, j, axis=-1) + # Logical indices of unique elements + i = np.concatenate([np.ones(shape[:-1] + (1,), dtype=np.bool_), + y[..., :-1] != y[..., 1:]], axis=-1) + + # Integer indices of unique elements + indices = np.arange(y.size)[i.ravel()] + # Counts of unique elements + counts = np.diff(indices, append=y.size) + + # Compute `'min'`, `'max'`, and `'mid'` ranks of unique elements + if method == 'min': + ranks = ordinal_ranks[i] + elif method == 'max': + ranks = ordinal_ranks[i] + counts - 1 + elif method == 'average': + ranks = ordinal_ranks[i] + (counts - 1)/2 + elif method == 'dense': + ranks = np.cumsum(i, axis=-1)[i] + + ranks = np.repeat(ranks, counts).reshape(shape) + ranks = _order_ranks(ranks, j) + + if return_ties: + # Tie information is returned in a format that is useful to functions that + # rely on this (private) function. Example: + # >>> x = np.asarray([3, 2, 1, 2, 2, 2, 1]) + # >>> _, t = _rankdata(x, 'average', return_ties=True) + # >>> t # array([2., 0., 4., 0., 0., 0., 1.]) # two 1s, four 2s, and one 3 + # Unlike ranks, tie counts are *not* reordered to correspond with the order of + # the input; e.g. the number of appearances of the lowest rank element comes + # first. This is a useful format because: + # - The shape of the result is the shape of the input. Different slices can + # have different numbers of tied elements but not result in a ragged array. + # - Functions that use `t` usually don't need to which each element of the + # original array is associated with each tie count; they perform a reduction + # over the tie counts onnly. The tie counts are naturally computed in a + # sorted order, so this does not unnecesarily reorder them. + # - One exception is `wilcoxon`, which needs the number of zeros. Zeros always + # have the lowest rank, so it is easy to find them at the zeroth index. 
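+        # Build `t` in sorted order: the position of the first element of each
+        # tied group holds the group's size; all other positions remain zero.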
+ t = np.zeros(shape, dtype=float) + t[i] = counts + return ranks, t + return ranks + + +def expectile(a, alpha=0.5, *, weights=None): + r"""Compute the expectile at the specified level. + + Expectiles are a generalization of the expectation in the same way as + quantiles are a generalization of the median. The expectile at level + `alpha = 0.5` is the mean (average). See Notes for more details. + + Parameters + ---------- + a : array_like + Array containing numbers whose expectile is desired. + alpha : float, default: 0.5 + The level of the expectile; e.g., `alpha=0.5` gives the mean. + weights : array_like, optional + An array of weights associated with the values in `a`. + The `weights` must be broadcastable to the same shape as `a`. + Default is None, which gives each value a weight of 1.0. + An integer valued weight element acts like repeating the corresponding + observation in `a` that many times. See Notes for more details. + + Returns + ------- + expectile : ndarray + The empirical expectile at level `alpha`. + + See Also + -------- + numpy.mean : Arithmetic average + numpy.quantile : Quantile + + Notes + ----- + In general, the expectile at level :math:`\alpha` of a random variable + :math:`X` with cumulative distribution function (CDF) :math:`F` is given + by the unique solution :math:`t` of: + + .. math:: + + \alpha E((X - t)_+) = (1 - \alpha) E((t - X)_+) \,. + + Here, :math:`(x)_+ = \max(0, x)` is the positive part of :math:`x`. + This equation can be equivalently written as: + + .. math:: + + \alpha \int_t^\infty (x - t)\mathrm{d}F(x) + = (1 - \alpha) \int_{-\infty}^t (t - x)\mathrm{d}F(x) \,. + + The empirical expectile at level :math:`\alpha` (`alpha`) of a sample + :math:`a_i` (the array `a`) is defined by plugging in the empirical CDF of + `a`. Given sample or case weights :math:`w` (the array `weights`), it + reads :math:`F_a(x) = \frac{1}{\sum_i w_i} \sum_i w_i 1_{a_i \leq x}` + with indicator function :math:`1_{A}`. This leads to the definition of the + empirical expectile at level `alpha` as the unique solution :math:`t` of: + + .. math:: + + \alpha \sum_{i=1}^n w_i (a_i - t)_+ = + (1 - \alpha) \sum_{i=1}^n w_i (t - a_i)_+ \,. + + For :math:`\alpha=0.5`, this simplifies to the weighted average. + Furthermore, the larger :math:`\alpha`, the larger the value of the + expectile. + + As a final remark, the expectile at level :math:`\alpha` can also be + written as a minimization problem. One often used choice is + + .. math:: + + \operatorname{argmin}_t + E(\lvert 1_{t\geq X} - \alpha\rvert(t - X)^2) \,. + + References + ---------- + .. [1] W. K. Newey and J. L. Powell (1987), "Asymmetric Least Squares + Estimation and Testing," Econometrica, 55, 819-847. + .. [2] T. Gneiting (2009). "Making and Evaluating Point Forecasts," + Journal of the American Statistical Association, 106, 746 - 762. + :doi:`10.48550/arXiv.0912.0902` + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import expectile + >>> a = [1, 4, 2, -1] + >>> expectile(a, alpha=0.5) == np.mean(a) + True + >>> expectile(a, alpha=0.2) + 0.42857142857142855 + >>> expectile(a, alpha=0.8) + 2.5714285714285716 + >>> weights = [1, 3, 1, 1] + + """ + if alpha < 0 or alpha > 1: + raise ValueError( + "The expectile level alpha must be in the range [0, 1]." + ) + a = np.asarray(a) + + if weights is not None: + weights = np.broadcast_to(weights, a.shape) + + # This is the empirical equivalent of Eq. 
(13) with identification + # function from Table 9 (omitting a factor of 2) in [2] (their y is our + # data a, their x is our t) + def first_order(t): + return np.average(np.abs((a <= t) - alpha) * (t - a), weights=weights) + + if alpha >= 0.5: + x0 = np.average(a, weights=weights) + x1 = np.amax(a) + else: + x1 = np.average(a, weights=weights) + x0 = np.amin(a) + + if x0 == x1: + # a has a single unique element + return x0 + + # Note that the expectile is the unique solution, so no worries about + # finding a wrong root. + res = root_scalar(first_order, x0=x0, x1=x1) + return res.root diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py new file mode 100644 index 0000000000000000000000000000000000000000..555496461c1c63fb8b5039cbdfc9670c1b96b9a7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py @@ -0,0 +1,237 @@ +import warnings +import numpy as np + +from scipy import stats +from ._stats_py import _get_pvalue, _rankdata +from . import _morestats +from ._axis_nan_policy import _broadcast_arrays +from ._hypotests import _get_wilcoxon_distr +from scipy._lib._util import _lazywhere, _get_nan + + +class WilcoxonDistribution: + + def __init__(self, n): + n = np.asarray(n).astype(int, copy=False) + self.n = n + self._dists = {ni: _get_wilcoxon_distr(ni) for ni in np.unique(n)} + + def _cdf1(self, k, n): + pmfs = self._dists[n] + return pmfs[:k + 1].sum() + + def _cdf(self, k, n): + return np.vectorize(self._cdf1, otypes=[float])(k, n) + + def _sf1(self, k, n): + pmfs = self._dists[n] + return pmfs[k:].sum() + + def _sf(self, k, n): + return np.vectorize(self._sf1, otypes=[float])(k, n) + + def mean(self): + return self.n * (self.n + 1) / 4 + + def _prep(self, k): + k = np.asarray(k).astype(int, copy=False) + mn = self.mean() + out = np.empty(k.shape, dtype=np.float64) + return k, mn, out + + def cdf(self, k): + k, mn, out = self._prep(k) + return _lazywhere(k <= mn, (k, self.n), self._cdf, + f2=lambda k, n: 1 - self._sf(k+1, n))[()] + + def sf(self, k): + k, mn, out = self._prep(k) + return _lazywhere(k <= mn, (k, self.n), self._sf, + f2=lambda k, n: 1 - self._cdf(k-1, n))[()] + + +def _wilcoxon_iv(x, y, zero_method, correction, alternative, method, axis): + + axis = np.asarray(axis)[()] + message = "`axis` must be an integer." + if not np.issubdtype(axis.dtype, np.integer) or axis.ndim != 0: + raise ValueError(message) + + message = '`axis` must be compatible with the shape(s) of `x` (and `y`)' + try: + if y is None: + x = np.asarray(x) + d = x + else: + x, y = _broadcast_arrays((x, y), axis=axis) + d = x - y + d = np.moveaxis(d, axis, -1) + except np.AxisError as e: + raise ValueError(message) from e + + message = "`x` and `y` must have the same length along `axis`." + if y is not None and x.shape[axis] != y.shape[axis]: + raise ValueError(message) + + message = "`x` (and `y`, if provided) must be an array of real numbers." + if np.issubdtype(d.dtype, np.integer): + d = d.astype(np.float64) + if not np.issubdtype(d.dtype, np.floating): + raise ValueError(message) + + zero_method = str(zero_method).lower() + zero_methods = {"wilcox", "pratt", "zsplit"} + message = f"`zero_method` must be one of {zero_methods}." + if zero_method not in zero_methods: + raise ValueError(message) + + corrections = {True, False} + message = f"`correction` must be one of {corrections}." 
+ if correction not in corrections: + raise ValueError(message) + + alternative = str(alternative).lower() + alternatives = {"two-sided", "less", "greater"} + message = f"`alternative` must be one of {alternatives}." + if alternative not in alternatives: + raise ValueError(message) + + if not isinstance(method, stats.PermutationMethod): + methods = {"auto", "approx", "exact"} + message = (f"`method` must be one of {methods} or " + "an instance of `stats.PermutationMethod`.") + if method not in methods: + raise ValueError(message) + + # logic unchanged here for backward compatibility + n_zero = np.sum(d == 0, axis=-1) + has_zeros = np.any(n_zero > 0) + if method == "auto": + if d.shape[-1] <= 50 and not has_zeros: + method = "exact" + else: + method = "approx" + + n_zero = np.sum(d == 0) + if n_zero > 0 and method == "exact": + method = "approx" + warnings.warn("Exact p-value calculation does not work if there are " + "zeros. Switching to normal approximation.", + stacklevel=2) + + if (method == "approx" and zero_method in ["wilcox", "pratt"] + and n_zero == d.size and d.size > 0 and d.ndim == 1): + raise ValueError("zero_method 'wilcox' and 'pratt' do not " + "work if x - y is zero for all elements.") + + if 0 < d.shape[-1] < 10 and method == "approx": + warnings.warn("Sample size too small for normal approximation.", stacklevel=2) + + return d, zero_method, correction, alternative, method, axis + + +def _wilcoxon_statistic(d, zero_method='wilcox'): + + i_zeros = (d == 0) + + if zero_method == 'wilcox': + # Wilcoxon's method for treating zeros was to remove them from + # the calculation. We do this by replacing 0s with NaNs, which + # are ignored anyway. + if not d.flags['WRITEABLE']: + d = d.copy() + d[i_zeros] = np.nan + + i_nan = np.isnan(d) + n_nan = np.sum(i_nan, axis=-1) + count = d.shape[-1] - n_nan + + r, t = _rankdata(abs(d), 'average', return_ties=True) + + r_plus = np.sum((d > 0) * r, axis=-1) + r_minus = np.sum((d < 0) * r, axis=-1) + + if zero_method == "zsplit": + # The "zero-split" method for treating zeros is to add half their contribution + # to r_plus and half to r_minus. + # See gh-2263 for the origin of this method. + r_zero_2 = np.sum(i_zeros * r, axis=-1) / 2 + r_plus += r_zero_2 + r_minus += r_zero_2 + + mn = count * (count + 1.) * 0.25 + se = count * (count + 1.) * (2. * count + 1.) + + if zero_method == "pratt": + # Pratt's method for treating zeros was just to modify the z-statistic. + + # normal approximation needs to be adjusted, see Cureton (1967) + n_zero = i_zeros.sum(axis=-1) + mn -= n_zero * (n_zero + 1.) * 0.25 + se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.) + + # zeros are not to be included in tie-correction. 
+ # any tie counts corresponding with zeros are in the 0th column + t[i_zeros.any(axis=-1), 0] = 0 + + tie_correct = (t**3 - t).sum(axis=-1) + se -= tie_correct/2 + se = np.sqrt(se / 24) + + z = (r_plus - mn) / se + + return r_plus, r_minus, se, z, count + + +def _correction_sign(z, alternative): + if alternative == 'greater': + return 1 + elif alternative == 'less': + return -1 + else: + return np.sign(z) + + +def _wilcoxon_nd(x, y=None, zero_method='wilcox', correction=True, + alternative='two-sided', method='auto', axis=0): + + temp = _wilcoxon_iv(x, y, zero_method, correction, alternative, method, axis) + d, zero_method, correction, alternative, method, axis = temp + + if d.size == 0: + NaN = _get_nan(d) + res = _morestats.WilcoxonResult(statistic=NaN, pvalue=NaN) + if method == 'approx': + res.zstatistic = NaN + return res + + r_plus, r_minus, se, z, count = _wilcoxon_statistic(d, zero_method) + + if method == 'approx': + if correction: + sign = _correction_sign(z, alternative) + z -= sign * 0.5 / se + p = _get_pvalue(z, stats.norm, alternative) + elif method == 'exact': + dist = WilcoxonDistribution(count) + if alternative == 'less': + p = dist.cdf(r_plus) + elif alternative == 'greater': + p = dist.sf(r_plus) + else: + p = 2 * np.minimum(dist.sf(r_plus), dist.cdf(r_plus)) + p = np.clip(p, 0, 1) + else: # `PermutationMethod` instance (already validated) + p = stats.permutation_test( + (d,), lambda d: _wilcoxon_statistic(d, zero_method)[0], + permutation_type='samples', **method._asdict(), + alternative=alternative, axis=-1).pvalue + + # for backward compatibility... + statistic = np.minimum(r_plus, r_minus) if alternative=='two-sided' else r_plus + z = -np.abs(z) if (alternative == 'two-sided' and method == 'approx') else z + + res = _morestats.WilcoxonResult(statistic=statistic, pvalue=p[()]) + if method == 'approx': + res.zstatistic = z[()] + return res diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/biasedurn.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/biasedurn.py new file mode 100644 index 0000000000000000000000000000000000000000..f5e1cd5c84897ed9e65db1cf20d3281479d07a1f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/biasedurn.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + '_PyFishersNCHypergeometric', + '_PyWalleniusNCHypergeometric', + '_PyStochasticLib3' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="biasedurn", + private_modules=["_biasedurn"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/distributions.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..ac9c37aa98c9545b2616c8d32e8f676d8d49289e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/distributions.py @@ -0,0 +1,24 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +# NOTE: To look at history using `git blame`, use `git blame -M -C -C` +# instead of `git blame -Lxxx,+x`. +# +from ._distn_infrastructure import (rv_discrete, rv_continuous, rv_frozen) # noqa: F401 + +from . import _continuous_distns +from . 
import _discrete_distns + +from ._continuous_distns import * # noqa: F403 +from ._levy_stable import levy_stable +from ._discrete_distns import * # noqa: F403 +from ._entropy import entropy + +# For backwards compatibility e.g. pymc expects distributions.__all__. +__all__ = ['rv_discrete', 'rv_continuous', 'rv_histogram', 'entropy'] # noqa: F405 + +# Add only the distribution names, not the *_gen names. +__all__ += _continuous_distns._distn_names +__all__ += ['levy_stable'] +__all__ += _discrete_distns._distn_names diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/mstats.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/mstats.py new file mode 100644 index 0000000000000000000000000000000000000000..88016af71803dc5c4ebadba168f22cdcd8273dbb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/mstats.py @@ -0,0 +1,140 @@ +""" +=================================================================== +Statistical functions for masked arrays (:mod:`scipy.stats.mstats`) +=================================================================== + +.. currentmodule:: scipy.stats.mstats + +This module contains a large number of statistical functions that can +be used with masked arrays. + +Most of these functions are similar to those in `scipy.stats` but might +have small differences in the API or in the algorithm used. Since this +is a relatively new package, some API changes are still possible. + +Summary statistics +================== + +.. autosummary:: + :toctree: generated/ + + describe + gmean + hmean + kurtosis + mode + mquantiles + hdmedian + hdquantiles + hdquantiles_sd + idealfourths + plotting_positions + meppf + moment + skew + tmean + tvar + tmin + tmax + tsem + variation + find_repeats + sem + trimmed_mean + trimmed_mean_ci + trimmed_std + trimmed_var + +Frequency statistics +==================== + +.. autosummary:: + :toctree: generated/ + + scoreatpercentile + +Correlation functions +===================== + +.. autosummary:: + :toctree: generated/ + + f_oneway + pearsonr + spearmanr + pointbiserialr + kendalltau + kendalltau_seasonal + linregress + siegelslopes + theilslopes + sen_seasonal_slopes + +Statistical tests +================= + +.. autosummary:: + :toctree: generated/ + + ttest_1samp + ttest_onesamp + ttest_ind + ttest_rel + chisquare + kstest + ks_2samp + ks_1samp + ks_twosamp + mannwhitneyu + rankdata + kruskal + kruskalwallis + friedmanchisquare + brunnermunzel + skewtest + kurtosistest + normaltest + +Transformations +=============== + +.. autosummary:: + :toctree: generated/ + + obrientransform + trim + trima + trimmed_stde + trimr + trimtail + trimboth + winsorize + zmap + zscore + +Other +===== + +.. autosummary:: + :toctree: generated/ + + argstoarray + count_tied_groups + msign + compare_medians_ms + median_cihs + mjci + mquantiles_cimj + rsh + +""" +from . import _mstats_basic +from . 
import _mstats_extras +from ._mstats_basic import * # noqa: F403 +from ._mstats_extras import * # noqa: F403 +# Functions that support masked array input in stats but need to be kept in the +# mstats namespace for backwards compatibility: +from scipy.stats import gmean, hmean, zmap, zscore, chisquare + +__all__ = _mstats_basic.__all__ + _mstats_extras.__all__ +__all__ += ['gmean', 'hmean', 'zmap', 'zscore', 'chisquare'] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/mstats_extras.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/mstats_extras.py new file mode 100644 index 0000000000000000000000000000000000000000..01a19f22b257d537762838f86dcfa10146c9a4a5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/mstats_extras.py @@ -0,0 +1,26 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.stats` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'compare_medians_ms', + 'hdquantiles', 'hdmedian', 'hdquantiles_sd', + 'idealfourths', + 'median_cihs','mjci','mquantiles_cimj', + 'rsh', + 'trimmed_mean_ci', 'ma', 'MaskedArray', 'mstats', + 'norm', 'beta', 't', 'binom' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="mstats_extras", + private_modules=["_mstats_extras"], all=__all__, + attribute=name, correct_module="mstats") diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/stats/sampling.py b/env-llmeval/lib/python3.10/site-packages/scipy/stats/sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..a699221ba77bb8320023d70c3e47f562c08eab18 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/stats/sampling.py @@ -0,0 +1,68 @@ +""" +====================================================== +Random Number Generators (:mod:`scipy.stats.sampling`) +====================================================== + +.. currentmodule:: scipy.stats.sampling + +This module contains a collection of random number generators to sample +from univariate continuous and discrete distributions. It uses the +implementation of a C library called "UNU.RAN". The only exception is +RatioUniforms, which is a pure Python implementation of the +Ratio-of-Uniforms method. + +Generators Wrapped +================== + +For continuous distributions +---------------------------- + +.. autosummary:: + :toctree: generated/ + + NumericalInverseHermite + NumericalInversePolynomial + TransformedDensityRejection + SimpleRatioUniforms + RatioUniforms + +For discrete distributions +-------------------------- + +.. autosummary:: + :toctree: generated/ + + DiscreteAliasUrn + DiscreteGuideTable + +Warnings / Errors used in :mod:`scipy.stats.sampling` +----------------------------------------------------- + +.. autosummary:: + :toctree: generated/ + + UNURANError + + +Generators for pre-defined distributions +======================================== + +To easily apply the above methods for some of the continuous distributions +in :mod:`scipy.stats`, the following functionality can be used: + +.. 
autosummary:: + :toctree: generated/ + + FastGeneratorInversion + +""" +from ._sampling import FastGeneratorInversion, RatioUniforms # noqa: F401 +from ._unuran.unuran_wrapper import ( # noqa: F401 + TransformedDensityRejection, + DiscreteAliasUrn, + DiscreteGuideTable, + NumericalInversePolynomial, + NumericalInverseHermite, + SimpleRatioUniforms, + UNURANError +)
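+
+# A minimal usage sketch (illustrative only; it follows the documented pattern
+# for `NumericalInversePolynomial`, which needs nothing more than an object
+# exposing a `pdf` method):
+#
+#     import numpy as np
+#     from scipy.stats.sampling import NumericalInversePolynomial
+#
+#     class StandardNormal:
+#         def pdf(self, x):
+#             return np.exp(-0.5 * x * x)   # an unnormalized density is fine
+#
+#     gen = NumericalInversePolynomial(StandardNormal(), random_state=123)
+#     samples = gen.rvs(size=5)   # five draws from the standard normal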