Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py +16 -0
- env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py +79 -0
- env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py +46 -0
- env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py +47 -0
- env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_19_data.npz +3 -0
- env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_20_data.npz +3 -0
- env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_6_data.npz +3 -0
- env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz +3 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_axis_nan_policy.py +642 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_biasedurn.pxd +27 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py +795 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_binomtest.py +375 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_constants.py +39 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_continuous_distns.py +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_covariance.py +633 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_crosstab.py +204 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_discrete_distns.py +1952 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_distr_params.py +288 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_fit.py +1351 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_hypotests.py +2021 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_kde.py +728 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_morestats.py +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_mstats_basic.py +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_mstats_extras.py +521 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_page_trend_test.py +479 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_qmc.py +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi +54 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_relative_risk.py +263 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_resampling.py +1870 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_result_classes.py +40 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_sampling.py +1314 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_sensitivity_analysis.py +712 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_sobol.pyi +54 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats.pxd +9 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats_mstats_common.py +499 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats_pythran.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_survival.py +686 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_tukeylambda_stats.py +199 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_variation.py +121 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/_warnings_errors.py +38 -0
- env-llmeval/lib/python3.10/site-packages/scipy/stats/morestats.py +34 -0
env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (224 Bytes).
env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc
ADDED
Binary file (6.47 kB).
env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc
ADDED
Binary file (5.95 kB).
env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc
ADDED
Binary file (923 Bytes).
env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py
ADDED
@@ -0,0 +1,16 @@
+from cupy import *
+
+# from cupy import * doesn't overwrite these builtin names
+from cupy import abs, max, min, round
+
+# These imports may overwrite names from the import * above.
+from ._aliases import *
+
+# See the comment in the numpy __init__.py
+__import__(__package__ + '.linalg')
+
+from .linalg import matrix_transpose, vecdot
+
+from ..common._helpers import *
+
+__array_api_version__ = '2022.12'
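For context, the file above assembles a CuPy-backed namespace that tracks the 2022.12 array API standard. A rough usage sketch, not part of the diff (it assumes a working CuPy/CUDA installation; the import path simply mirrors the vendored location):

    # Hypothetical usage sketch; requires CuPy and a CUDA device.
    from scipy._lib.array_api_compat import cupy as xp

    x = xp.asarray([1.0, 4.0, 9.0])    # compat wrapper around cupy.asarray
    y = xp.pow(x, 0.5)                 # 'pow' is the array API name for cupy.power
    print(xp.__array_api_version__)    # '2022.12'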
env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (495 Bytes).
env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc
ADDED
Binary file (1.05 kB).
env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py
ADDED
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+from functools import partial
+
+from ..common import _aliases
+
+from .._internal import get_xp
+
+asarray = asarray_cupy = partial(_aliases._asarray, namespace='cupy')
+asarray.__doc__ = _aliases._asarray.__doc__
+del partial
+
+import cupy as cp
+bool = cp.bool_
+
+# Basic renames
+acos = cp.arccos
+acosh = cp.arccosh
+asin = cp.arcsin
+asinh = cp.arcsinh
+atan = cp.arctan
+atan2 = cp.arctan2
+atanh = cp.arctanh
+bitwise_left_shift = cp.left_shift
+bitwise_invert = cp.invert
+bitwise_right_shift = cp.right_shift
+concat = cp.concatenate
+pow = cp.power
+
+arange = get_xp(cp)(_aliases.arange)
+empty = get_xp(cp)(_aliases.empty)
+empty_like = get_xp(cp)(_aliases.empty_like)
+eye = get_xp(cp)(_aliases.eye)
+full = get_xp(cp)(_aliases.full)
+full_like = get_xp(cp)(_aliases.full_like)
+linspace = get_xp(cp)(_aliases.linspace)
+ones = get_xp(cp)(_aliases.ones)
+ones_like = get_xp(cp)(_aliases.ones_like)
+zeros = get_xp(cp)(_aliases.zeros)
+zeros_like = get_xp(cp)(_aliases.zeros_like)
+UniqueAllResult = get_xp(cp)(_aliases.UniqueAllResult)
+UniqueCountsResult = get_xp(cp)(_aliases.UniqueCountsResult)
+UniqueInverseResult = get_xp(cp)(_aliases.UniqueInverseResult)
+unique_all = get_xp(cp)(_aliases.unique_all)
+unique_counts = get_xp(cp)(_aliases.unique_counts)
+unique_inverse = get_xp(cp)(_aliases.unique_inverse)
+unique_values = get_xp(cp)(_aliases.unique_values)
+astype = _aliases.astype
+std = get_xp(cp)(_aliases.std)
+var = get_xp(cp)(_aliases.var)
+permute_dims = get_xp(cp)(_aliases.permute_dims)
+reshape = get_xp(cp)(_aliases.reshape)
+argsort = get_xp(cp)(_aliases.argsort)
+sort = get_xp(cp)(_aliases.sort)
+nonzero = get_xp(cp)(_aliases.nonzero)
+sum = get_xp(cp)(_aliases.sum)
+prod = get_xp(cp)(_aliases.prod)
+ceil = get_xp(cp)(_aliases.ceil)
+floor = get_xp(cp)(_aliases.floor)
+trunc = get_xp(cp)(_aliases.trunc)
+matmul = get_xp(cp)(_aliases.matmul)
+matrix_transpose = get_xp(cp)(_aliases.matrix_transpose)
+tensordot = get_xp(cp)(_aliases.tensordot)
+
+# These functions are completely new here. If the library already has them
+# (i.e., numpy 2.0), use the library version instead of our wrapper.
+if hasattr(cp, 'vecdot'):
+    vecdot = cp.vecdot
+else:
+    vecdot = get_xp(cp)(_aliases.vecdot)
+if hasattr(cp, 'isdtype'):
+    isdtype = cp.isdtype
+else:
+    isdtype = get_xp(cp)(_aliases.isdtype)
+
+__all__ = _aliases.__all__ + ['asarray', 'asarray_cupy', 'bool', 'acos',
+                              'acosh', 'asin', 'asinh', 'atan', 'atan2',
+                              'atanh', 'bitwise_left_shift', 'bitwise_invert',
+                              'bitwise_right_shift', 'concat', 'pow']
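The repeated `get_xp(cp)(...)` calls above bind CuPy as the array namespace for the generic implementations in `common/_aliases.py`. A minimal standalone sketch of that pattern, with a simplified stand-in for the vendored `get_xp` (an illustration, not the real implementation):

    from functools import wraps
    import numpy as np

    def get_xp(xp):
        # Return a decorator that injects `xp` as a keyword argument.
        def decorator(func):
            @wraps(func)
            def wrapped(*args, **kwargs):
                return func(*args, xp=xp, **kwargs)
            return wrapped
        return decorator

    # Generic implementation, parameterized by the array namespace:
    def _arange(start, stop, step=1, *, xp):
        return xp.arange(start, stop, step)

    arange = get_xp(np)(_arange)  # bind NumPy; binding CuPy works the same way
    print(arange(0, 3))           # [0 1 2]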
env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py
ADDED
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+__all__ = [
+    "ndarray",
+    "Device",
+    "Dtype",
+]
+
+import sys
+from typing import (
+    Union,
+    TYPE_CHECKING,
+)
+
+from cupy import (
+    ndarray,
+    dtype,
+    int8,
+    int16,
+    int32,
+    int64,
+    uint8,
+    uint16,
+    uint32,
+    uint64,
+    float32,
+    float64,
+)
+
+from cupy.cuda.device import Device
+
+if TYPE_CHECKING or sys.version_info >= (3, 9):
+    Dtype = dtype[Union[
+        int8,
+        int16,
+        int32,
+        int64,
+        uint8,
+        uint16,
+        uint32,
+        uint64,
+        float32,
+        float64,
+    ]]
+else:
+    Dtype = dtype
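For reference, a small sketch of how these aliases could appear in annotations; the helper below is hypothetical, and importing the module requires CuPy:

    from __future__ import annotations
    import cupy as cp
    from scipy._lib.array_api_compat.cupy._typing import ndarray, Device, Dtype

    # Hypothetical helper: allocate zeros of a given dtype on a given device.
    def zeros_on(n: int, dtype: Dtype, device: Device) -> ndarray:
        with device:  # cupy.cuda.Device is usable as a context manager
            return cp.zeros(n, dtype=dtype)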
env-llmeval/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py
ADDED
@@ -0,0 +1,47 @@
+from cupy.linalg import *
+# cupy.linalg doesn't have __all__. If it is added, replace this with
+#
+# from cupy.linalg import __all__ as linalg_all
+_n = {}
+exec('from cupy.linalg import *', _n)
+del _n['__builtins__']
+linalg_all = list(_n)
+del _n
+
+from ..common import _linalg
+from .._internal import get_xp
+from ._aliases import (matmul, matrix_transpose, tensordot, vecdot)
+
+import cupy as cp
+
+cross = get_xp(cp)(_linalg.cross)
+outer = get_xp(cp)(_linalg.outer)
+EighResult = _linalg.EighResult
+QRResult = _linalg.QRResult
+SlogdetResult = _linalg.SlogdetResult
+SVDResult = _linalg.SVDResult
+eigh = get_xp(cp)(_linalg.eigh)
+qr = get_xp(cp)(_linalg.qr)
+slogdet = get_xp(cp)(_linalg.slogdet)
+svd = get_xp(cp)(_linalg.svd)
+cholesky = get_xp(cp)(_linalg.cholesky)
+matrix_rank = get_xp(cp)(_linalg.matrix_rank)
+pinv = get_xp(cp)(_linalg.pinv)
+matrix_norm = get_xp(cp)(_linalg.matrix_norm)
+svdvals = get_xp(cp)(_linalg.svdvals)
+diagonal = get_xp(cp)(_linalg.diagonal)
+trace = get_xp(cp)(_linalg.trace)
+
+# These functions are completely new here. If the library already has them
+# (i.e., numpy 2.0), use the library version instead of our wrapper.
+if hasattr(cp.linalg, 'vector_norm'):
+    vector_norm = cp.linalg.vector_norm
+else:
+    vector_norm = get_xp(cp)(_linalg.vector_norm)
+
+__all__ = linalg_all + _linalg.__all__
+
+del get_xp
+del cp
+del linalg_all
+del _linalg
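A note on the design choice at the top of this file: since cupy.linalg defines no `__all__`, the module star-imports into a scratch namespace via `exec` to discover which names the star import produces. A self-contained sketch of the same trick, using `math` purely as a stand-in module:

    _n = {}
    exec('from math import *', _n)  # star-import into a scratch dict
    del _n['__builtins__']          # exec inserts this key automatically
    exported = list(_n)             # the names the star import produced
    print('pi' in exported)         # True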
env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_19_data.npz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38e8fc7b041df0b23d7e5ca15ead1a065e6467611ef9a848cc7db93f80adfd87
+size 34050
env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_20_data.npz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14e222d34a7118c7284a1675c6feceee77b84df951a5c6ba2a5ee9ff3054fa1d
+size 31231
env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_6_data.npz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b2a0736b541ebf5c4b9b4c00d6dab281e73c9fb9913c6e2581a781b37b602f9
+size 15878
env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3dfab451d9d5c20243e0ed85cd8b6c9657669fb9a0f83b5be165585783d55b5
+size 2164
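The four .npz entries above are Git LFS pointer stubs (spec v1) rather than the data files themselves: the payload is stored out of band and addressed by the sha256 `oid`, with `size` giving its byte count. A small parsing sketch in plain Python (no LFS tooling assumed):

    pointer = (
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:a3dfab451d9d5c20243e0ed85cd8b6c9657669fb9a0f83b5be165585783d55b5\n"
        "size 2164\n"
    )
    fields = dict(line.split(' ', 1) for line in pointer.splitlines())
    print(fields['oid'], fields['size'])  # sha256:a3df... 2164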
env-llmeval/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (278 kB).
env-llmeval/lib/python3.10/site-packages/scipy/stats/_axis_nan_policy.py
ADDED
@@ -0,0 +1,642 @@
+# Many scipy.stats functions support `axis` and `nan_policy` parameters.
+# When the two are combined, it can be tricky to get all the behavior just
+# right. This file contains utility functions useful for scipy.stats functions
+# that support `axis` and `nan_policy`, including a decorator that
+# automatically adds `axis` and `nan_policy` arguments to a function.
+
+import numpy as np
+from functools import wraps
+from scipy._lib._docscrape import FunctionDoc, Parameter
+from scipy._lib._util import _contains_nan, AxisError, _get_nan
+import inspect
+
+
+def _broadcast_arrays(arrays, axis=None):
+    """
+    Broadcast shapes of arrays, ignoring incompatibility of specified axes
+    """
+    new_shapes = _broadcast_array_shapes(arrays, axis=axis)
+    if axis is None:
+        new_shapes = [new_shapes]*len(arrays)
+    return [np.broadcast_to(array, new_shape)
+            for array, new_shape in zip(arrays, new_shapes)]
+
+
+def _broadcast_array_shapes(arrays, axis=None):
+    """
+    Broadcast shapes of arrays, ignoring incompatibility of specified axes
+    """
+    shapes = [np.asarray(arr).shape for arr in arrays]
+    return _broadcast_shapes(shapes, axis)
+
+
+def _broadcast_shapes(shapes, axis=None):
+    """
+    Broadcast shapes, ignoring incompatibility of specified axes
+    """
+    if not shapes:
+        return shapes
+
+    # input validation
+    if axis is not None:
+        axis = np.atleast_1d(axis)
+        axis_int = axis.astype(int)
+        if not np.array_equal(axis_int, axis):
+            raise AxisError('`axis` must be an integer, a '
+                            'tuple of integers, or `None`.')
+        axis = axis_int
+
+    # First, ensure all shapes have same number of dimensions by prepending 1s.
+    n_dims = max([len(shape) for shape in shapes])
+    new_shapes = np.ones((len(shapes), n_dims), dtype=int)
+    for row, shape in zip(new_shapes, shapes):
+        row[len(row)-len(shape):] = shape  # can't use negative indices (-0:)
+
+    # Remove the shape elements of the axes to be ignored, but remember them.
+    if axis is not None:
+        axis[axis < 0] = n_dims + axis[axis < 0]
+        axis = np.sort(axis)
+        if axis[-1] >= n_dims or axis[0] < 0:
+            message = (f"`axis` is out of bounds "
+                       f"for array of dimension {n_dims}")
+            raise AxisError(message)
+
+        if len(np.unique(axis)) != len(axis):
+            raise AxisError("`axis` must contain only distinct elements")
+
+        removed_shapes = new_shapes[:, axis]
+        new_shapes = np.delete(new_shapes, axis, axis=1)
+
+    # If arrays are broadcastable, shape elements that are 1 may be replaced
+    # with a corresponding non-1 shape element. Assuming arrays are
+    # broadcastable, that final shape element can be found with:
+    new_shape = np.max(new_shapes, axis=0)
+    # except in case of an empty array:
+    new_shape *= new_shapes.all(axis=0)
+
+    # Among all arrays, there can only be one unique non-1 shape element.
+    # Therefore, if any non-1 shape element does not match what we found
+    # above, the arrays must not be broadcastable after all.
+    if np.any(~((new_shapes == 1) | (new_shapes == new_shape))):
+        raise ValueError("Array shapes are incompatible for broadcasting.")
+
+    if axis is not None:
+        # Add back the shape elements that were ignored
+        new_axis = axis - np.arange(len(axis))
+        new_shapes = [tuple(np.insert(new_shape, new_axis, removed_shape))
+                      for removed_shape in removed_shapes]
+        return new_shapes
+    else:
+        return tuple(new_shape)
+
+
+def _broadcast_array_shapes_remove_axis(arrays, axis=None):
+    """
+    Broadcast shapes of arrays, dropping specified axes
+
+    Given a sequence of arrays `arrays` and an integer or tuple `axis`, find
+    the shape of the broadcast result after consuming/dropping `axis`.
+    In other words, return output shape of a typical hypothesis test on
+    `arrays` vectorized along `axis`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats._axis_nan_policy import _broadcast_array_shapes_remove_axis
+    >>> a = np.zeros((5, 2, 1))
+    >>> b = np.zeros((9, 3))
+    >>> _broadcast_array_shapes_remove_axis((a, b), 1)
+    (5, 3)
+    """
+    # Note that here, `axis=None` means do not consume/drop any axes - _not_
+    # ravel arrays before broadcasting.
+    shapes = [arr.shape for arr in arrays]
+    return _broadcast_shapes_remove_axis(shapes, axis)
+
+
+def _broadcast_shapes_remove_axis(shapes, axis=None):
+    """
+    Broadcast shapes, dropping specified axes
+
+    Same as _broadcast_array_shapes, but given a sequence
+    of array shapes `shapes` instead of the arrays themselves.
+    """
+    shapes = _broadcast_shapes(shapes, axis)
+    shape = shapes[0]
+    if axis is not None:
+        shape = np.delete(shape, axis)
+    return tuple(shape)
+
+
+def _broadcast_concatenate(arrays, axis, paired=False):
+    """Concatenate arrays along an axis with broadcasting."""
+    arrays = _broadcast_arrays(arrays, axis if not paired else None)
+    res = np.concatenate(arrays, axis=axis)
+    return res
+
+
+# TODO: add support for `axis` tuples
+def _remove_nans(samples, paired):
+    "Remove nans from paired or unpaired 1D samples"
+    # potential optimization: don't copy arrays that don't contain nans
+    if not paired:
+        return [sample[~np.isnan(sample)] for sample in samples]
+
+    # for paired samples, we need to remove the whole pair when any part
+    # has a nan
+    nans = np.isnan(samples[0])
+    for sample in samples[1:]:
+        nans = nans | np.isnan(sample)
+    not_nans = ~nans
+    return [sample[not_nans] for sample in samples]
+
+
+def _remove_sentinel(samples, paired, sentinel):
+    "Remove sentinel values from paired or unpaired 1D samples"
+    # could consolidate with `_remove_nans`, but it's not quite as simple as
+    # passing `sentinel=np.nan` because `(np.nan == np.nan) is False`
+
+    # potential optimization: don't copy arrays that don't contain sentinel
+    if not paired:
+        return [sample[sample != sentinel] for sample in samples]
+
+    # for paired samples, we need to remove the whole pair when any part
+    # has a nan
+    sentinels = (samples[0] == sentinel)
+    for sample in samples[1:]:
+        sentinels = sentinels | (sample == sentinel)
+    not_sentinels = ~sentinels
+    return [sample[not_sentinels] for sample in samples]
+
+
+def _masked_arrays_2_sentinel_arrays(samples):
+    # masked arrays in `samples` are converted to regular arrays, and values
+    # corresponding with masked elements are replaced with a sentinel value
+
+    # return without modifying arrays if none have a mask
+    has_mask = False
+    for sample in samples:
+        mask = getattr(sample, 'mask', False)
+        has_mask = has_mask or np.any(mask)
+    if not has_mask:
+        return samples, None  # None means there is no sentinel value
+
+    # Choose a sentinel value. We can't use `np.nan`, because sentinel (masked)
+    # values are always omitted, but there are different nan policies.
+    dtype = np.result_type(*samples)
+    dtype = dtype if np.issubdtype(dtype, np.number) else np.float64
+    for i in range(len(samples)):
+        # Things get more complicated if the arrays are of different types.
+        # We could have different sentinel values for each array, but
+        # the purpose of this code is convenience, not efficiency.
+        samples[i] = samples[i].astype(dtype, copy=False)
+
+    inexact = np.issubdtype(dtype, np.inexact)
+    info = np.finfo if inexact else np.iinfo
+    max_possible, min_possible = info(dtype).max, info(dtype).min
+    nextafter = np.nextafter if inexact else (lambda x, _: x - 1)
+
+    sentinel = max_possible
+    # For simplicity, min_possible/np.infs are not candidate sentinel values
+    while sentinel > min_possible:
+        for sample in samples:
+            if np.any(sample == sentinel):  # choose a new sentinel value
+                sentinel = nextafter(sentinel, -np.inf)
+                break
+        else:  # when sentinel value is OK, break the while loop
+            break
+    else:
+        message = ("This function replaces masked elements with sentinel "
+                   "values, but the data contains all distinct values of this "
+                   "data type. Consider promoting the dtype to `np.float64`.")
+        raise ValueError(message)
+
+    # replace masked elements with sentinel value
+    out_samples = []
+    for sample in samples:
+        mask = getattr(sample, 'mask', None)
+        if mask is not None:  # turn all masked arrays into sentinel arrays
+            mask = np.broadcast_to(mask, sample.shape)
+            sample = sample.data.copy() if np.any(mask) else sample.data
+            sample = np.asarray(sample)  # `sample.data` could be a memoryview?
+            sample[mask] = sentinel
+        out_samples.append(sample)
+
+    return out_samples, sentinel
+
+
+def _check_empty_inputs(samples, axis):
+    """
+    Check for empty sample; return appropriate output for a vectorized hypotest
+    """
+    # if none of the samples are empty, we need to perform the test
+    if not any(sample.size == 0 for sample in samples):
+        return None
+    # otherwise, the statistic and p-value will be either empty arrays or
+    # arrays with NaNs. Produce the appropriate array and return it.
+    output_shape = _broadcast_array_shapes_remove_axis(samples, axis)
+    output = np.ones(output_shape) * _get_nan(*samples)
+    return output
+
+
+def _add_reduced_axes(res, reduced_axes, keepdims):
+    """
+    Add reduced axes back to all the arrays in the result object
+    if keepdims = True.
+    """
+    return ([np.expand_dims(output, reduced_axes) for output in res]
+            if keepdims else res)
+
+
+# Standard docstring / signature entries for `axis`, `nan_policy`, `keepdims`
+_name = 'axis'
+_desc = (
+    """If an int, the axis of the input along which to compute the statistic.
+The statistic of each axis-slice (e.g. row) of the input will appear in a
+corresponding element of the output.
+If ``None``, the input will be raveled before computing the statistic."""
+    .split('\n'))
+
+
+def _get_axis_params(default_axis=0, _name=_name, _desc=_desc):  # bind NOW
+    _type = f"int or None, default: {default_axis}"
+    _axis_parameter_doc = Parameter(_name, _type, _desc)
+    _axis_parameter = inspect.Parameter(_name,
+                                        inspect.Parameter.KEYWORD_ONLY,
+                                        default=default_axis)
+    return _axis_parameter_doc, _axis_parameter
+
+
+_name = 'nan_policy'
+_type = "{'propagate', 'omit', 'raise'}"
+_desc = (
+    """Defines how to handle input NaNs.
+
+- ``propagate``: if a NaN is present in the axis slice (e.g. row) along
+  which the statistic is computed, the corresponding entry of the output
+  will be NaN.
+- ``omit``: NaNs will be omitted when performing the calculation.
+  If insufficient data remains in the axis slice along which the
+  statistic is computed, the corresponding entry of the output will be
+  NaN.
+- ``raise``: if a NaN is present, a ``ValueError`` will be raised."""
+    .split('\n'))
+_nan_policy_parameter_doc = Parameter(_name, _type, _desc)
+_nan_policy_parameter = inspect.Parameter(_name,
+                                          inspect.Parameter.KEYWORD_ONLY,
+                                          default='propagate')
+
+_name = 'keepdims'
+_type = "bool, default: False"
+_desc = (
+    """If this is set to True, the axes which are reduced are left
+in the result as dimensions with size one. With this option,
+the result will broadcast correctly against the input array."""
+    .split('\n'))
+_keepdims_parameter_doc = Parameter(_name, _type, _desc)
+_keepdims_parameter = inspect.Parameter(_name,
+                                        inspect.Parameter.KEYWORD_ONLY,
+                                        default=False)
+
+_standard_note_addition = (
+    """\nBeginning in SciPy 1.9, ``np.matrix`` inputs (not recommended for new
+code) are converted to ``np.ndarray`` before the calculation is performed. In
+this case, the output will be a scalar or ``np.ndarray`` of appropriate shape
+rather than a 2D ``np.matrix``. Similarly, while masked elements of masked
+arrays are ignored, the output will be a scalar or ``np.ndarray`` rather than a
+masked array with ``mask=False``.""").split('\n')
+
+
+def _axis_nan_policy_factory(tuple_to_result, default_axis=0,
+                             n_samples=1, paired=False,
+                             result_to_tuple=None, too_small=0,
+                             n_outputs=2, kwd_samples=[], override=None):
+    """Factory for a wrapper that adds axis/nan_policy params to a function.
+
+    Parameters
+    ----------
+    tuple_to_result : callable
+        Callable that returns an object of the type returned by the function
+        being wrapped (e.g. the namedtuple or dataclass returned by a
+        statistical test) provided the separate components (e.g. statistic,
+        pvalue).
+    default_axis : int, default: 0
+        The default value of the axis argument. Standard is 0 except when
+        backwards compatibility demands otherwise (e.g. `None`).
+    n_samples : int or callable, default: 1
+        The number of data samples accepted by the function
+        (e.g. `mannwhitneyu`), a callable that accepts a dictionary of
+        parameters passed into the function and returns the number of data
+        samples (e.g. `wilcoxon`), or `None` to indicate an arbitrary number
+        of samples (e.g. `kruskal`).
+    paired : {False, True}
+        Whether the function being wrapped treats the samples as paired (i.e.
+        corresponding elements of each sample should be considered as different
+        components of the same sample.)
+    result_to_tuple : callable, optional
+        Function that unpacks the results of the function being wrapped into
+        a tuple. This is essentially the inverse of `tuple_to_result`. Default
+        is `None`, which is appropriate for statistical tests that return a
+        statistic, pvalue tuple (rather than, e.g., a non-iterable dataclass).
+    too_small : int or callable, default: 0
+        The largest unacceptably small sample for the function being wrapped.
+        For example, some functions require samples of size two or more or they
+        raise an error. This argument prevents the error from being raised when
+        input is not 1D and instead places a NaN in the corresponding element
+        of the result. If callable, it must accept a list of samples, axis,
+        and a dictionary of keyword arguments passed to the wrapper function as
+        arguments and return a bool indicating whether the samples passed are
+        too small.
+    n_outputs : int or callable, default: 2
+        The number of outputs produced by the function given 1d sample(s). For
+        example, hypothesis tests that return a namedtuple or result object
+        with attributes ``statistic`` and ``pvalue`` use the default
+        ``n_outputs=2``; summary statistics with scalar output use
+        ``n_outputs=1``. Alternatively, may be a callable that accepts a
+        dictionary of arguments passed into the wrapped function and returns
+        the number of outputs corresponding with those arguments.
+    kwd_samples : sequence, default: []
+        The names of keyword parameters that should be treated as samples. For
+        example, `gmean` accepts as its first argument a sample `a` but
+        also `weights` as a fourth, optional keyword argument. In this case, we
+        use `n_samples=1` and kwd_samples=['weights'].
+    override : dict, default: {'vectorization': False, 'nan_propagation': True}
+        Pass a dictionary with ``'vectorization': True`` to ensure that the
+        decorator overrides the function's behavior for multidimensional input.
+        Use ``'nan_propagation': False`` to ensure that the decorator does not
+        override the function's behavior for ``nan_policy='propagate'``.
+        (See `scipy.stats.mode`, for example.)
+    """
+    # Specify which existing behaviors the decorator must override
+    temp = override or {}
+    override = {'vectorization': False,
+                'nan_propagation': True}
+    override.update(temp)
+
+    if result_to_tuple is None:
+        def result_to_tuple(res):
+            return res
+
+    if not callable(too_small):
+        def is_too_small(samples, *ts_args, axis=-1, **ts_kwargs):
+            for sample in samples:
+                if sample.shape[axis] <= too_small:
+                    return True
+            return False
+    else:
+        is_too_small = too_small
+
+    def axis_nan_policy_decorator(hypotest_fun_in):
+        @wraps(hypotest_fun_in)
+        def axis_nan_policy_wrapper(*args, _no_deco=False, **kwds):
+
+            if _no_deco:  # for testing, decorator does nothing
+                return hypotest_fun_in(*args, **kwds)
+
+            # We need to be flexible about whether position or keyword
+            # arguments are used, but we need to make sure users don't pass
+            # both for the same parameter. To complicate matters, some
+            # functions accept samples with *args, and some functions already
+            # accept `axis` and `nan_policy` as positional arguments.
+            # The strategy is to make sure that there is no duplication
+            # between `args` and `kwds`, combine the two into `kwds`, then
+            # extract the samples, `nan_policy`, and `axis` from `kwds`, as
+            # they are dealt with separately.
+
+            # Check for intersection between positional and keyword args
+            params = list(inspect.signature(hypotest_fun_in).parameters)
+            if n_samples is None:
+                # Give unique names to each positional sample argument
+                # Note that *args can't be provided as a keyword argument
+                params = [f"arg{i}" for i in range(len(args))] + params[1:]
+
+            # raise if there are too many positional args
+            maxarg = (np.inf if inspect.getfullargspec(hypotest_fun_in).varargs
+                      else len(inspect.getfullargspec(hypotest_fun_in).args))
+            if len(args) > maxarg:  # let the function raise the right error
+                hypotest_fun_in(*args, **kwds)
+
+            # raise if multiple values passed for same parameter
+            d_args = dict(zip(params, args))
+            intersection = set(d_args) & set(kwds)
+            if intersection:  # let the function raise the right error
+                hypotest_fun_in(*args, **kwds)
+
+            # Consolidate other positional and keyword args into `kwds`
+            kwds.update(d_args)
+
+            # rename avoids UnboundLocalError
+            if callable(n_samples):
+                # Future refactoring idea: no need for callable n_samples.
+                # Just replace `n_samples` and `kwd_samples` with a single
+                # list of the names of all samples, and treat all of them
+                # as `kwd_samples` are treated below.
+                n_samp = n_samples(kwds)
+            else:
+                n_samp = n_samples or len(args)
+
+            # get the number of outputs
+            n_out = n_outputs  # rename to avoid UnboundLocalError
+            if callable(n_out):
+                n_out = n_out(kwds)
+
+            # If necessary, rearrange function signature: accept other samples
+            # as positional args right after the first n_samp args
+            kwd_samp = [name for name in kwd_samples
+                        if kwds.get(name, None) is not None]
+            n_kwd_samp = len(kwd_samp)
+            if not kwd_samp:
+                hypotest_fun_out = hypotest_fun_in
+            else:
+                def hypotest_fun_out(*samples, **kwds):
+                    new_kwds = dict(zip(kwd_samp, samples[n_samp:]))
+                    kwds.update(new_kwds)
+                    return hypotest_fun_in(*samples[:n_samp], **kwds)
+
+            # Extract the things we need here
+            try:  # if something is missing
+                samples = [np.atleast_1d(kwds.pop(param))
+                           for param in (params[:n_samp] + kwd_samp)]
+            except KeyError:  # let the function raise the right error
+                # might need to revisit this if required arg is not a "sample"
+                hypotest_fun_in(*args, **kwds)
+            vectorized = True if 'axis' in params else False
+            vectorized = vectorized and not override['vectorization']
+            axis = kwds.pop('axis', default_axis)
+            nan_policy = kwds.pop('nan_policy', 'propagate')
+            keepdims = kwds.pop("keepdims", False)
+            del args  # avoid the possibility of passing both `args` and `kwds`
+
+            # convert masked arrays to regular arrays with sentinel values
+            samples, sentinel = _masked_arrays_2_sentinel_arrays(samples)
+
+            # standardize to always work along last axis
+            reduced_axes = axis
+            if axis is None:
+                if samples:
+                    # when axis=None, take the maximum of all dimensions since
+                    # all the dimensions are reduced.
+                    n_dims = np.max([sample.ndim for sample in samples])
+                    reduced_axes = tuple(range(n_dims))
+                samples = [np.asarray(sample.ravel()) for sample in samples]
+            else:
+                samples = _broadcast_arrays(samples, axis=axis)
+                axis = np.atleast_1d(axis)
+                n_axes = len(axis)
+                # move all axes in `axis` to the end to be raveled
+                samples = [np.moveaxis(sample, axis, range(-len(axis), 0))
+                           for sample in samples]
+                shapes = [sample.shape for sample in samples]
+                # New shape is unchanged for all axes _not_ in `axis`
+                # At the end, we append the product of the shapes of the axes
+                # in `axis`. Appending -1 doesn't work for zero-size arrays!
+                new_shapes = [shape[:-n_axes] + (np.prod(shape[-n_axes:]),)
+                              for shape in shapes]
+                samples = [sample.reshape(new_shape)
+                           for sample, new_shape in zip(samples, new_shapes)]
+            axis = -1  # work over the last axis
+            NaN = _get_nan(*samples)
+
+            # if axis is not needed, just handle nan_policy and return
+            ndims = np.array([sample.ndim for sample in samples])
+            if np.all(ndims <= 1):
+                # Addresses nan_policy == "raise"
+                if nan_policy != 'propagate' or override['nan_propagation']:
+                    contains_nan = [_contains_nan(sample, nan_policy)[0]
+                                    for sample in samples]
+                else:
+                    # Behave as though there are no NaNs (even if there are)
+                    contains_nan = [False]*len(samples)
+
+                # Addresses nan_policy == "propagate"
+                if any(contains_nan) and (nan_policy == 'propagate'
+                                          and override['nan_propagation']):
+                    res = np.full(n_out, NaN)
+                    res = _add_reduced_axes(res, reduced_axes, keepdims)
+                    return tuple_to_result(*res)
+
+                # Addresses nan_policy == "omit"
+                if any(contains_nan) and nan_policy == 'omit':
+                    # consider passing in contains_nan
+                    samples = _remove_nans(samples, paired)
+
+                # ideally, this is what the behavior would be:
+                # if is_too_small(samples):
+                #     return tuple_to_result(NaN, NaN)
+                # but some existing functions raise exceptions, and changing
+                # behavior of those would break backward compatibility.
+
+                if sentinel:
+                    samples = _remove_sentinel(samples, paired, sentinel)
+                res = hypotest_fun_out(*samples, **kwds)
+                res = result_to_tuple(res)
+                res = _add_reduced_axes(res, reduced_axes, keepdims)
+                return tuple_to_result(*res)
+
+            # check for empty input
+            # ideally, move this to the top, but some existing functions raise
+            # exceptions for empty input, so overriding it would break
+            # backward compatibility.
+            empty_output = _check_empty_inputs(samples, axis)
+            # only return empty output if zero sized input is too small.
+            if (
+                empty_output is not None
+                and (is_too_small(samples, kwds) or empty_output.size == 0)
+            ):
+                res = [empty_output.copy() for i in range(n_out)]
+                res = _add_reduced_axes(res, reduced_axes, keepdims)
+                return tuple_to_result(*res)
+
+            # otherwise, concatenate all samples along axis, remembering where
+            # each separate sample begins
+            lengths = np.array([sample.shape[axis] for sample in samples])
+            split_indices = np.cumsum(lengths)
+            x = _broadcast_concatenate(samples, axis)
+
+            # Addresses nan_policy == "raise"
+            if nan_policy != 'propagate' or override['nan_propagation']:
+                contains_nan, _ = _contains_nan(x, nan_policy)
+            else:
+                contains_nan = False  # behave like there are no NaNs
+
+            if vectorized and not contains_nan and not sentinel:
+                res = hypotest_fun_out(*samples, axis=axis, **kwds)
+                res = result_to_tuple(res)
+                res = _add_reduced_axes(res, reduced_axes, keepdims)
+                return tuple_to_result(*res)
+
+            # Addresses nan_policy == "omit"
+            if contains_nan and nan_policy == 'omit':
+                def hypotest_fun(x):
+                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
+                    samples = _remove_nans(samples, paired)
+                    if sentinel:
+                        samples = _remove_sentinel(samples, paired, sentinel)
+                    if is_too_small(samples, kwds):
+                        return np.full(n_out, NaN)
+                    return result_to_tuple(hypotest_fun_out(*samples, **kwds))
+
+            # Addresses nan_policy == "propagate"
+            elif (contains_nan and nan_policy == 'propagate'
+                  and override['nan_propagation']):
+                def hypotest_fun(x):
+                    if np.isnan(x).any():
+                        return np.full(n_out, NaN)
+
+                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
+                    if sentinel:
+                        samples = _remove_sentinel(samples, paired, sentinel)
+                    if is_too_small(samples, kwds):
+                        return np.full(n_out, NaN)
+                    return result_to_tuple(hypotest_fun_out(*samples, **kwds))
+
+            else:
+                def hypotest_fun(x):
+                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
+                    if sentinel:
+                        samples = _remove_sentinel(samples, paired, sentinel)
+                    if is_too_small(samples, kwds):
+                        return np.full(n_out, NaN)
+                    return result_to_tuple(hypotest_fun_out(*samples, **kwds))
+
+            x = np.moveaxis(x, axis, 0)
+            res = np.apply_along_axis(hypotest_fun, axis=0, arr=x)
+            res = _add_reduced_axes(res, reduced_axes, keepdims)
+            return tuple_to_result(*res)
+
+        _axis_parameter_doc, _axis_parameter = _get_axis_params(default_axis)
+        doc = FunctionDoc(axis_nan_policy_wrapper)
+        parameter_names = [param.name for param in doc['Parameters']]
+        if 'axis' in parameter_names:
+            doc['Parameters'][parameter_names.index('axis')] = (
+                _axis_parameter_doc)
+        else:
+            doc['Parameters'].append(_axis_parameter_doc)
+        if 'nan_policy' in parameter_names:
+            doc['Parameters'][parameter_names.index('nan_policy')] = (
+                _nan_policy_parameter_doc)
+        else:
+            doc['Parameters'].append(_nan_policy_parameter_doc)
+        if 'keepdims' in parameter_names:
+            doc['Parameters'][parameter_names.index('keepdims')] = (
+                _keepdims_parameter_doc)
+        else:
+            doc['Parameters'].append(_keepdims_parameter_doc)
+        doc['Notes'] += _standard_note_addition
+        doc = str(doc).split("\n", 1)[1]  # remove signature
+        axis_nan_policy_wrapper.__doc__ = str(doc)
+
+        sig = inspect.signature(axis_nan_policy_wrapper)
+        parameters = sig.parameters
+        parameter_list = list(parameters.values())
+        if 'axis' not in parameters:
+            parameter_list.append(_axis_parameter)
+        if 'nan_policy' not in parameters:
+            parameter_list.append(_nan_policy_parameter)
+        if 'keepdims' not in parameters:
+            parameter_list.append(_keepdims_parameter)
+        sig = sig.replace(parameters=parameter_list)
+        axis_nan_policy_wrapper.__signature__ = sig
+
+        return axis_nan_policy_wrapper
+    return axis_nan_policy_decorator
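For orientation, a minimal sketch of how this factory is meant to be applied; the statistic below is hypothetical, and since `_axis_nan_policy_factory` is private SciPy API, the call pattern is inferred from the code above rather than from documented usage:

    import numpy as np
    from collections import namedtuple
    from scipy.stats._axis_nan_policy import _axis_nan_policy_factory

    DemoResult = namedtuple('DemoResult', ('statistic', 'pvalue'))

    # Hypothetical one-sample "test"; the wrapper supplies axis, nan_policy,
    # and keepdims handling around this plain 1-D implementation.
    @_axis_nan_policy_factory(DemoResult, n_samples=1)
    def demo_statistic(x):
        return DemoResult(np.mean(x), 1.0)

    x = np.array([[1., 2., np.nan], [4., 5., 6.]])
    print(demo_statistic(x, axis=1, nan_policy='omit'))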
env-llmeval/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (360 kB).
env-llmeval/lib/python3.10/site-packages/scipy/stats/_biasedurn.pxd
ADDED
@@ -0,0 +1,27 @@
+# Declare the class with cdef
+cdef extern from "biasedurn/stocc.h" nogil:
+    cdef cppclass CFishersNCHypergeometric:
+        CFishersNCHypergeometric(int, int, int, double, double) except +
+        int mode()
+        double mean()
+        double variance()
+        double probability(int x)
+        double moments(double * mean, double * var)
+
+    cdef cppclass CWalleniusNCHypergeometric:
+        CWalleniusNCHypergeometric() except +
+        CWalleniusNCHypergeometric(int, int, int, double, double) except +
+        int mode()
+        double mean()
+        double variance()
+        double probability(int x)
+        double moments(double * mean, double * var)
+
+    cdef cppclass StochasticLib3:
+        StochasticLib3(int seed) except +
+        double Random() except +
+        void SetAccuracy(double accur)
+        int FishersNCHyp (int n, int m, int N, double odds) except +
+        int WalleniusNCHyp (int n, int m, int N, double odds) except +
+        double(*next_double)()
+        double(*next_normal)(const double m, const double s)
env-llmeval/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py
ADDED
@@ -0,0 +1,795 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import builtins
|
2 |
+
from warnings import catch_warnings, simplefilter
|
3 |
+
import numpy as np
|
4 |
+
from operator import index
|
5 |
+
from collections import namedtuple
|
6 |
+
|
7 |
+
__all__ = ['binned_statistic',
|
8 |
+
'binned_statistic_2d',
|
9 |
+
'binned_statistic_dd']
|
10 |
+
|
11 |
+
|
12 |
+
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
|
13 |
+
('statistic', 'bin_edges', 'binnumber'))
|
14 |
+
|
15 |
+
|
16 |
+
def binned_statistic(x, values, statistic='mean',
|
17 |
+
bins=10, range=None):
|
18 |
+
"""
|
19 |
+
Compute a binned statistic for one or more sets of data.
|
20 |
+
|
21 |
+
This is a generalization of a histogram function. A histogram divides
|
22 |
+
the space into bins, and returns the count of the number of points in
|
23 |
+
each bin. This function allows the computation of the sum, mean, median,
|
24 |
+
or other statistic of the values (or set of values) within each bin.
|
25 |
+
|
26 |
+
Parameters
|
27 |
+
----------
|
28 |
+
x : (N,) array_like
|
29 |
+
A sequence of values to be binned.
|
30 |
+
values : (N,) array_like or list of (N,) array_like
|
31 |
+
The data on which the statistic will be computed. This must be
|
32 |
+
the same shape as `x`, or a set of sequences - each the same shape as
|
33 |
+
`x`. If `values` is a set of sequences, the statistic will be computed
|
34 |
+
on each independently.
|
35 |
+
statistic : string or callable, optional
|
36 |
+
The statistic to compute (default is 'mean').
|
37 |
+
The following statistics are available:
|
38 |
+
|
39 |
+
* 'mean' : compute the mean of values for points within each bin.
|
40 |
+
Empty bins will be represented by NaN.
|
41 |
+
        * 'std' : compute the standard deviation within each bin. This
          is implicitly calculated with ddof=0.
        * 'median' : compute the median of values for points within each
          bin. Empty bins will be represented by NaN.
        * 'count' : compute the count of points within each bin. This is
          identical to an unweighted histogram. `values` array is not
          referenced.
        * 'sum' : compute the sum of values for points within each bin.
          This is identical to a weighted histogram.
        * 'min' : compute the minimum of values for points within each bin.
          Empty bins will be represented by NaN.
        * 'max' : compute the maximum of values for points within each bin.
          Empty bins will be represented by NaN.
        * function : a user-defined function which takes a 1D array of
          values, and outputs a single numerical statistic. This function
          will be called on the values in each bin. Empty bins will be
          represented by function([]), or NaN if this returns an error.

    bins : int or sequence of scalars, optional
        If `bins` is an int, it defines the number of equal-width bins in the
        given range (10 by default). If `bins` is a sequence, it defines the
        bin edges, including the rightmost edge, allowing for non-uniform bin
        widths. Values in `x` that are smaller than the lowest bin edge are
        assigned to bin number 0, values beyond the highest bin are assigned
        to ``bins[-1]``. If the bin edges are specified, the number of bins
        will be ``nx = len(bins) - 1``.
    range : (float, float) or [(float, float)], optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(x.min(), x.max())``. Values outside the range are
        ignored.

    Returns
    -------
    statistic : array
        The values of the selected statistic in each bin.
    bin_edges : array of dtype float
        The bin edges, of length ``len(statistic) + 1``.
    binnumber : 1-D ndarray of ints
        Indices of the bins (corresponding to `bin_edges`) in which each value
        of `x` belongs. Same length as `values`. A binnumber of `i` means the
        corresponding value is between (bin_edges[i-1], bin_edges[i]).

    See Also
    --------
    numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd

    Notes
    -----
    All but the last (righthand-most) bin are half-open. In other words, if
    `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
    but excluding 2) and the second ``[2, 3)``. The last bin, however, is
    ``[3, 4]``, which *includes* 4.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt

    First some basic examples:

    Create two evenly spaced bins in the range of the given sample, and sum
    the corresponding values in each of those bins:

    >>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
    >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
    BinnedStatisticResult(statistic=array([4. , 4.5]),
            bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2]))

    Multiple arrays of values can also be passed. The statistic is calculated
    on each set independently:

    >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
    >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
    BinnedStatisticResult(statistic=array([[4. , 4.5],
           [8. , 9. ]]), bin_edges=array([1., 4., 7.]),
           binnumber=array([1, 1, 1, 2, 2]))

    >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
    ...                        bins=3)
    BinnedStatisticResult(statistic=array([1., 2., 4.]),
            bin_edges=array([1., 2., 3., 4.]),
            binnumber=array([1, 2, 1, 2, 3]))

    As a second example, we now generate some random data of sailing boat
    speed as a function of wind speed, and then determine how fast our boat
    is for certain wind speeds:

    >>> rng = np.random.default_rng()
    >>> windspeed = 8 * rng.random(500)
    >>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500)
    >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
    ...                 boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
    >>> plt.figure()
    >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
    >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
    ...            label='binned statistic of data')
    >>> plt.legend()

    Now we can use ``binnumber`` to select all datapoints with a windspeed
    below 1:

    >>> low_boatspeed = boatspeed[binnumber == 0]

    As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
    plot of a distribution that shows the mean and distribution around that
    mean per bin, on top of a regular histogram and the probability
    distribution function:

    >>> x = np.linspace(0, 5, num=500)
    >>> x_pdf = stats.maxwell.pdf(x)
    >>> samples = stats.maxwell.rvs(size=10000)

    >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
    ...         statistic='mean', bins=25)
    >>> bin_width = (bin_edges[1] - bin_edges[0])
    >>> bin_centers = bin_edges[1:] - bin_width/2

    >>> plt.figure()
    >>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
    ...          alpha=0.2, label='histogram of data')
    >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
    >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
    ...            label='binned statistic of data')
    >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
    >>> plt.legend(fontsize=10)
    >>> plt.show()

    """
    try:
        N = len(bins)
    except TypeError:
        N = 1

    if N != 1:
        bins = [np.asarray(bins, float)]

    if range is not None:
        if len(range) == 2:
            range = [range]

    medians, edges, binnumbers = binned_statistic_dd(
        [x], values, statistic, bins, range)

    return BinnedStatisticResult(medians, edges[0], binnumbers)

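A minimal sketch (assumed numbers, separate from the module above) of the half-open bin convention described in the Notes: with edges [1, 2, 3, 4], a value equal to the last edge stays in the final, closed bin rather than becoming an outlier.

    from scipy import stats
    res = stats.binned_statistic([1, 2, 3, 4], [10., 20., 30., 40.],
                                 statistic='count', bins=[1, 2, 3, 4])
    # res.statistic -> array([1., 1., 2.]): both 3 and 4 fall in [3, 4].
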
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
                                     ('statistic', 'x_edge', 'y_edge',
                                      'binnumber'))


def binned_statistic_2d(x, y, values, statistic='mean',
                        bins=10, range=None, expand_binnumbers=False):
    """
    Compute a bidimensional binned statistic for one or more sets of data.

    This is a generalization of a histogram2d function. A histogram divides
    the space into bins, and returns the count of the number of points in
    each bin. This function allows the computation of the sum, mean, median,
    or other statistic of the values (or set of values) within each bin.

    Parameters
    ----------
    x : (N,) array_like
        A sequence of values to be binned along the first dimension.
    y : (N,) array_like
        A sequence of values to be binned along the second dimension.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed. This must be
        the same shape as `x`, or a list of sequences - each with the same
        shape as `x`. If `values` is such a list, the statistic will be
        computed on each independently.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').
        The following statistics are available:

        * 'mean' : compute the mean of values for points within each bin.
          Empty bins will be represented by NaN.
        * 'std' : compute the standard deviation within each bin. This
          is implicitly calculated with ddof=0.
        * 'median' : compute the median of values for points within each
          bin. Empty bins will be represented by NaN.
        * 'count' : compute the count of points within each bin. This is
          identical to an unweighted histogram. `values` array is not
          referenced.
        * 'sum' : compute the sum of values for points within each bin.
          This is identical to a weighted histogram.
        * 'min' : compute the minimum of values for points within each bin.
          Empty bins will be represented by NaN.
        * 'max' : compute the maximum of values for points within each bin.
          Empty bins will be represented by NaN.
        * function : a user-defined function which takes a 1D array of
          values, and outputs a single numerical statistic. This function
          will be called on the values in each bin. Empty bins will be
          represented by function([]), or NaN if this returns an error.

    bins : int or [int, int] or array_like or [array, array], optional
        The bin specification:

        * the number of bins for the two dimensions (nx = ny = bins),
        * the number of bins in each dimension (nx, ny = bins),
        * the bin edges for the two dimensions (x_edge = y_edge = bins),
        * the bin edges in each dimension (x_edge, y_edge = bins).

        If the bin edges are specified, the number of bins will be
        ``nx = len(x_edge) - 1, ny = len(y_edge) - 1``.

    range : (2,2) array_like, optional
        The leftmost and rightmost edges of the bins along each dimension
        (if not specified explicitly in the `bins` parameters):
        [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
        considered outliers and not tallied in the histogram.
    expand_binnumbers : bool, optional
        'False' (default): the returned `binnumber` is a shape (N,) array of
        linearized bin indices.
        'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
        ndarray, where each row gives the bin numbers in the corresponding
        dimension.
        See the `binnumber` returned value, and the `Examples` section.

        .. versionadded:: 0.17.0

    Returns
    -------
    statistic : (nx, ny) ndarray
        The values of the selected statistic in each two-dimensional bin.
    x_edge : (nx + 1) ndarray
        The bin edges along the first dimension.
    y_edge : (ny + 1) ndarray
        The bin edges along the second dimension.
    binnumber : (N,) array of ints or (2,N) ndarray of ints
        This assigns to each element of `sample` an integer that represents
        the bin in which this observation falls. The representation depends
        on the `expand_binnumbers` argument. See `Notes` for details.

    See Also
    --------
    numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd

    Notes
    -----
    Binedges:
    All but the last (righthand-most) bin are half-open. In other words, if
    `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
    but excluding 2) and the second ``[2, 3)``. The last bin, however, is
    ``[3, 4]``, which *includes* 4.

    `binnumber`:
    This returned argument assigns to each element of `sample` an integer
    that represents the bin in which it belongs. The representation depends
    on the `expand_binnumbers` argument. If 'False' (default): The returned
    `binnumber` is a shape (N,) array of linearized indices mapping each
    element of `sample` to its corresponding bin (using row-major ordering).
    Note that the returned linearized bin indices are used for an array with
    extra bins on the outer binedges to capture values outside of the defined
    bin bounds.
    If 'True': The returned `binnumber` is a shape (2,N) ndarray where
    each row indicates bin placements for each dimension respectively. In
    each dimension, a binnumber of `i` means the corresponding value is
    between (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import stats

    Calculate the counts with explicit bin-edges:

    >>> x = [0.1, 0.1, 0.1, 0.6]
    >>> y = [2.1, 2.6, 2.1, 2.1]
    >>> binx = [0.0, 0.5, 1.0]
    >>> biny = [2.0, 2.5, 3.0]
    >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny])
    >>> ret.statistic
    array([[2., 1.],
           [1., 0.]])

    The bin in which each sample is placed is given by the `binnumber`
    returned parameter. By default, these are the linearized bin indices:

    >>> ret.binnumber
    array([5, 6, 5, 9])

    The bin indices can also be expanded into separate entries for each
    dimension using the `expand_binnumbers` parameter:

    >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny],
    ...                                 expand_binnumbers=True)
    >>> ret.binnumber
    array([[1, 1, 1, 2],
           [1, 2, 1, 1]])

    Which shows that the first three elements belong to xbin 1 and the
    fourth to xbin 2; and similarly for y.

    """

    # This code is based on np.histogram2d
    try:
        N = len(bins)
    except TypeError:
        N = 1

    if N != 1 and N != 2:
        xedges = yedges = np.asarray(bins, float)
        bins = [xedges, yedges]

    medians, edges, binnumbers = binned_statistic_dd(
        [x, y], values, statistic, bins, range,
        expand_binnumbers=expand_binnumbers)

    return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)

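The linearized indices above can be decoded by hand; a sketch (assumed example, separate from the module) showing that ``np.unravel_index`` over the padded grid (two outlier bins per dimension) reproduces the expanded bin numbers from the docstring:

    import numpy as np
    from scipy import stats
    x = [0.1, 0.1, 0.1, 0.6]
    y = [2.1, 2.6, 2.1, 2.1]
    ret = stats.binned_statistic_2d(x, y, None, 'count',
                                    bins=[[0.0, 0.5, 1.0], [2.0, 2.5, 3.0]])
    # 2 real bins + 2 outlier bins = 4 per dimension here.
    rows = np.unravel_index(ret.binnumber, (4, 4))
    # rows -> (array([1, 1, 1, 2]), array([1, 2, 1, 1])), matching the
    # expand_binnumbers=True output shown in the Examples.
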
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
                                     ('statistic', 'bin_edges',
                                      'binnumber'))


def _bincount(x, weights):
    # np.bincount does not accept complex weights, so accumulate the real
    # and imaginary parts separately and recombine.
    if np.iscomplexobj(weights):
        a = np.bincount(x, np.real(weights))
        b = np.bincount(x, np.imag(weights))
        z = a + b*1j
    else:
        z = np.bincount(x, weights)
    return z

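A short sketch (assumed values, separate from the module) of the split-and-recombine trick used by ``_bincount``:

    import numpy as np
    idx = np.array([0, 1, 1, 2])
    w = np.array([1+2j, 3+4j, 5+6j, 7+8j])
    z = np.bincount(idx, np.real(w)) + 1j * np.bincount(idx, np.imag(w))
    # z -> array([1.+2.j, 8.+10.j, 7.+8.j])
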
def binned_statistic_dd(sample, values, statistic='mean',
                        bins=10, range=None, expand_binnumbers=False,
                        binned_statistic_result=None):
    """
    Compute a multidimensional binned statistic for a set of data.

    This is a generalization of a histogramdd function. A histogram divides
    the space into bins, and returns the count of the number of points in
    each bin. This function allows the computation of the sum, mean, median,
    or other statistic of the values within each bin.

    Parameters
    ----------
    sample : array_like
        Data to histogram passed as a sequence of N arrays of length D, or
        as an (N,D) array.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed. This must be
        the same shape as `sample`, or a list of sequences - each with the
        same shape as `sample`. If `values` is such a list, the statistic
        will be computed on each independently.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').
        The following statistics are available:

        * 'mean' : compute the mean of values for points within each bin.
          Empty bins will be represented by NaN.
        * 'median' : compute the median of values for points within each
          bin. Empty bins will be represented by NaN.
        * 'count' : compute the count of points within each bin. This is
          identical to an unweighted histogram. `values` array is not
          referenced.
        * 'sum' : compute the sum of values for points within each bin.
          This is identical to a weighted histogram.
        * 'std' : compute the standard deviation within each bin. This
          is implicitly calculated with ddof=0. If the number of values
          within a given bin is 0 or 1, the computed standard deviation
          value will be 0 for the bin.
        * 'min' : compute the minimum of values for points within each bin.
          Empty bins will be represented by NaN.
        * 'max' : compute the maximum of values for points within each bin.
          Empty bins will be represented by NaN.
        * function : a user-defined function which takes a 1D array of
          values, and outputs a single numerical statistic. This function
          will be called on the values in each bin. Empty bins will be
          represented by function([]), or NaN if this returns an error.

    bins : sequence or positive int, optional
        The bin specification must be in one of the following forms:

        * A sequence of arrays describing the bin edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... = bins).
        * The number of bins for all dimensions (nx = ny = ... = bins).
    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    expand_binnumbers : bool, optional
        'False' (default): the returned `binnumber` is a shape (N,) array of
        linearized bin indices.
        'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
        ndarray, where each row gives the bin numbers in the corresponding
        dimension.
        See the `binnumber` returned value, and the `Examples` section of
        `binned_statistic_2d`.
    binned_statistic_result : BinnedStatisticddResult
        Result of a previous call to the function in order to reuse bin edges
        and bin numbers with new values and/or a different statistic.
        To reuse bin numbers, `expand_binnumbers` must have been set to False
        (the default).

        .. versionadded:: 0.17.0

    Returns
    -------
    statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
    bin_edges : list of ndarrays
        A list of D arrays describing the (nxi + 1) bin edges for each
        dimension.
    binnumber : (N,) array of ints or (D,N) ndarray of ints
        This assigns to each element of `sample` an integer that represents
        the bin in which this observation falls. The representation depends
        on the `expand_binnumbers` argument. See `Notes` for details.

    See Also
    --------
    numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d

    Notes
    -----
    Binedges:
    All but the last (righthand-most) bin are half-open in each dimension.
    In other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
    ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
    last bin, however, is ``[3, 4]``, which *includes* 4.

    `binnumber`:
    This returned argument assigns to each element of `sample` an integer
    that represents the bin in which it belongs. The representation depends
    on the `expand_binnumbers` argument. If 'False' (default): The returned
    `binnumber` is a shape (N,) array of linearized indices mapping each
    element of `sample` to its corresponding bin (using row-major ordering).
    If 'True': The returned `binnumber` is a shape (D,N) ndarray where
    each row indicates bin placements for each dimension respectively. In
    each dimension, a binnumber of `i` means the corresponding value is
    between (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    >>> from mpl_toolkits.mplot3d import Axes3D

    Take an array of 600 (x, y) coordinates as an example.
    `binned_statistic_dd` can handle arrays of higher dimension `D`. But a
    plot of dimension `D+1` is required.

    >>> mu = np.array([0., 1.])
    >>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])
    >>> multinormal = stats.multivariate_normal(mu, sigma)
    >>> data = multinormal.rvs(size=600, random_state=235412)
    >>> data.shape
    (600, 2)

    Create bins and count how many arrays fall in each bin:

    >>> N = 60
    >>> x = np.linspace(-3, 3, N)
    >>> y = np.linspace(-3, 4, N)
    >>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],
    ...                                 statistic='count')
    >>> bincounts = ret.statistic

    Set the volume and the location of bars:

    >>> dx = x[1] - x[0]
    >>> dy = y[1] - y[0]
    >>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)
    >>> z = 0

    >>> bincounts = bincounts.ravel()
    >>> x = x.ravel()
    >>> y = y.ravel()

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, projection='3d')
    >>> with np.errstate(divide='ignore'):   # silence random axes3d warning
    ...     ax.bar3d(x, y, z, dx, dy, bincounts)

    Reuse bin numbers and bin edges with new values:

    >>> ret2 = stats.binned_statistic_dd(data, -np.arange(600),
    ...                                  binned_statistic_result=ret,
    ...                                  statistic='mean')
    """
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
    if not callable(statistic) and statistic not in known_stats:
        raise ValueError(f'invalid statistic {statistic!r}')

    try:
        bins = index(bins)
    except TypeError:
        # bins is not an integer
        pass
    # If bins was an integer-like object, now it is an actual Python int.

    # NOTE: for _bin_edges(), see e.g. gh-11365
    if isinstance(bins, int) and not np.isfinite(sample).all():
        raise ValueError(f'{sample!r} contains non-finite values.')

    # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
    # `Dlen` is the length of elements along each dimension.
    # This code is based on np.histogramdd
    try:
        # `sample` is an ND-array.
        Dlen, Ndim = sample.shape
    except (AttributeError, ValueError):
        # `sample` is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        Dlen, Ndim = sample.shape

    # Store initial shape of `values` to preserve it in the output
    values = np.asarray(values)
    input_shape = list(values.shape)
    # Make sure that `values` is 2D to iterate over rows
    values = np.atleast_2d(values)
    Vdim, Vlen = values.shape

    # Make sure `values` match `sample`
    if statistic != 'count' and Vlen != Dlen:
        raise AttributeError('The number of `values` elements must match the '
                             'length of each `sample` dimension.')

    try:
        M = len(bins)
        if M != Ndim:
            raise AttributeError('The dimension of bins must be equal '
                                 'to the dimension of the sample x.')
    except TypeError:
        bins = Ndim * [bins]

    if binned_statistic_result is None:
        nbin, edges, dedges = _bin_edges(sample, bins, range)
        binnumbers = _bin_numbers(sample, nbin, edges, dedges)
    else:
        edges = binned_statistic_result.bin_edges
        nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)])
        # +1 for outlier bins
        dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)]
        binnumbers = binned_statistic_result.binnumber

    # Avoid overflow with double precision. Complex `values` -> `complex128`.
    result_type = np.result_type(values, np.float64)
    result = np.empty([Vdim, nbin.prod()], dtype=result_type)

    if statistic in {'mean', np.mean}:
        result.fill(np.nan)
        flatcount = _bincount(binnumbers, None)
        a = flatcount.nonzero()
        for vv in builtins.range(Vdim):
            flatsum = _bincount(binnumbers, values[vv])
            result[vv, a] = flatsum[a] / flatcount[a]
    elif statistic in {'std', np.std}:
        result.fill(np.nan)
        flatcount = _bincount(binnumbers, None)
        a = flatcount.nonzero()
        for vv in builtins.range(Vdim):
            flatsum = _bincount(binnumbers, values[vv])
            delta = values[vv] - flatsum[binnumbers] / flatcount[binnumbers]
            std = np.sqrt(
                _bincount(binnumbers, delta*np.conj(delta))[a] / flatcount[a]
            )
            result[vv, a] = std
        result = np.real(result)
    elif statistic == 'count':
        result = np.empty([Vdim, nbin.prod()], dtype=np.float64)
        result.fill(0)
        flatcount = _bincount(binnumbers, None)
        a = np.arange(len(flatcount))
        result[:, a] = flatcount[np.newaxis, :]
    elif statistic in {'sum', np.sum}:
        result.fill(0)
        for vv in builtins.range(Vdim):
            flatsum = _bincount(binnumbers, values[vv])
            a = np.arange(len(flatsum))
            result[vv, a] = flatsum
    elif statistic in {'median', np.median}:
        result.fill(np.nan)
        for vv in builtins.range(Vdim):
            i = np.lexsort((values[vv], binnumbers))
            _, j, counts = np.unique(binnumbers[i],
                                     return_index=True, return_counts=True)
            mid = j + (counts - 1) / 2
            mid_a = values[vv, i][np.floor(mid).astype(int)]
            mid_b = values[vv, i][np.ceil(mid).astype(int)]
            medians = (mid_a + mid_b) / 2
            result[vv, binnumbers[i][j]] = medians
    elif statistic in {'min', np.min}:
        result.fill(np.nan)
        for vv in builtins.range(Vdim):
            i = np.argsort(values[vv])[::-1]  # Reversed so the min is last
            result[vv, binnumbers[i]] = values[vv, i]
    elif statistic in {'max', np.max}:
        result.fill(np.nan)
        for vv in builtins.range(Vdim):
            i = np.argsort(values[vv])
            result[vv, binnumbers[i]] = values[vv, i]
    elif callable(statistic):
        with np.errstate(invalid='ignore'), catch_warnings():
            simplefilter("ignore", RuntimeWarning)
            try:
                null = statistic([])
            except Exception:
                null = np.nan
        if np.iscomplexobj(null):
            result = result.astype(np.complex128)
        result.fill(null)
        try:
            _calc_binned_statistic(
                Vdim, binnumbers, result, values, statistic
            )
        except ValueError:
            result = result.astype(np.complex128)
            _calc_binned_statistic(
                Vdim, binnumbers, result, values, statistic
            )

    # Shape into a proper matrix
    result = result.reshape(np.append(Vdim, nbin))

    # Remove outliers (indices 0 and -1 for each bin-dimension).
    core = tuple([slice(None)] + Ndim * [slice(1, -1)])
    result = result[core]

    # Unravel binnumbers into an ndarray, each row the bins for each dimension
    if expand_binnumbers and Ndim > 1:
        binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))

    if np.any(result.shape[1:] != nbin - 2):
        raise RuntimeError('Internal Shape Error')

    # Reshape to have output (`result`) match input (`values`) shape
    result = result.reshape(input_shape[:-1] + list(nbin-2))

    return BinnedStatisticddResult(result, edges, binnumbers)

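A small sketch (assumed numbers, separate from the module) confirming the ddof=0 behaviour of the 'std' statistic documented above:

    import numpy as np
    from scipy import stats
    vals = np.array([1.0, 2.0, 4.0])
    res = stats.binned_statistic_dd([[0.5, 0.5, 0.5]], vals,
                                    statistic='std', bins=[[0, 1]])
    # res.statistic[0] == np.std(vals, ddof=0), not the ddof=1 value.
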
def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func):
    unique_bin_numbers = np.unique(bin_numbers)
    for vv in builtins.range(Vdim):
        bin_map = _create_binned_data(bin_numbers, unique_bin_numbers,
                                      values, vv)
        for i in unique_bin_numbers:
            stat = stat_func(np.array(bin_map[i]))
            if np.iscomplexobj(stat) and not np.iscomplexobj(result):
                raise ValueError("The statistic function returns complex "
                                 "values, but the result array is real.")
            result[vv, i] = stat


def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv):
    """Create a mapping of bin ids to the values in each bin.

    key: bin number
    value: list of binned data
    """
    bin_map = dict()
    for i in unique_bin_numbers:
        bin_map[i] = []
    for i in builtins.range(len(bin_numbers)):
        bin_map[bin_numbers[i]].append(values[vv, i])
    return bin_map

def _bin_edges(sample, bins=None, range=None):
    """Create edge arrays.
    """
    Dlen, Ndim = sample.shape

    nbin = np.empty(Ndim, int)    # Number of bins in each dimension
    edges = Ndim * [None]         # Bin edges for each dim (will be 2D array)
    dedges = Ndim * [None]        # Spacing between edges (will be 2D array)

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        smin = np.atleast_1d(np.array(sample.min(axis=0), float))
        smax = np.atleast_1d(np.array(sample.max(axis=0), float))
    else:
        if len(range) != Ndim:
            raise ValueError(
                f"range given for {len(range)} dimensions; {Ndim} required")
        smin = np.empty(Ndim)
        smax = np.empty(Ndim)
        for i in builtins.range(Ndim):
            if range[i][1] < range[i][0]:
                raise ValueError(
                    "In {}range, start must be <= stop".format(
                        f"dimension {i + 1} of " if Ndim > 1 else ""))
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in builtins.range(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # Preserve sample floating point precision in bin edges
    edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating)
                   else float)

    # Create edge arrays
    for i in builtins.range(Ndim):
        if np.isscalar(bins[i]):
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1,
                                   dtype=edges_dtype)
        else:
            edges[i] = np.asarray(bins[i], edges_dtype)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = np.diff(edges[i])

    nbin = np.asarray(nbin)

    return nbin, edges, dedges

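A sketch (assumed numbers, separate from the module) of the scalar-bins path above: requesting 10 bins yields ``nbin = 12`` once the two outlier bins are added, with 11 edges spanning the data range.

    import numpy as np
    bins, smin, smax = 10, 0.0, 5.0
    nbin = bins + 2                            # +2 for outlier bins
    edges = np.linspace(smin, smax, nbin - 1)  # 11 edges -> 10 real bins
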
def _bin_numbers(sample, nbin, edges, dedges):
    """Compute the bin number each sample falls into, in each dimension.
    """
    Dlen, Ndim = sample.shape

    sampBin = [
        np.digitize(sample[:, i], edges[i])
        for i in range(Ndim)
    ]

    # Using `digitize`, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    for i in range(Ndim):
        # Find the rounding precision
        dedges_min = dedges[i].min()
        if dedges_min == 0:
            raise ValueError('The smallest edge difference is numerically 0.')
        decimal = int(-np.log10(dedges_min)) + 6
        # Find which points are on the rightmost edge.
        on_edge = np.where((sample[:, i] >= edges[i][-1]) &
                           (np.around(sample[:, i], decimal) ==
                            np.around(edges[i][-1], decimal)))[0]
        # Shift these points one bin to the left.
        sampBin[i][on_edge] -= 1

    # Compute the sample indices in the flattened statistic matrix.
    binnumbers = np.ravel_multi_index(sampBin, nbin)

    return binnumbers
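A sketch (assumed numbers, separate from the module) of the rightmost-edge correction above: ``np.digitize`` sends a value equal to the last edge past the end, so it is shifted back into the final bin.

    import numpy as np
    edges = np.array([0.0, 1.0, 2.0])
    x = np.array([0.5, 2.0])
    raw = np.digitize(x, edges)   # array([1, 3]); 2.0 lands past the end
    raw[x >= edges[-1]] -= 1      # array([1, 2]); 2.0 now in the last bin
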
env-llmeval/lib/python3.10/site-packages/scipy/stats/_binomtest.py
ADDED
@@ -0,0 +1,375 @@
from math import sqrt
import numpy as np
from scipy._lib._util import _validate_int
from scipy.optimize import brentq
from scipy.special import ndtri
from ._discrete_distns import binom
from ._common import ConfidenceInterval


class BinomTestResult:
    """
    Result of `scipy.stats.binomtest`.

    Attributes
    ----------
    k : int
        The number of successes (copied from `binomtest` input).
    n : int
        The number of trials (copied from `binomtest` input).
    alternative : str
        Indicates the alternative hypothesis specified in the input
        to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
        or ``'less'``.
    statistic : float
        The estimate of the proportion of successes.
    pvalue : float
        The p-value of the hypothesis test.

    """
    def __init__(self, k, n, alternative, statistic, pvalue):
        self.k = k
        self.n = n
        self.alternative = alternative
        self.statistic = statistic
        self.pvalue = pvalue

        # add alias for backward compatibility
        self.proportion_estimate = statistic

    def __repr__(self):
        s = ("BinomTestResult("
             f"k={self.k}, "
             f"n={self.n}, "
             f"alternative={self.alternative!r}, "
             f"statistic={self.statistic}, "
             f"pvalue={self.pvalue})")
        return s

    def proportion_ci(self, confidence_level=0.95, method='exact'):
        """
        Compute the confidence interval for ``statistic``.

        Parameters
        ----------
        confidence_level : float, optional
            Confidence level for the computed confidence interval
            of the estimated proportion. Default is 0.95.
        method : {'exact', 'wilson', 'wilsoncc'}, optional
            Selects the method used to compute the confidence interval
            for the estimate of the proportion:

            'exact' :
                Use the Clopper-Pearson exact method [1]_.
            'wilson' :
                Wilson's method, without continuity correction ([2]_, [3]_).
            'wilsoncc' :
                Wilson's method, with continuity correction ([2]_, [3]_).

            Default is ``'exact'``.

        Returns
        -------
        ci : ``ConfidenceInterval`` object
            The object has attributes ``low`` and ``high`` that hold the
            lower and upper bounds of the confidence interval.

        References
        ----------
        .. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
               fiducial limits illustrated in the case of the binomial,
               Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
        .. [2] E. B. Wilson, Probable inference, the law of succession, and
               statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
               (1927).
        .. [3] Robert G. Newcombe, Two-sided confidence intervals for the
               single proportion: comparison of seven methods, Statistics
               in Medicine, 17, pp 857-872 (1998).

        Examples
        --------
        >>> from scipy.stats import binomtest
        >>> result = binomtest(k=7, n=50, p=0.1)
        >>> result.statistic
        0.14
        >>> result.proportion_ci()
        ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846)
        """
        if method not in ('exact', 'wilson', 'wilsoncc'):
            raise ValueError(f"method ('{method}') must be one of 'exact', "
                             "'wilson' or 'wilsoncc'.")
        if not (0 <= confidence_level <= 1):
            raise ValueError(f'confidence_level ({confidence_level}) must be in '
                             'the interval [0, 1].')
        if method == 'exact':
            low, high = _binom_exact_conf_int(self.k, self.n,
                                              confidence_level,
                                              self.alternative)
        else:
            # method is 'wilson' or 'wilsoncc'
            low, high = _binom_wilson_conf_int(self.k, self.n,
                                               confidence_level,
                                               self.alternative,
                                               correction=method == 'wilsoncc')
        return ConfidenceInterval(low=low, high=high)

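A short sketch (assumed example, separate from the module) comparing the three interval methods; the Wilson intervals are typically a little narrower than the conservative Clopper-Pearson bounds:

    from scipy.stats import binomtest
    res = binomtest(k=7, n=50, p=0.1)
    ci_exact = res.proportion_ci(method='exact')
    ci_wilson = res.proportion_ci(method='wilson')
    ci_wilsoncc = res.proportion_ci(method='wilsoncc')
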
def _findp(func):
    try:
        p = brentq(func, 0, 1)
    except RuntimeError:
        raise RuntimeError('numerical solver failed to converge when '
                           'computing the confidence limits') from None
    except ValueError as exc:
        raise ValueError('brentq raised a ValueError; report this to the '
                         'SciPy developers') from exc
    return p


def _binom_exact_conf_int(k, n, confidence_level, alternative):
    """
    Compute the exact (Clopper-Pearson) confidence interval for the
    binomial test.

    Returns prop_low, prop_high.
    """
    if alternative == 'two-sided':
        alpha = (1 - confidence_level) / 2
        if k == 0:
            plow = 0.0
        else:
            plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
        if k == n:
            phigh = 1.0
        else:
            phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
    elif alternative == 'less':
        alpha = 1 - confidence_level
        plow = 0.0
        if k == n:
            phigh = 1.0
        else:
            phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
    elif alternative == 'greater':
        alpha = 1 - confidence_level
        if k == 0:
            plow = 0.0
        else:
            plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
        phigh = 1.0
    return plow, phigh

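A sketch (assumed numbers, separate from the module) of the Clopper-Pearson construction above: the lower limit is the p at which the probability of seeing k or more successes equals alpha.

    from scipy.optimize import brentq
    from scipy.stats import binom
    k, n, alpha = 7, 50, 0.025   # alpha = (1 - 0.95) / 2 for a 95% interval
    plow = brentq(lambda p: binom.sf(k - 1, n, p) - alpha, 0, 1)
    # plow -> ~0.0582, matching the proportion_ci() example earlier.
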
def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
    # This function assumes that the arguments have already been validated.
    # In particular, `alternative` must be one of 'two-sided', 'less' or
    # 'greater'.
    p = k / n
    if alternative == 'two-sided':
        z = ndtri(0.5 + 0.5*confidence_level)
    else:
        z = ndtri(confidence_level)

    # For reference, the formulas implemented here are from
    # Newcombe (1998) (ref. [3] in the proportion_ci docstring).
    denom = 2*(n + z**2)
    center = (2*n*p + z**2)/denom
    q = 1 - p
    if correction:
        if alternative == 'less' or k == 0:
            lo = 0.0
        else:
            dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom
            lo = center - dlo
        if alternative == 'greater' or k == n:
            hi = 1.0
        else:
            dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom
            hi = center + dhi
    else:
        delta = z/denom * sqrt(4*n*p*q + z**2)
        if alternative == 'less' or k == 0:
            lo = 0.0
        else:
            lo = center - delta
        if alternative == 'greater' or k == n:
            hi = 1.0
        else:
            hi = center + delta

    return lo, hi

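The center/delta form above is the usual Wilson score interval with numerator and denominator multiplied through by 2n; a sketch (assumed numbers) checking the algebra for the lower limit without continuity correction:

    from math import sqrt
    k, n, z = 7, 50, 1.959963984540054   # z = ndtri(0.975) for 95% two-sided
    p = k / n
    lo_textbook = ((p + z**2/(2*n) - z*sqrt(p*(1-p)/n + z**2/(4*n**2)))
                   / (1 + z**2/n))
    lo_above = (2*n*p + z**2 - z*sqrt(4*n*p*(1-p) + z**2)) / (2*(n + z**2))
    # lo_textbook and lo_above agree to floating-point precision.
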
def binomtest(k, n, p=0.5, alternative='two-sided'):
    """
    Perform a test that the probability of success is p.

    The binomial test [1]_ is a test of the null hypothesis that the
    probability of success in a Bernoulli experiment is `p`.

    Details of the test can be found in many texts on statistics, such
    as section 24.5 of [2]_.

    Parameters
    ----------
    k : int
        The number of successes.
    n : int
        The number of trials.
    p : float, optional
        The hypothesized probability of success, i.e. the expected
        proportion of successes. The value must be in the interval
        ``0 <= p <= 1``. The default value is ``p = 0.5``.
    alternative : {'two-sided', 'greater', 'less'}, optional
        Indicates the alternative hypothesis. The default value is
        'two-sided'.

    Returns
    -------
    result : `~scipy.stats._result_classes.BinomTestResult` instance
        The return value is an object with the following attributes:

        k : int
            The number of successes (copied from `binomtest` input).
        n : int
            The number of trials (copied from `binomtest` input).
        alternative : str
            Indicates the alternative hypothesis specified in the input
            to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
            or ``'less'``.
        statistic : float
            The estimate of the proportion of successes.
        pvalue : float
            The p-value of the hypothesis test.

        The object has the following methods:

        proportion_ci(confidence_level=0.95, method='exact') :
            Compute the confidence interval for ``statistic``.

    Notes
    -----
    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
    .. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
           Prentice Hall, Upper Saddle River, New Jersey USA (2010)

    Examples
    --------
    >>> from scipy.stats import binomtest

    A car manufacturer claims that no more than 10% of their cars are unsafe.
    15 cars are inspected for safety, 3 were found to be unsafe. Test the
    manufacturer's claim:

    >>> result = binomtest(3, n=15, p=0.1, alternative='greater')
    >>> result.pvalue
    0.18406106910639114

    The null hypothesis cannot be rejected at the 5% level of significance
    because the returned p-value is greater than the critical value of 5%.

    The test statistic is equal to the estimated proportion, which is simply
    ``3/15``:

    >>> result.statistic
    0.2

    We can use the `proportion_ci()` method of the result to compute the
    confidence interval of the estimate:

    >>> result.proportion_ci(confidence_level=0.95)
    ConfidenceInterval(low=0.05684686759024681, high=1.0)

    """
    k = _validate_int(k, 'k', minimum=0)
    n = _validate_int(n, 'n', minimum=1)
    if k > n:
        raise ValueError(f'k ({k}) must not be greater than n ({n}).')

    if not (0 <= p <= 1):
        raise ValueError(f"p ({p}) must be in range [0,1]")

    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError(f"alternative ('{alternative}') not recognized; \n"
                         "must be 'two-sided', 'less' or 'greater'")
    if alternative == 'less':
        pval = binom.cdf(k, n, p)
    elif alternative == 'greater':
        pval = binom.sf(k-1, n, p)
    else:
        # alternative is 'two-sided'
        d = binom.pmf(k, n, p)
        rerr = 1 + 1e-7
        if k == p * n:
            # special case as shortcut, would also be handled by `else` below
            pval = 1.
        elif k < p * n:
            ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p),
                                              -d*rerr, np.ceil(p * n), n)
            # y is the number of terms between mode and n that are <= d*rerr.
            # ix gave us the first term where a(ix) <= d*rerr < a(ix-1)
            # if the first equality doesn't hold, y=n-ix. Otherwise, we
            # need to include ix as well, as the equality holds. Note that
            # the equality will hold in very very rare situations due to rerr.
            y = n - ix + int(d*rerr == binom.pmf(ix, n, p))
            pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
        else:
            ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p),
                                              d*rerr, 0, np.floor(p * n))
            # y is the number of terms between 0 and mode that are <= d*rerr.
            # we need to add a 1 to account for the 0 index.
            # For comparing this with old behavior, see
            # tst_binary_srch_for_binom_tst method in test_morestats.
            y = ix + 1
            pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)

        pval = min(1.0, pval)

    result = BinomTestResult(k=k, n=n, alternative=alternative,
                             statistic=k/n, pvalue=pval)
    return result

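The two-sided branch above implements the "sum of all outcomes no more likely than the observed one" rule via binary search; a brute-force sketch (assumed numbers) that should give the same p-value:

    import numpy as np
    from scipy.stats import binom
    k, n, p = 3, 15, 0.1
    d = binom.pmf(k, n, p)
    probs = binom.pmf(np.arange(n + 1), n, p)
    pval = probs[probs <= d * (1 + 1e-7)].sum()
    # pval should match binomtest(3, n=15, p=0.1).pvalue
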
def _binary_search_for_binom_tst(a, d, lo, hi):
    """
    Conducts an implicit binary search on a function specified by `a`.

    Meant to be used on the binomial PMF for the case of two-sided tests
    to obtain the value on the other side of the mode where the tail
    probability should be computed. The values on either side of
    the mode are always in order, meaning binary search is applicable.

    Parameters
    ----------
    a : callable
        The function over which to perform binary search. Its values
        for inputs lo and hi should be in ascending order.
    d : float
        The value to search.
    lo : int
        The lower end of range to search.
    hi : int
        The higher end of the range to search.

    Returns
    -------
    int
        The index i, between lo and hi, such that ``a(i) <= d < a(i+1)``.
    """
    while lo < hi:
        mid = lo + (hi-lo)//2
        midval = a(mid)
        if midval < d:
            lo = mid+1
        elif midval > d:
            hi = mid-1
        else:
            return mid
    if a(lo) <= d:
        return lo
    else:
        return lo-1
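A tiny sketch (assumed sequence) of the helper above on an ascending function:

    a = lambda i: [1, 2, 4, 8, 16][i]
    # _binary_search_for_binom_tst(a, 5, 0, 4) -> 2,
    # since a(2) = 4 <= 5 < a(3) = 8.
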
env-llmeval/lib/python3.10/site-packages/scipy/stats/_constants.py
ADDED
@@ -0,0 +1,39 @@
"""
Statistics-related constants.

"""
import numpy as np


# The smallest representable positive number such that 1.0 + _EPS != 1.0.
_EPS = np.finfo(float).eps

# The largest [in magnitude] usable floating value.
_XMAX = np.finfo(float).max

# The log of the largest usable floating value; useful for knowing
# when exp(something) will overflow
_LOGXMAX = np.log(_XMAX)

# The smallest [in magnitude] usable (i.e. not subnormal) double precision
# floating value.
_XMIN = np.finfo(float).tiny

# The log of the smallest [in magnitude] usable (i.e not subnormal)
# double precision floating value.
_LOGXMIN = np.log(_XMIN)

# -special.psi(1)
_EULER = 0.577215664901532860606512090082402431042

# special.zeta(3, 1)  Apery's constant
_ZETA3 = 1.202056903159594285399738161511449990765

# sqrt(pi)
_SQRT_PI = 1.772453850905516027298167483341145182798

# sqrt(2/pi)
_SQRT_2_OVER_PI = 0.7978845608028654

# log(sqrt(2/pi))
_LOG_SQRT_2_OVER_PI = -0.22579135264472744
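A quick sketch (separate from the module) cross-checking a few of the constants above against scipy.special:

    import numpy as np
    from scipy import special
    assert np.isclose(0.5772156649015329, -special.psi(1))      # _EULER
    assert np.isclose(1.2020569031595943, special.zeta(3, 1))   # _ZETA3
    assert np.isclose(0.7978845608028654, np.sqrt(2 / np.pi))   # _SQRT_2_OVER_PI
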
env-llmeval/lib/python3.10/site-packages/scipy/stats/_continuous_distns.py
ADDED
The diff for this file is too large to render.
See raw diff
env-llmeval/lib/python3.10/site-packages/scipy/stats/_covariance.py
ADDED
@@ -0,0 +1,633 @@
1 |
+
from functools import cached_property
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from scipy import linalg
|
5 |
+
from scipy.stats import _multivariate
|
6 |
+
|
7 |
+
|
8 |
+
__all__ = ["Covariance"]
|
9 |
+
|
10 |
+
|
11 |
+
class Covariance:
|
12 |
+
"""
|
13 |
+
Representation of a covariance matrix
|
14 |
+
|
15 |
+
Calculations involving covariance matrices (e.g. data whitening,
|
16 |
+
multivariate normal function evaluation) are often performed more
|
17 |
+
efficiently using a decomposition of the covariance matrix instead of the
|
18 |
+
covariance matrix itself. This class allows the user to construct an
|
19 |
+
object representing a covariance matrix using any of several
|
20 |
+
decompositions and perform calculations using a common interface.
|
21 |
+
|
22 |
+
.. note::
|
23 |
+
|
24 |
+
The `Covariance` class cannot be instantiated directly. Instead, use
|
25 |
+
one of the factory methods (e.g. `Covariance.from_diagonal`).
|
26 |
+
|
27 |
+
Examples
|
28 |
+
--------
|
29 |
+
The `Covariance` class is is used by calling one of its
|
30 |
+
factory methods to create a `Covariance` object, then pass that
|
31 |
+
representation of the `Covariance` matrix as a shape parameter of a
|
32 |
+
multivariate distribution.
|
33 |
+
|
34 |
+
For instance, the multivariate normal distribution can accept an array
|
35 |
+
representing a covariance matrix:
|
36 |
+
|
37 |
+
>>> from scipy import stats
|
38 |
+
>>> import numpy as np
|
39 |
+
>>> d = [1, 2, 3]
|
40 |
+
>>> A = np.diag(d) # a diagonal covariance matrix
|
41 |
+
>>> x = [4, -2, 5] # a point of interest
|
42 |
+
>>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=A)
|
43 |
+
>>> dist.pdf(x)
|
44 |
+
4.9595685102808205e-08
|
45 |
+
|
46 |
+
but the calculations are performed in a very generic way that does not
|
47 |
+
take advantage of any special properties of the covariance matrix. Because
|
48 |
+
our covariance matrix is diagonal, we can use ``Covariance.from_diagonal``
|
49 |
+
to create an object representing the covariance matrix, and
|
50 |
+
`multivariate_normal` can use this to compute the probability density
|
51 |
+
function more efficiently.
|
52 |
+
|
53 |
+
>>> cov = stats.Covariance.from_diagonal(d)
|
54 |
+
>>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=cov)
|
55 |
+
>>> dist.pdf(x)
|
56 |
+
4.9595685102808205e-08
|
57 |
+
|
58 |
+
"""
|
59 |
+
def __init__(self):
|
60 |
+
message = ("The `Covariance` class cannot be instantiated directly. "
|
61 |
+
"Please use one of the factory methods "
|
62 |
+
"(e.g. `Covariance.from_diagonal`).")
|
63 |
+
raise NotImplementedError(message)
|
64 |
+
|
65 |
+
@staticmethod
|
66 |
+
def from_diagonal(diagonal):
|
67 |
+
r"""
|
68 |
+
Return a representation of a covariance matrix from its diagonal.
|
69 |
+
|
70 |
+
Parameters
|
71 |
+
----------
|
72 |
+
diagonal : array_like
|
73 |
+
The diagonal elements of a diagonal matrix.
|
74 |
+
|
75 |
+
Notes
|
76 |
+
-----
|
77 |
+
Let the diagonal elements of a diagonal covariance matrix :math:`D` be
|
78 |
+
stored in the vector :math:`d`.
|
79 |
+
|
80 |
+
When all elements of :math:`d` are strictly positive, whitening of a
|
81 |
+
data point :math:`x` is performed by computing
|
82 |
+
:math:`x \cdot d^{-1/2}`, where the inverse square root can be taken
|
83 |
+
element-wise.
|
84 |
+
:math:`\log\det{D}` is calculated as :math:`-2 \sum(\log{d})`,
|
85 |
+
where the :math:`\log` operation is performed element-wise.
|
86 |
+
|
87 |
+
This `Covariance` class supports singular covariance matrices. When
|
88 |
+
computing ``_log_pdet``, non-positive elements of :math:`d` are
|
89 |
+
ignored. Whitening is not well defined when the point to be whitened
|
90 |
+
does not lie in the span of the columns of the covariance matrix. The
|
91 |
+
convention taken here is to treat the inverse square root of
|
92 |
+
non-positive elements of :math:`d` as zeros.
|
93 |
+
|
94 |
+
Examples
|
95 |
+
--------
|
96 |
+
Prepare a symmetric positive definite covariance matrix ``A`` and a
|
97 |
+
data point ``x``.
|
98 |
+
|
99 |
+
>>> import numpy as np
|
100 |
+
>>> from scipy import stats
|
101 |
+
>>> rng = np.random.default_rng()
|
102 |
+
>>> n = 5
|
103 |
+
>>> A = np.diag(rng.random(n))
|
104 |
+
>>> x = rng.random(size=n)
|
105 |
+
|
106 |
+
Extract the diagonal from ``A`` and create the `Covariance` object.
|
107 |
+
|
108 |
+
>>> d = np.diag(A)
|
109 |
+
>>> cov = stats.Covariance.from_diagonal(d)
|
110 |
+
|
111 |
+
Compare the functionality of the `Covariance` object against a
|
112 |
+
reference implementations.
|
113 |
+
|
114 |
+
>>> res = cov.whiten(x)
|
115 |
+
>>> ref = np.diag(d**-0.5) @ x
|
116 |
+
>>> np.allclose(res, ref)
|
117 |
+
True
|
118 |
+
>>> res = cov.log_pdet
|
119 |
+
>>> ref = np.linalg.slogdet(A)[-1]
|
120 |
+
>>> np.allclose(res, ref)
|
121 |
+
True
|
122 |
+
|
123 |
+
"""
|
124 |
+
return CovViaDiagonal(diagonal)
|
125 |
+
|
126 |
+
    @staticmethod
    def from_precision(precision, covariance=None):
        r"""
        Return a representation of a covariance from its precision matrix.

        Parameters
        ----------
        precision : array_like
            The precision matrix; that is, the inverse of a square, symmetric,
            positive definite covariance matrix.
        covariance : array_like, optional
            The square, symmetric, positive definite covariance matrix. If not
            provided, this may need to be calculated (e.g. to evaluate the
            cumulative distribution function of
            `scipy.stats.multivariate_normal`) by inverting `precision`.

        Notes
        -----
        Let the covariance matrix be :math:`A`, its precision matrix be
        :math:`P = A^{-1}`, and :math:`L` be the lower Cholesky factor such
        that :math:`L L^T = P`.
        Whitening of a data point :math:`x` is performed by computing
        :math:`x^T L`. :math:`\log\det{A}` is calculated as
        :math:`-2tr(\log{L})`, where the :math:`\log` operation is performed
        element-wise.

        This `Covariance` class does not support singular covariance matrices
        because the precision matrix does not exist for a singular covariance
        matrix.

        Examples
        --------
        Prepare a symmetric positive definite precision matrix ``P`` and a
        data point ``x``. (If the precision matrix is not already available,
        consider the other factory methods of the `Covariance` class.)

        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng()
        >>> n = 5
        >>> P = rng.random(size=(n, n))
        >>> P = P @ P.T  # a precision matrix must be positive definite
        >>> x = rng.random(size=n)

        Create the `Covariance` object.

        >>> cov = stats.Covariance.from_precision(P)

        Compare the functionality of the `Covariance` object against
        reference implementations.

        >>> res = cov.whiten(x)
        >>> ref = x @ np.linalg.cholesky(P)
        >>> np.allclose(res, ref)
        True
        >>> res = cov.log_pdet
        >>> ref = -np.linalg.slogdet(P)[-1]
        >>> np.allclose(res, ref)
        True

        """
        return CovViaPrecision(precision, covariance)

    @staticmethod
    def from_cholesky(cholesky):
        r"""
        Representation of a covariance provided via the (lower) Cholesky factor

        Parameters
        ----------
        cholesky : array_like
            The lower triangular Cholesky factor of the covariance matrix.

        Notes
        -----
        Let the covariance matrix be :math:`A` and :math:`L` be the lower
        Cholesky factor such that :math:`L L^T = A`.
        Whitening of a data point :math:`x` is performed by computing
        :math:`L^{-1} x`. :math:`\log\det{A}` is calculated as
        :math:`2tr(\log{L})`, where the :math:`\log` operation is performed
        element-wise.

        This `Covariance` class does not support singular covariance matrices
        because the Cholesky decomposition does not exist for a singular
        covariance matrix.

        Examples
        --------
        Prepare a symmetric positive definite covariance matrix ``A`` and a
        data point ``x``.

        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng()
        >>> n = 5
        >>> A = rng.random(size=(n, n))
        >>> A = A @ A.T  # make the covariance symmetric positive definite
        >>> x = rng.random(size=n)

        Perform the Cholesky decomposition of ``A`` and create the
        `Covariance` object.

        >>> L = np.linalg.cholesky(A)
        >>> cov = stats.Covariance.from_cholesky(L)

        Compare the functionality of the `Covariance` object against a
        reference implementation.

        >>> from scipy.linalg import solve_triangular
        >>> res = cov.whiten(x)
        >>> ref = solve_triangular(L, x, lower=True)
        >>> np.allclose(res, ref)
        True
        >>> res = cov.log_pdet
        >>> ref = np.linalg.slogdet(A)[-1]
        >>> np.allclose(res, ref)
        True

        """
        return CovViaCholesky(cholesky)

    @staticmethod
    def from_eigendecomposition(eigendecomposition):
        r"""
        Representation of a covariance provided via eigendecomposition

        Parameters
        ----------
        eigendecomposition : sequence
            A sequence (nominally a tuple) containing the eigenvalue and
            eigenvector arrays as computed by `scipy.linalg.eigh` or
            `numpy.linalg.eigh`.

        Notes
        -----
        Let the covariance matrix be :math:`A`, let :math:`V` be the matrix
        of eigenvectors, and let :math:`W` be the diagonal matrix of
        eigenvalues such that :math:`V W V^T = A`.

        When all of the eigenvalues are strictly positive, whitening of a
        data point :math:`x` is performed by computing
        :math:`x^T (V W^{-1/2})`, where the inverse square root can be taken
        element-wise.
        :math:`\log\det{A}` is calculated as :math:`tr(\log{W})`,
        where the :math:`\log` operation is performed element-wise.

        This `Covariance` class supports singular covariance matrices. When
        computing ``_log_pdet``, non-positive eigenvalues are ignored.
        Whitening is not well defined when the point to be whitened
        does not lie in the span of the columns of the covariance matrix. The
        convention taken here is to treat the inverse square root of
        non-positive eigenvalues as zeros.

        Examples
        --------
        Prepare a symmetric positive definite covariance matrix ``A`` and a
        data point ``x``.

        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng()
        >>> n = 5
        >>> A = rng.random(size=(n, n))
        >>> A = A @ A.T  # make the covariance symmetric positive definite
        >>> x = rng.random(size=n)

        Perform the eigendecomposition of ``A`` and create the `Covariance`
        object.

        >>> w, v = np.linalg.eigh(A)
        >>> cov = stats.Covariance.from_eigendecomposition((w, v))

        Compare the functionality of the `Covariance` object against
        reference implementations.

        >>> res = cov.whiten(x)
        >>> ref = x @ (v @ np.diag(w**-0.5))
        >>> np.allclose(res, ref)
        True
        >>> res = cov.log_pdet
        >>> ref = np.linalg.slogdet(A)[-1]
        >>> np.allclose(res, ref)
        True

        """
        return CovViaEigendecomposition(eigendecomposition)

    def whiten(self, x):
        """
        Perform a whitening transformation on data.

        "Whitening" ("white" as in "white noise", in which each frequency has
        equal magnitude) transforms a set of random variables into a new set
        of random variables with unit-diagonal covariance. When a whitening
        transform is applied to a sample of points distributed according to
        a multivariate normal distribution with zero mean, the covariance of
        the transformed sample is approximately the identity matrix.

        Parameters
        ----------
        x : array_like
            An array of points. The last dimension must correspond with the
            dimensionality of the space, i.e., the number of columns in the
            covariance matrix.

        Returns
        -------
        x_ : array_like
            The transformed array of points.

        References
        ----------
        .. [1] "Whitening Transformation". Wikipedia.
               https://en.wikipedia.org/wiki/Whitening_transformation
        .. [2] Novak, Lukas, and Miroslav Vorechovsky. "Generalization of
               coloring linear transformation". Transactions of VSB 18.2
               (2018): 31-35. :doi:`10.31490/tces-2018-0013`

        Examples
        --------
        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng()
        >>> n = 3
        >>> A = rng.random(size=(n, n))
        >>> cov_array = A @ A.T  # make matrix symmetric positive definite
        >>> precision = np.linalg.inv(cov_array)
        >>> cov_object = stats.Covariance.from_precision(precision)
        >>> x = rng.multivariate_normal(np.zeros(n), cov_array, size=(10000))
        >>> x_ = cov_object.whiten(x)
        >>> np.cov(x_, rowvar=False)  # near-identity covariance
        array([[0.97862122, 0.00893147, 0.02430451],
               [0.00893147, 0.96719062, 0.02201312],
               [0.02430451, 0.02201312, 0.99206881]])

        """
        return self._whiten(np.asarray(x))

    def colorize(self, x):
        """
        Perform a colorizing transformation on data.

        "Colorizing" ("color" as in "colored noise", in which different
        frequencies may have different magnitudes) transforms a set of
        uncorrelated random variables into a new set of random variables with
        the desired covariance. When a coloring transform is applied to a
        sample of points distributed according to a multivariate normal
        distribution with identity covariance and zero mean, the covariance of
        the transformed sample is approximately the covariance matrix used
        in the coloring transform.

        Parameters
        ----------
        x : array_like
            An array of points. The last dimension must correspond with the
            dimensionality of the space, i.e., the number of columns in the
            covariance matrix.

        Returns
        -------
        x_ : array_like
            The transformed array of points.

        References
        ----------
        .. [1] "Whitening Transformation". Wikipedia.
               https://en.wikipedia.org/wiki/Whitening_transformation
        .. [2] Novak, Lukas, and Miroslav Vorechovsky. "Generalization of
               coloring linear transformation". Transactions of VSB 18.2
               (2018): 31-35. :doi:`10.31490/tces-2018-0013`

        Examples
        --------
        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng(1638083107694713882823079058616272161)
        >>> n = 3
        >>> A = rng.random(size=(n, n))
        >>> cov_array = A @ A.T  # make matrix symmetric positive definite
        >>> cholesky = np.linalg.cholesky(cov_array)
        >>> cov_object = stats.Covariance.from_cholesky(cholesky)
        >>> x = rng.multivariate_normal(np.zeros(n), np.eye(n), size=(10000))
        >>> x_ = cov_object.colorize(x)
        >>> cov_data = np.cov(x_, rowvar=False)
        >>> np.allclose(cov_data, cov_array, rtol=3e-2)
        True

        """
        return self._colorize(np.asarray(x))

    @property
    def log_pdet(self):
        """
        Log of the pseudo-determinant of the covariance matrix
        """
        return np.array(self._log_pdet, dtype=float)[()]

    @property
    def rank(self):
        """
        Rank of the covariance matrix
        """
        return np.array(self._rank, dtype=int)[()]

    @property
    def covariance(self):
        """
        Explicit representation of the covariance matrix
        """
        return self._covariance

    @property
    def shape(self):
        """
        Shape of the covariance array
        """
        return self._shape

    def _validate_matrix(self, A, name):
        A = np.atleast_2d(A)
        m, n = A.shape[-2:]
        if m != n or A.ndim != 2 or not (np.issubdtype(A.dtype, np.integer) or
                                         np.issubdtype(A.dtype, np.floating)):
            message = (f"The input `{name}` must be a square, "
                       "two-dimensional array of real numbers.")
            raise ValueError(message)
        return A

    def _validate_vector(self, A, name):
        A = np.atleast_1d(A)
        if A.ndim != 1 or not (np.issubdtype(A.dtype, np.integer) or
                               np.issubdtype(A.dtype, np.floating)):
            message = (f"The input `{name}` must be a one-dimensional array "
                       "of real numbers.")
            raise ValueError(message)
        return A


class CovViaPrecision(Covariance):

    def __init__(self, precision, covariance=None):
        precision = self._validate_matrix(precision, 'precision')
        if covariance is not None:
            covariance = self._validate_matrix(covariance, 'covariance')
            message = "`precision.shape` must equal `covariance.shape`."
            if precision.shape != covariance.shape:
                raise ValueError(message)

        self._chol_P = np.linalg.cholesky(precision)
        self._log_pdet = -2*np.log(np.diag(self._chol_P)).sum(axis=-1)
        self._rank = precision.shape[-1]  # must be full rank if invertible
        self._precision = precision
        self._cov_matrix = covariance
        self._shape = precision.shape
        self._allow_singular = False

    def _whiten(self, x):
        return x @ self._chol_P

    @cached_property
    def _covariance(self):
        n = self._shape[-1]
        return (linalg.cho_solve((self._chol_P, True), np.eye(n))
                if self._cov_matrix is None else self._cov_matrix)

    def _colorize(self, x):
        return linalg.solve_triangular(self._chol_P.T, x.T, lower=False).T


def _dot_diag(x, d):
    # If d were a full diagonal matrix, x @ d would always do what we want.
    # Special treatment is needed for n-dimensional `d` in which each row
    # includes only the diagonal elements of a covariance matrix.
    return x * d if x.ndim < 2 else x * np.expand_dims(d, -2)


class CovViaDiagonal(Covariance):

    def __init__(self, diagonal):
        diagonal = self._validate_vector(diagonal, 'diagonal')

        i_zero = diagonal <= 0
        positive_diagonal = np.array(diagonal, dtype=np.float64)

        positive_diagonal[i_zero] = 1  # ones don't affect determinant
        self._log_pdet = np.sum(np.log(positive_diagonal), axis=-1)

        pseudo_reciprocals = 1 / np.sqrt(positive_diagonal)
        pseudo_reciprocals[i_zero] = 0

        self._sqrt_diagonal = np.sqrt(diagonal)
        self._LP = pseudo_reciprocals
        self._rank = positive_diagonal.shape[-1] - i_zero.sum(axis=-1)
        self._covariance = np.apply_along_axis(np.diag, -1, diagonal)
        self._i_zero = i_zero
        self._shape = self._covariance.shape
        self._allow_singular = True

    def _whiten(self, x):
        return _dot_diag(x, self._LP)

    def _colorize(self, x):
        return _dot_diag(x, self._sqrt_diagonal)

    def _support_mask(self, x):
        """
        Check whether x lies in the support of the distribution.
        """
        return ~np.any(_dot_diag(x, self._i_zero), axis=-1)


class CovViaCholesky(Covariance):

    def __init__(self, cholesky):
        L = self._validate_matrix(cholesky, 'cholesky')

        self._factor = L
        self._log_pdet = 2*np.log(np.diag(self._factor)).sum(axis=-1)
        self._rank = L.shape[-1]  # must be full rank for cholesky
        self._shape = L.shape
        self._allow_singular = False

    @cached_property
    def _covariance(self):
        return self._factor @ self._factor.T

    def _whiten(self, x):
        res = linalg.solve_triangular(self._factor, x.T, lower=True).T
        return res

    def _colorize(self, x):
        return x @ self._factor.T


class CovViaEigendecomposition(Covariance):

    def __init__(self, eigendecomposition):
        eigenvalues, eigenvectors = eigendecomposition
        eigenvalues = self._validate_vector(eigenvalues, 'eigenvalues')
        eigenvectors = self._validate_matrix(eigenvectors, 'eigenvectors')
        message = ("The shapes of `eigenvalues` and `eigenvectors` "
                   "must be compatible.")
        try:
            eigenvalues = np.expand_dims(eigenvalues, -2)
            eigenvectors, eigenvalues = np.broadcast_arrays(eigenvectors,
                                                            eigenvalues)
            eigenvalues = eigenvalues[..., 0, :]
        except ValueError:
            raise ValueError(message)

        i_zero = eigenvalues <= 0
        positive_eigenvalues = np.array(eigenvalues, dtype=np.float64)

        positive_eigenvalues[i_zero] = 1  # ones don't affect determinant
        self._log_pdet = np.sum(np.log(positive_eigenvalues), axis=-1)

        pseudo_reciprocals = 1 / np.sqrt(positive_eigenvalues)
        pseudo_reciprocals[i_zero] = 0

        self._LP = eigenvectors * pseudo_reciprocals
        self._LA = eigenvectors * np.sqrt(eigenvalues)
        self._rank = positive_eigenvalues.shape[-1] - i_zero.sum(axis=-1)
        self._w = eigenvalues
        self._v = eigenvectors
        self._shape = eigenvectors.shape
        self._null_basis = eigenvectors * i_zero
        # This is only used for `_support_mask`, not to decide whether
        # the covariance is singular or not.
        self._eps = _multivariate._eigvalsh_to_eps(eigenvalues) * 10**3
        self._allow_singular = True

    def _whiten(self, x):
        return x @ self._LP

    def _colorize(self, x):
        return x @ self._LA.T

    @cached_property
    def _covariance(self):
        return (self._v * self._w) @ self._v.T

    def _support_mask(self, x):
        """
        Check whether x lies in the support of the distribution.
        """
        residual = np.linalg.norm(x @ self._null_basis, axis=-1)
        in_support = residual < self._eps
        return in_support


class CovViaPSD(Covariance):
    """
    Representation of a covariance provided via an instance of _PSD
    """

    def __init__(self, psd):
        self._LP = psd.U
        self._log_pdet = psd.log_pdet
        self._rank = psd.rank
        self._covariance = psd._M
        self._shape = psd._M.shape
        self._psd = psd
        self._allow_singular = False  # by default

    def _whiten(self, x):
        return x @ self._LP

    def _support_mask(self, x):
        return self._psd._support_mask(x)

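# --- Illustrative sketch (not part of the module above) ----------------------
# A minimal usage example tying the factory methods together, assuming
# SciPy >= 1.10 (where `Covariance` was introduced): `whiten` and `colorize`
# are mutual inverses for a full-rank covariance, and `log_pdet` agrees with
# the log-determinant computed directly.
if __name__ == "__main__":
    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(0)
    A = rng.random((4, 4))
    A = A @ A.T + 4 * np.eye(4)   # symmetric positive definite
    x = rng.random(4)

    cov = stats.Covariance.from_cholesky(np.linalg.cholesky(A))
    z = cov.whiten(x)             # computes L^{-1} x
    assert np.allclose(cov.colorize(z), x)                     # round trip
    assert np.isclose(cov.log_pdet, np.linalg.slogdet(A)[-1])
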
env-llmeval/lib/python3.10/site-packages/scipy/stats/_crosstab.py
ADDED
@@ -0,0 +1,204 @@
import numpy as np
from scipy.sparse import coo_matrix
from scipy._lib._bunch import _make_tuple_bunch


CrosstabResult = _make_tuple_bunch(
    "CrosstabResult", ["elements", "count"]
)


def crosstab(*args, levels=None, sparse=False):
    """
    Return table of counts for each possible unique combination in ``*args``.

    When ``len(args) > 1``, the array computed by this function is
    often referred to as a *contingency table* [1]_.

    The arguments must be sequences with the same length.  The second return
    value, `count`, is an integer array with ``len(args)`` dimensions.  If
    `levels` is None, the shape of `count` is ``(n0, n1, ...)``, where ``nk``
    is the number of unique elements in ``args[k]``.

    Parameters
    ----------
    *args : sequences
        A sequence of sequences whose unique aligned elements are to be
        counted.  The sequences in `args` must all be the same length.
    levels : sequence, optional
        If `levels` is given, it must be a sequence that is the same length as
        `args`.  Each element in `levels` is either a sequence or None.  If it
        is a sequence, it gives the values in the corresponding sequence in
        `args` that are to be counted.  If any value in the sequences in `args`
        does not occur in the corresponding sequence in `levels`, that value
        is ignored and not counted in the returned array `count`.  The default
        value of `levels` for ``args[i]`` is ``np.unique(args[i])``.
    sparse : bool, optional
        If True, return a sparse matrix.  The matrix will be an instance of
        the `scipy.sparse.coo_matrix` class.  Because SciPy's sparse matrices
        must be 2-d, only two input sequences are allowed when `sparse` is
        True.  Default is False.

    Returns
    -------
    res : CrosstabResult
        An object containing the following attributes:

        elements : tuple of numpy.ndarrays.
            Tuple of length ``len(args)`` containing the arrays of elements
            that are counted in `count`.  These can be interpreted as the
            labels of the corresponding dimensions of `count`. If `levels` was
            given, then if ``levels[i]`` is not None, ``elements[i]`` will
            hold the values given in ``levels[i]``.
        count : numpy.ndarray or scipy.sparse.coo_matrix
            Counts of the unique elements in ``zip(*args)``, stored in an
            array. Also known as a *contingency table* when ``len(args) > 1``.

    See Also
    --------
    numpy.unique

    Notes
    -----
    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] "Contingency table", http://en.wikipedia.org/wiki/Contingency_table

    Examples
    --------
    >>> from scipy.stats.contingency import crosstab

    Given the lists `a` and `x`, create a contingency table that counts the
    frequencies of the corresponding pairs.

    >>> a = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
    >>> x = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']
    >>> res = crosstab(a, x)
    >>> avals, xvals = res.elements
    >>> avals
    array(['A', 'B'], dtype='<U1')
    >>> xvals
    array(['X', 'Y', 'Z'], dtype='<U1')
    >>> res.count
    array([[2, 3, 0],
           [1, 0, 4]])

    So `('A', 'X')` occurs twice, `('A', 'Y')` occurs three times, etc.

    Higher dimensional contingency tables can be created.

    >>> p = [0, 0, 0, 0, 1, 1, 1, 0, 0, 1]
    >>> res = crosstab(a, x, p)
    >>> res.count
    array([[[2, 0],
            [2, 1],
            [0, 0]],
           [[1, 0],
            [0, 0],
            [1, 3]]])
    >>> res.count.shape
    (2, 3, 2)

    The values to be counted can be set by using the `levels` argument.
    It allows the elements of interest in each input sequence to be
    given explicitly instead of finding the unique elements of the sequence.

    For example, suppose one of the arguments is an array containing the
    answers to a survey question, with integer values 1 to 4.  Even if the
    value 1 does not occur in the data, we want an entry for it in the table.

    >>> q1 = [2, 3, 3, 2, 4, 4, 2, 3, 4, 4, 4, 3, 3, 3, 4]  # 1 does not occur.
    >>> q2 = [4, 4, 2, 2, 2, 4, 1, 1, 2, 2, 4, 2, 2, 2, 4]  # 3 does not occur.
    >>> options = [1, 2, 3, 4]
    >>> res = crosstab(q1, q2, levels=(options, options))
    >>> res.count
    array([[0, 0, 0, 0],
           [1, 1, 0, 1],
           [1, 4, 0, 1],
           [0, 3, 0, 3]])

    If `levels` is given, but an element of `levels` is None, the unique values
    of the corresponding argument are used. For example,

    >>> res = crosstab(q1, q2, levels=(None, options))
    >>> res.elements
    [array([2, 3, 4]), [1, 2, 3, 4]]
    >>> res.count
    array([[1, 1, 0, 1],
           [1, 4, 0, 1],
           [0, 3, 0, 3]])

    If we want to ignore the pairs where 4 occurs in ``q2``, we can
    give just the values [1, 2] to `levels`, and the 4 will be ignored:

    >>> res = crosstab(q1, q2, levels=(None, [1, 2]))
    >>> res.elements
    [array([2, 3, 4]), [1, 2]]
    >>> res.count
    array([[1, 1],
           [1, 4],
           [0, 3]])

    Finally, let's repeat the first example, but return a sparse matrix:

    >>> res = crosstab(a, x, sparse=True)
    >>> res.count
    <2x3 sparse matrix of type '<class 'numpy.int64'>'
        with 4 stored elements in COOrdinate format>
    >>> res.count.A
    array([[2, 3, 0],
           [1, 0, 4]])

    """
    nargs = len(args)
    if nargs == 0:
        raise TypeError("At least one input sequence is required.")

    len0 = len(args[0])
    if not all(len(a) == len0 for a in args[1:]):
        raise ValueError("All input sequences must have the same length.")

    if sparse and nargs != 2:
        raise ValueError("When `sparse` is True, only two input sequences "
                         "are allowed.")

    if levels is None:
        # Call np.unique with return_inverse=True on each argument.
        actual_levels, indices = zip(*[np.unique(a, return_inverse=True)
                                       for a in args])
    else:
        # `levels` is not None...
        if len(levels) != nargs:
            raise ValueError('len(levels) must equal the number of input '
                             'sequences')

        args = [np.asarray(arg) for arg in args]
        mask = np.zeros((nargs, len0), dtype=np.bool_)
        inv = np.zeros((nargs, len0), dtype=np.intp)
        actual_levels = []
        for k, (levels_list, arg) in enumerate(zip(levels, args)):
            if levels_list is None:
                levels_list, inv[k, :] = np.unique(arg, return_inverse=True)
                mask[k, :] = True
            else:
                q = arg == np.asarray(levels_list).reshape(-1, 1)
                mask[k, :] = np.any(q, axis=0)
                qnz = q.T.nonzero()
                inv[k, qnz[0]] = qnz[1]
            actual_levels.append(levels_list)

        mask_all = mask.all(axis=0)
        indices = tuple(inv[:, mask_all])

    if sparse:
        count = coo_matrix((np.ones(len(indices[0]), dtype=int),
                            (indices[0], indices[1])))
        count.sum_duplicates()
    else:
        shape = [len(u) for u in actual_levels]
        count = np.zeros(shape, dtype=int)
        np.add.at(count, indices, 1)

    return CrosstabResult(actual_levels, count)

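# --- Illustrative sketch (not part of the module above) ----------------------
# The dense counting core of `crosstab` reduces to
# `np.unique(..., return_inverse=True)` plus unbuffered accumulation with
# `np.add.at`, exactly as the implementation above does for the default
# `levels=None` case.
if __name__ == "__main__":
    import numpy as np
    from scipy.stats.contingency import crosstab

    a = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
    x = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']

    avals, ai = np.unique(a, return_inverse=True)
    xvals, xi = np.unique(x, return_inverse=True)
    count = np.zeros((len(avals), len(xvals)), dtype=int)
    np.add.at(count, (ai, xi), 1)   # accumulate duplicate index pairs

    assert (count == crosstab(a, x).count).all()
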
env-llmeval/lib/python3.10/site-packages/scipy/stats/_discrete_distns.py
ADDED
@@ -0,0 +1,1952 @@
#
# Author:  Travis Oliphant  2002-2011 with contributions from
#          SciPy Developers 2004-2011
#
from functools import partial

from scipy import special
from scipy.special import entr, logsumexp, betaln, gammaln as gamln, zeta
from scipy._lib._util import _lazywhere, rng_integers
from scipy.interpolate import interp1d

from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh

import numpy as np

from ._distn_infrastructure import (rv_discrete, get_distribution_names,
                                    _check_shape, _ShapeInfo)
import scipy.stats._boost as _boost
from ._biasedurn import (_PyFishersNCHypergeometric,
                         _PyWalleniusNCHypergeometric,
                         _PyStochasticLib3)


def _isintegral(x):
    return x == np.round(x)


class binom_gen(rv_discrete):
    r"""A binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `binom` is:

    .. math::

       f(k) = \binom{n}{k} p^k (1-p)^{n-k}

    for :math:`k \in \{0, 1, \dots, n\}`, :math:`0 \leq p \leq 1`

    `binom` takes :math:`n` and :math:`p` as shape parameters,
    where :math:`p` is the probability of a single success
    and :math:`1-p` is the probability of a single failure.

    %(after_notes)s

    %(example)s

    See Also
    --------
    hypergeom, nbinom, nhypergeom

    """
    def _shape_info(self):
        return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("p", False, (0, 1), (True, True))]

    def _rvs(self, n, p, size=None, random_state=None):
        return random_state.binomial(n, p, size)

    def _argcheck(self, n, p):
        return (n >= 0) & _isintegral(n) & (p >= 0) & (p <= 1)

    def _get_support(self, n, p):
        return self.a, n

    def _logpmf(self, x, n, p):
        k = floor(x)
        combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
        return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)

    def _pmf(self, x, n, p):
        # binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
        return _boost._binom_pdf(x, n, p)

    def _cdf(self, x, n, p):
        k = floor(x)
        return _boost._binom_cdf(k, n, p)

    def _sf(self, x, n, p):
        k = floor(x)
        return _boost._binom_sf(k, n, p)

    def _isf(self, x, n, p):
        return _boost._binom_isf(x, n, p)

    def _ppf(self, q, n, p):
        return _boost._binom_ppf(q, n, p)

    def _stats(self, n, p, moments='mv'):
        mu = _boost._binom_mean(n, p)
        var = _boost._binom_variance(n, p)
        g1, g2 = None, None
        if 's' in moments:
            g1 = _boost._binom_skewness(n, p)
        if 'k' in moments:
            g2 = _boost._binom_kurtosis_excess(n, p)
        return mu, var, g1, g2

    def _entropy(self, n, p):
        k = np.r_[0:n + 1]
        vals = self._pmf(k, n, p)
        return np.sum(entr(vals), axis=0)


binom = binom_gen(name='binom')

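# Illustrative sketch (not part of the original module): the pmf of the
# `binom` instance defined above is the closed form
# choose(n, k) * p**k * (1-p)**(n-k); here it is checked against
# `scipy.special.comb`.
if __name__ == "__main__":
    from scipy.special import comb

    n_demo, p_demo = 7, 0.3
    k_demo = np.arange(n_demo + 1)
    ref = comb(n_demo, k_demo) * p_demo**k_demo * (1 - p_demo)**(n_demo - k_demo)
    assert np.allclose(binom.pmf(k_demo, n_demo, p_demo), ref)
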
class bernoulli_gen(binom_gen):
    r"""A Bernoulli discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `bernoulli` is:

    .. math::

       f(k) = \begin{cases}1-p  &\text{if } k = 0\\
                           p    &\text{if } k = 1\end{cases}

    for :math:`k` in :math:`\{0, 1\}`, :math:`0 \leq p \leq 1`

    `bernoulli` takes :math:`p` as shape parameter,
    where :math:`p` is the probability of a single success
    and :math:`1-p` is the probability of a single failure.

    %(after_notes)s

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("p", False, (0, 1), (True, True))]

    def _rvs(self, p, size=None, random_state=None):
        return binom_gen._rvs(self, 1, p, size=size, random_state=random_state)

    def _argcheck(self, p):
        return (p >= 0) & (p <= 1)

    def _get_support(self, p):
        # Overrides binom_gen._get_support.
        return self.a, self.b

    def _logpmf(self, x, p):
        return binom._logpmf(x, 1, p)

    def _pmf(self, x, p):
        # bernoulli.pmf(k) = 1-p if k = 0
        #                  = p   if k = 1
        return binom._pmf(x, 1, p)

    def _cdf(self, x, p):
        return binom._cdf(x, 1, p)

    def _sf(self, x, p):
        return binom._sf(x, 1, p)

    def _isf(self, x, p):
        return binom._isf(x, 1, p)

    def _ppf(self, q, p):
        return binom._ppf(q, 1, p)

    def _stats(self, p):
        return binom._stats(1, p)

    def _entropy(self, p):
        return entr(p) + entr(1-p)


bernoulli = bernoulli_gen(b=1, name='bernoulli')

class betabinom_gen(rv_discrete):
    r"""A beta-binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    The beta-binomial distribution is a binomial distribution with a
    probability of success `p` that follows a beta distribution.

    The probability mass function for `betabinom` is:

    .. math::

       f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)}

    for :math:`k \in \{0, 1, \dots, n\}`, :math:`n \geq 0`, :math:`a > 0`,
    :math:`b > 0`, where :math:`B(a, b)` is the beta function.

    `betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution

    %(after_notes)s

    .. versionadded:: 1.4.0

    See Also
    --------
    beta, binom

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("a", False, (0, np.inf), (False, False)),
                _ShapeInfo("b", False, (0, np.inf), (False, False))]

    def _rvs(self, n, a, b, size=None, random_state=None):
        p = random_state.beta(a, b, size)
        return random_state.binomial(n, p, size)

    def _get_support(self, n, a, b):
        return 0, n

    def _argcheck(self, n, a, b):
        return (n >= 0) & _isintegral(n) & (a > 0) & (b > 0)

    def _logpmf(self, x, n, a, b):
        k = floor(x)
        combiln = -log(n + 1) - betaln(n - k + 1, k + 1)
        return combiln + betaln(k + a, n - k + b) - betaln(a, b)

    def _pmf(self, x, n, a, b):
        return exp(self._logpmf(x, n, a, b))

    def _stats(self, n, a, b, moments='mv'):
        e_p = a / (a + b)
        e_q = 1 - e_p
        mu = n * e_p
        var = n * (a + b + n) * e_p * e_q / (a + b + 1)
        g1, g2 = None, None
        if 's' in moments:
            g1 = 1.0 / sqrt(var)
            g1 *= (a + b + 2 * n) * (b - a)
            g1 /= (a + b + 2) * (a + b)
        if 'k' in moments:
            g2 = (a + b).astype(e_p.dtype)
            g2 *= (a + b - 1 + 6 * n)
            g2 += 3 * a * b * (n - 2)
            g2 += 6 * n ** 2
            g2 -= 3 * e_p * b * n * (6 - n)
            g2 -= 18 * e_p * e_q * n ** 2
            g2 *= (a + b) ** 2 * (1 + a + b)
            g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n))
            g2 -= 3
        return mu, var, g1, g2


betabinom = betabinom_gen(name='betabinom')

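# Illustrative sketch (not part of the original module): `betabinom` is a
# binomial whose success probability is itself beta-distributed, exactly as
# `_rvs` above samples it.  A quick Monte Carlo check of the mean,
# E[k] = n * a / (a + b).
if __name__ == "__main__":
    rng_demo = np.random.default_rng(12345)
    n_demo, a_demo, b_demo = 10, 2.0, 3.0
    p_demo = rng_demo.beta(a_demo, b_demo, size=100_000)
    samples = rng_demo.binomial(n_demo, p_demo)
    assert abs(samples.mean() - n_demo * a_demo / (a_demo + b_demo)) < 0.05
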
class nbinom_gen(rv_discrete):
    r"""A negative binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    Negative binomial distribution describes a sequence of i.i.d. Bernoulli
    trials, repeated until a predefined, non-random number of successes occurs.

    The probability mass function of the number of failures for `nbinom` is:

    .. math::

       f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k

    for :math:`k \ge 0`, :math:`0 < p \leq 1`

    `nbinom` takes :math:`n` and :math:`p` as shape parameters where :math:`n`
    is the number of successes, :math:`p` is the probability of a single
    success, and :math:`1-p` is the probability of a single failure.

    Another common parameterization of the negative binomial distribution is
    in terms of the mean number of failures :math:`\mu` to achieve :math:`n`
    successes. The mean :math:`\mu` is related to the probability of success
    as

    .. math::

       p = \frac{n}{n + \mu}

    The number of successes :math:`n` may also be specified in terms of a
    "dispersion", "heterogeneity", or "aggregation" parameter :math:`\alpha`,
    which relates the mean :math:`\mu` to the variance :math:`\sigma^2`,
    e.g. :math:`\sigma^2 = \mu + \alpha \mu^2`. Regardless of the convention
    used for :math:`\alpha`,

    .. math::

       p &= \frac{\mu}{\sigma^2} \\
       n &= \frac{\mu^2}{\sigma^2 - \mu}

    %(after_notes)s

    %(example)s

    See Also
    --------
    hypergeom, binom, nhypergeom

    """
    def _shape_info(self):
        return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("p", False, (0, 1), (True, True))]

    def _rvs(self, n, p, size=None, random_state=None):
        return random_state.negative_binomial(n, p, size)

    def _argcheck(self, n, p):
        return (n > 0) & (p > 0) & (p <= 1)

    def _pmf(self, x, n, p):
        # nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
        return _boost._nbinom_pdf(x, n, p)

    def _logpmf(self, x, n, p):
        coeff = gamln(n+x) - gamln(x+1) - gamln(n)
        return coeff + n*log(p) + special.xlog1py(x, -p)

    def _cdf(self, x, n, p):
        k = floor(x)
        return _boost._nbinom_cdf(k, n, p)

    def _logcdf(self, x, n, p):
        k = floor(x)
        k, n, p = np.broadcast_arrays(k, n, p)
        cdf = self._cdf(k, n, p)
        cond = cdf > 0.5

        def f1(k, n, p):
            return np.log1p(-special.betainc(k + 1, n, 1 - p))

        # do calc in place
        logcdf = cdf
        with np.errstate(divide='ignore'):
            logcdf[cond] = f1(k[cond], n[cond], p[cond])
            logcdf[~cond] = np.log(cdf[~cond])
        return logcdf

    def _sf(self, x, n, p):
        k = floor(x)
        return _boost._nbinom_sf(k, n, p)

    def _isf(self, x, n, p):
        with np.errstate(over='ignore'):  # see gh-17432
            return _boost._nbinom_isf(x, n, p)

    def _ppf(self, q, n, p):
        with np.errstate(over='ignore'):  # see gh-17432
            return _boost._nbinom_ppf(q, n, p)

    def _stats(self, n, p):
        return (
            _boost._nbinom_mean(n, p),
            _boost._nbinom_variance(n, p),
            _boost._nbinom_skewness(n, p),
            _boost._nbinom_kurtosis_excess(n, p),
        )


nbinom = nbinom_gen(name='nbinom')

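# Illustrative sketch (not part of the original module): converting the
# mean/dispersion parameterization described in the notes above into the
# (n, p) shape parameters that `nbinom` takes, using
# sigma**2 = mu + alpha * mu**2, p = mu / sigma**2 and
# n = mu**2 / (sigma**2 - mu).
if __name__ == "__main__":
    mu_demo, alpha_demo = 5.0, 0.4
    var_demo = mu_demo + alpha_demo * mu_demo**2
    p_demo = mu_demo / var_demo
    n_demo = mu_demo**2 / (var_demo - mu_demo)
    assert np.isclose(nbinom.mean(n_demo, p_demo), mu_demo)
    assert np.isclose(nbinom.var(n_demo, p_demo), var_demo)
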
class betanbinom_gen(rv_discrete):
    r"""A beta-negative-binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    The beta-negative-binomial distribution is a negative binomial
    distribution with a probability of success `p` that follows a
    beta distribution.

    The probability mass function for `betanbinom` is:

    .. math::

       f(k) = \binom{n + k - 1}{k} \frac{B(a + n, b + k)}{B(a, b)}

    for :math:`k \ge 0`, :math:`n \geq 0`, :math:`a > 0`,
    :math:`b > 0`, where :math:`B(a, b)` is the beta function.

    `betanbinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Beta_negative_binomial_distribution

    %(after_notes)s

    .. versionadded:: 1.12.0

    See Also
    --------
    betabinom : Beta binomial distribution

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("a", False, (0, np.inf), (False, False)),
                _ShapeInfo("b", False, (0, np.inf), (False, False))]

    def _rvs(self, n, a, b, size=None, random_state=None):
        p = random_state.beta(a, b, size)
        return random_state.negative_binomial(n, p, size)

    def _argcheck(self, n, a, b):
        return (n >= 0) & _isintegral(n) & (a > 0) & (b > 0)

    def _logpmf(self, x, n, a, b):
        k = floor(x)
        combiln = -np.log(n + k) - betaln(n, k + 1)
        return combiln + betaln(a + n, b + k) - betaln(a, b)

    def _pmf(self, x, n, a, b):
        return exp(self._logpmf(x, n, a, b))

    def _stats(self, n, a, b, moments='mv'):
        # reference: Wolfram Alpha input
        # BetaNegativeBinomialDistribution[a, b, n]
        def mean(n, a, b):
            return n * b / (a - 1.)

        mu = _lazywhere(a > 1, (n, a, b), f=mean, fillvalue=np.inf)

        def var(n, a, b):
            return (n * b * (n + a - 1.) * (a + b - 1.)
                    / ((a - 2.) * (a - 1.)**2.))

        var = _lazywhere(a > 2, (n, a, b), f=var, fillvalue=np.inf)
        g1, g2 = None, None

        def skew(n, a, b):
            return ((2 * n + a - 1.) * (2 * b + a - 1.)
                    / (a - 3.) / sqrt(n * b * (n + a - 1.) * (b + a - 1.)
                                      / (a - 2.)))

        if 's' in moments:
            g1 = _lazywhere(a > 3, (n, a, b), f=skew, fillvalue=np.inf)

        def kurtosis(n, a, b):
            term = (a - 2.)
            term_2 = ((a - 1.)**2. * (a**2. + a * (6 * b - 1.)
                                      + 6. * (b - 1.) * b)
                      + 3. * n**2. * ((a + 5.) * b**2. + (a + 5.)
                                      * (a - 1.) * b + 2. * (a - 1.)**2)
                      + 3 * (a - 1.) * n
                      * ((a + 5.) * b**2. + (a + 5.) * (a - 1.) * b
                         + 2. * (a - 1.)**2.))
            denominator = ((a - 4.) * (a - 3.) * b * n
                           * (a + b - 1.) * (a + n - 1.))
            # Wolfram Alpha uses Pearson kurtosis, so we subtract 3 to get
            # scipy's Fisher kurtosis
            return term * term_2 / denominator - 3.

        if 'k' in moments:
            g2 = _lazywhere(a > 4, (n, a, b), f=kurtosis, fillvalue=np.inf)
        return mu, var, g1, g2


betanbinom = betanbinom_gen(name='betanbinom')

class geom_gen(rv_discrete):
    r"""A geometric discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `geom` is:

    .. math::

        f(k) = (1-p)^{k-1} p

    for :math:`k \ge 1`, :math:`0 < p \leq 1`

    `geom` takes :math:`p` as shape parameter,
    where :math:`p` is the probability of a single success
    and :math:`1-p` is the probability of a single failure.

    %(after_notes)s

    See Also
    --------
    planck

    %(example)s

    """

    def _shape_info(self):
        return [_ShapeInfo("p", False, (0, 1), (True, True))]

    def _rvs(self, p, size=None, random_state=None):
        return random_state.geometric(p, size=size)

    def _argcheck(self, p):
        return (p <= 1) & (p > 0)

    def _pmf(self, k, p):
        return np.power(1-p, k-1) * p

    def _logpmf(self, k, p):
        return special.xlog1py(k - 1, -p) + log(p)

    def _cdf(self, x, p):
        k = floor(x)
        return -expm1(log1p(-p)*k)

    def _sf(self, x, p):
        return np.exp(self._logsf(x, p))

    def _logsf(self, x, p):
        k = floor(x)
        return k*log1p(-p)

    def _ppf(self, q, p):
        vals = ceil(log1p(-q) / log1p(-p))
        temp = self._cdf(vals-1, p)
        return np.where((temp >= q) & (vals > 0), vals-1, vals)

    def _stats(self, p):
        mu = 1.0/p
        qr = 1.0-p
        var = qr / p / p
        g1 = (2.0-p) / sqrt(qr)
        g2 = np.polyval([1, -6, 6], p)/(1.0-p)
        return mu, var, g1, g2

    def _entropy(self, p):
        return -np.log(p) - np.log1p(-p) * (1.0-p) / p


geom = geom_gen(a=1, name='geom', longname="A geometric")

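# Illustrative sketch (not part of the original module): the cdf used by
# `geom` above is the closed form F(k) = 1 - (1-p)**k, implemented as
# -expm1(k * log1p(-p)) for numerical accuracy near p = 0; the sf is its
# complement S(k) = (1-p)**k.
if __name__ == "__main__":
    p_demo = 0.25
    k_demo = np.arange(1, 20)
    assert np.allclose(geom.cdf(k_demo, p_demo), 1 - (1 - p_demo)**k_demo)
    assert np.allclose(geom.sf(k_demo, p_demo), (1 - p_demo)**k_demo)
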
class hypergeom_gen(rv_discrete):
|
548 |
+
r"""A hypergeometric discrete random variable.
|
549 |
+
|
550 |
+
The hypergeometric distribution models drawing objects from a bin.
|
551 |
+
`M` is the total number of objects, `n` is total number of Type I objects.
|
552 |
+
The random variate represents the number of Type I objects in `N` drawn
|
553 |
+
without replacement from the total population.
|
554 |
+
|
555 |
+
%(before_notes)s
|
556 |
+
|
557 |
+
Notes
|
558 |
+
-----
|
559 |
+
The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not
|
560 |
+
universally accepted. See the Examples for a clarification of the
|
561 |
+
definitions used here.
|
562 |
+
|
563 |
+
The probability mass function is defined as,
|
564 |
+
|
565 |
+
    .. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}}
                                   {\binom{M}{N}}

    for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial
    coefficients are defined as,

    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.

    %(after_notes)s

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import hypergeom
    >>> import matplotlib.pyplot as plt

    Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
    we want to know the probability of finding a given number of dogs if we
    choose at random 12 of the 20 animals, we can initialize a frozen
    distribution and plot the probability mass function:

    >>> [M, n, N] = [20, 7, 12]
    >>> rv = hypergeom(M, n, N)
    >>> x = np.arange(0, n+1)
    >>> pmf_dogs = rv.pmf(x)

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, pmf_dogs, 'bo')
    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
    >>> ax.set_xlabel('# of dogs in our group of chosen animals')
    >>> ax.set_ylabel('hypergeom PMF')
    >>> plt.show()

    Instead of using a frozen distribution we can also use `hypergeom`
    methods directly. For example, to obtain the cumulative distribution
    function, use:

    >>> prb = hypergeom.cdf(x, M, n, N)

    And to generate random numbers:

    >>> R = hypergeom.rvs(M, n, N, size=10)

    See Also
    --------
    nhypergeom, binom, nbinom

    """

    def _shape_info(self):
        return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
                _ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("N", True, (0, np.inf), (True, False))]

    def _rvs(self, M, n, N, size=None, random_state=None):
        return random_state.hypergeometric(n, M-n, N, size=size)

    def _get_support(self, M, n, N):
        return np.maximum(N-(M-n), 0), np.minimum(n, N)

    def _argcheck(self, M, n, N):
        cond = (M > 0) & (n >= 0) & (N >= 0)
        cond &= (n <= M) & (N <= M)
        cond &= _isintegral(M) & _isintegral(n) & _isintegral(N)
        return cond

    def _logpmf(self, k, M, n, N):
        tot, good = M, n
        bad = tot - good
        result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) -
                  betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) -
                  betaln(tot+1, 1))
        return result

    def _pmf(self, k, M, n, N):
        return _boost._hypergeom_pdf(k, n, N, M)

    def _cdf(self, k, M, n, N):
        return _boost._hypergeom_cdf(k, n, N, M)

    def _stats(self, M, n, N):
        M, n, N = 1. * M, 1. * n, 1. * N
        m = M - n

        # Boost kurtosis_excess doesn't return the same as the value
        # computed here.
        g2 = M * (M + 1) - 6. * N * (M - N) - 6. * n * m
        g2 *= (M - 1) * M * M
        g2 += 6. * n * N * (M - N) * m * (5. * M - 6)
        g2 /= n * N * (M - N) * m * (M - 2.) * (M - 3.)
        return (
            _boost._hypergeom_mean(n, N, M),
            _boost._hypergeom_variance(n, N, M),
            _boost._hypergeom_skewness(n, N, M),
            g2,
        )

    def _entropy(self, M, n, N):
        k = np.r_[N - (M - n):min(n, N) + 1]
        vals = self.pmf(k, M, n, N)
        return np.sum(entr(vals), axis=0)

    def _sf(self, k, M, n, N):
        return _boost._hypergeom_sf(k, n, N, M)

    def _logsf(self, k, M, n, N):
        res = []
        for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
            if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5):
                # Fewer terms to sum if we calculate log(1-cdf)
                res.append(log1p(-exp(self.logcdf(quant, tot, good, draw))))
            else:
                # Integration over probability mass function using logsumexp
                k2 = np.arange(quant + 1, draw + 1)
                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)

    def _logcdf(self, k, M, n, N):
        res = []
        for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
            if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5):
                # Fewer terms to sum if we calculate log(1-sf)
                res.append(log1p(-exp(self.logsf(quant, tot, good, draw))))
            else:
                # Integration over probability mass function using logsumexp
                k2 = np.arange(0, quant + 1)
                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)


hypergeom = hypergeom_gen(name='hypergeom')
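

# Editor's sketch (illustrative helper, not part of scipy's API): `_logsf`
# and `_logcdf` above pick whichever tail has fewer PMF terms and derive the
# other side via log(1 - exp(...)). A minimal standalone check of that
# identity, using only the module-level `hypergeom` defined just above:
def _demo_log_tail_identity(k=5, M=52, n=13, N=10):
    """Check log(sf) == log1p(-exp(log(cdf))) for the hypergeometric."""
    import numpy as np  # local import keeps the sketch copy-pasteable
    direct = hypergeom.logsf(k, M, n, N)
    via_cdf = np.log1p(-np.exp(hypergeom.logcdf(k, M, n, N)))
    return np.allclose(direct, via_cdf)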


class nhypergeom_gen(rv_discrete):
    r"""A negative hypergeometric discrete random variable.

    Consider a box containing :math:`M` balls: :math:`n` red and
    :math:`M-n` blue. We randomly sample balls from the box, one
    at a time and *without* replacement, until we have picked :math:`r`
    blue balls. `nhypergeom` is the distribution of the number of
    red balls :math:`k` we have picked.

    %(before_notes)s

    Notes
    -----
    The symbols used to denote the shape parameters (`M`, `n`, and `r`) are not
    universally accepted. See the Examples for a clarification of the
    definitions used here.

    The probability mass function is defined as,

    .. math:: f(k; M, n, r) = \frac{{{k+r-1}\choose{k}}{{M-r-k}\choose{n-k}}}
                                   {{M \choose n}}

    for :math:`k \in [0, n]`, :math:`n \in [0, M]`, :math:`r \in [0, M-n]`,
    and the binomial coefficient is:

    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.

    It is equivalent to observing :math:`k` successes in :math:`k+r-1`
    samples with the :math:`(k+r)`-th sample being a failure. The former
    can be modelled as a hypergeometric distribution. The probability
    of the latter is simply the number of failures remaining
    :math:`M-n-(r-1)` divided by the size of the remaining population
    :math:`M-(k+r-1)`. This relationship can be shown as:

    .. math:: NHG(k;M,n,r) = HG(k;M,n,k+r-1)\frac{(M-n-(r-1))}{(M-(k+r-1))}

    where :math:`NHG` is the probability mass function (PMF) of the
    negative hypergeometric distribution and :math:`HG` is the
    PMF of the hypergeometric distribution.

    %(after_notes)s

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import nhypergeom
    >>> import matplotlib.pyplot as plt

    Suppose we have a collection of 20 animals, of which 7 are dogs.
    Then if we want to know the probability of finding a given number
    of dogs (successes) in a sample with exactly 12 animals that
    aren't dogs (failures), we can initialize a frozen distribution
    and plot the probability mass function:

    >>> M, n, r = [20, 7, 12]
    >>> rv = nhypergeom(M, n, r)
    >>> x = np.arange(0, n+2)
    >>> pmf_dogs = rv.pmf(x)

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, pmf_dogs, 'bo')
    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
    >>> ax.set_xlabel('# of dogs in our group with given 12 failures')
    >>> ax.set_ylabel('nhypergeom PMF')
    >>> plt.show()

    Instead of using a frozen distribution we can also use `nhypergeom`
    methods directly. For example, to obtain the probability mass
    function, use:

    >>> prb = nhypergeom.pmf(x, M, n, r)

    And to generate random numbers:

    >>> R = nhypergeom.rvs(M, n, r, size=10)

    To verify the relationship between `hypergeom` and `nhypergeom`, use:

    >>> from scipy.stats import hypergeom, nhypergeom
    >>> M, n, r = 45, 13, 8
    >>> k = 6
    >>> nhypergeom.pmf(k, M, n, r)
    0.06180776620271643
    >>> hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1))
    0.06180776620271644

    See Also
    --------
    hypergeom, binom, nbinom

    References
    ----------
    .. [1] Negative Hypergeometric Distribution on Wikipedia
           https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution

    .. [2] Negative Hypergeometric Distribution from
           http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Negativehypergeometric.pdf

    """

    def _shape_info(self):
        return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
                _ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("r", True, (0, np.inf), (True, False))]

    def _get_support(self, M, n, r):
        return 0, n

    def _argcheck(self, M, n, r):
        cond = (n >= 0) & (n <= M) & (r >= 0) & (r <= M-n)
        cond &= _isintegral(M) & _isintegral(n) & _isintegral(r)
        return cond

    def _rvs(self, M, n, r, size=None, random_state=None):

        @_vectorize_rvs_over_shapes
        def _rvs1(M, n, r, size, random_state):
            # invert cdf by calculating all values in support, scalar M, n, r
            a, b = self.support(M, n, r)
            ks = np.arange(a, b+1)
            cdf = self.cdf(ks, M, n, r)
            ppf = interp1d(cdf, ks, kind='next', fill_value='extrapolate')
            rvs = ppf(random_state.uniform(size=size)).astype(int)
            if size is None:
                return rvs.item()
            return rvs

        return _rvs1(M, n, r, size=size, random_state=random_state)

    def _logpmf(self, k, M, n, r):
        cond = ((r == 0) & (k == 0))
        result = _lazywhere(~cond, (k, M, n, r),
                            lambda k, M, n, r:
                            (-betaln(k+1, r) + betaln(k+r, 1) -
                             betaln(n-k+1, M-r-n+1) + betaln(M-r-k+1, 1) +
                             betaln(n+1, M-n+1) - betaln(M+1, 1)),
                            fillvalue=0.0)
        return result

    def _pmf(self, k, M, n, r):
        # same as the following but numerically more precise
        # return comb(k+r-1, k) * comb(M-r-k, n-k) / comb(M, n)
        return exp(self._logpmf(k, M, n, r))

    def _stats(self, M, n, r):
        # Promote the datatype to at least float
        # mu = rn / (M-n+1)
        M, n, r = 1.*M, 1.*n, 1.*r
        mu = r*n / (M-n+1)

        var = r*(M+1)*n / ((M-n+1)*(M-n+2)) * (1 - r / (M-n+1))

        # The skew and kurtosis are mathematically
        # intractable so return `None`. See [2]_.
        g1, g2 = None, None
        return mu, var, g1, g2


nhypergeom = nhypergeom_gen(name='nhypergeom')
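

# Editor's sketch (hypothetical helper, not part of scipy): the comment in
# `_pmf` above claims the betaln formulation matches the direct binomial-
# coefficient ratio; a quick numeric check of that claim:
def _demo_nhypergeom_pmf_forms(k=3, M=25, n=10, r=4):
    import numpy as np
    from scipy.special import comb
    direct = comb(k+r-1, k) * comb(M-r-k, n-k) / comb(M, n)
    return np.allclose(direct, nhypergeom.pmf(k, M, n, r))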


# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
    r"""A Logarithmic (Log-Series, Series) discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `logser` is:

    .. math::

        f(k) = - \frac{p^k}{k \log(1-p)}

    for :math:`k \ge 1`, :math:`0 < p < 1`

    `logser` takes :math:`p` as shape parameter,
    where :math:`p` is the probability of a single success
    and :math:`1-p` is the probability of a single failure.

    %(after_notes)s

    %(example)s

    """

    def _shape_info(self):
        return [_ShapeInfo("p", False, (0, 1), (True, True))]

    def _rvs(self, p, size=None, random_state=None):
        # looks wrong for p>0.5, too few k=1
        # trying to use generic is worse, no k=1 at all
        return random_state.logseries(p, size=size)

    def _argcheck(self, p):
        return (p > 0) & (p < 1)

    def _pmf(self, k, p):
        # logser.pmf(k) = - p**k / (k*log(1-p))
        return -np.power(p, k) * 1.0 / k / special.log1p(-p)

    def _stats(self, p):
        r = special.log1p(-p)
        mu = p / (p - 1.0) / r
        mu2p = -p / r / (p - 1.0)**2
        var = mu2p - mu*mu
        mu3p = -p / r * (1.0+p) / (1.0 - p)**3
        mu3 = mu3p - 3*mu*mu2p + 2*mu**3
        g1 = mu3 / np.power(var, 1.5)

        mu4p = -p / r * (
            1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
        mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
        g2 = mu4 / var**2 - 3.0
        return mu, var, g1, g2


logser = logser_gen(a=1, name='logser', longname='A logarithmic')
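

# Editor's sketch (illustrative only, not scipy API): the log-series PMF
# above normalizes because sum_{k>=1} p**k / k == -log(1 - p); a truncated
# numeric check of that normalization:
def _demo_logser_normalization(p=0.6, kmax=2000):
    import numpy as np
    k = np.arange(1, kmax + 1)
    total = np.sum(-np.power(p, k) / (k * np.log1p(-p)))
    return np.isclose(total, 1.0)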


class poisson_gen(rv_discrete):
    r"""A Poisson discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `poisson` is:

    .. math::

        f(k) = \exp(-\mu) \frac{\mu^k}{k!}

    for :math:`k \ge 0`.

    `poisson` takes :math:`\mu \geq 0` as shape parameter.
    When :math:`\mu = 0`, the ``pmf`` method
    returns ``1.0`` at quantile :math:`k = 0`.

    %(after_notes)s

    %(example)s

    """

    def _shape_info(self):
        return [_ShapeInfo("mu", False, (0, np.inf), (True, False))]

    # Override rv_discrete._argcheck to allow mu=0.
    def _argcheck(self, mu):
        return mu >= 0

    def _rvs(self, mu, size=None, random_state=None):
        return random_state.poisson(mu, size)

    def _logpmf(self, k, mu):
        Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
        return Pk

    def _pmf(self, k, mu):
        # poisson.pmf(k) = exp(-mu) * mu**k / k!
        return exp(self._logpmf(k, mu))

    def _cdf(self, x, mu):
        k = floor(x)
        return special.pdtr(k, mu)

    def _sf(self, x, mu):
        k = floor(x)
        return special.pdtrc(k, mu)

    def _ppf(self, q, mu):
        vals = ceil(special.pdtrik(q, mu))
        vals1 = np.maximum(vals - 1, 0)
        temp = special.pdtr(vals1, mu)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, mu):
        var = mu
        tmp = np.asarray(mu)
        mu_nonzero = tmp > 0
        g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
        g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
        return mu, var, g1, g2


poisson = poisson_gen(name="poisson", longname='A Poisson')
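

# Editor's sketch (illustrative, not scipy API): `_ppf` above inverts
# `pdtrik` and then steps back one integer when the candidate overshoots.
# The same bracketing can be checked directly against the cdf:
def _demo_poisson_ppf_bracket(q=0.8, mu=3.0):
    import numpy as np
    k = poisson.ppf(q, mu)
    # smallest integer k with cdf(k) >= q: cdf(k) >= q but cdf(k-1) < q
    return bool((poisson.cdf(k, mu) >= q) and (poisson.cdf(k - 1, mu) < q))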


class planck_gen(rv_discrete):
    r"""A Planck discrete exponential random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `planck` is:

    .. math::

        f(k) = (1-\exp(-\lambda)) \exp(-\lambda k)

    for :math:`k \ge 0` and :math:`\lambda > 0`.

    `planck` takes :math:`\lambda` as shape parameter. The Planck distribution
    can be written as a geometric distribution (`geom`) with
    :math:`p = 1 - \exp(-\lambda)` shifted by ``loc = -1``.

    %(after_notes)s

    See Also
    --------
    geom

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("lambda", False, (0, np.inf), (False, False))]

    def _argcheck(self, lambda_):
        return lambda_ > 0

    def _pmf(self, k, lambda_):
        return -expm1(-lambda_)*exp(-lambda_*k)

    def _cdf(self, x, lambda_):
        k = floor(x)
        return -expm1(-lambda_*(k+1))

    def _sf(self, x, lambda_):
        return exp(self._logsf(x, lambda_))

    def _logsf(self, x, lambda_):
        k = floor(x)
        return -lambda_*(k+1)

    def _ppf(self, q, lambda_):
        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
        vals1 = (vals-1).clip(*(self._get_support(lambda_)))
        temp = self._cdf(vals1, lambda_)
        return np.where(temp >= q, vals1, vals)

    def _rvs(self, lambda_, size=None, random_state=None):
        # use relation to geometric distribution for sampling
        p = -expm1(-lambda_)
        return random_state.geometric(p, size=size) - 1.0

    def _stats(self, lambda_):
        mu = 1/expm1(lambda_)
        var = exp(-lambda_)/(expm1(-lambda_))**2
        g1 = 2*cosh(lambda_/2.0)
        g2 = 4+2*cosh(lambda_)
        return mu, var, g1, g2

    def _entropy(self, lambda_):
        C = -expm1(-lambda_)
        return lambda_*exp(-lambda_)/C - log(C)


planck = planck_gen(a=0, name='planck', longname='A discrete exponential ')
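

# Editor's sketch: the docstring above states that planck is geom with
# p = 1 - exp(-lambda) shifted by loc=-1; a direct numeric confirmation
# (`geom` is defined earlier in this module):
def _demo_planck_geom_relation(lam=0.51):
    import numpy as np
    k = np.arange(0, 20)
    return np.allclose(planck.pmf(k, lam),
                       geom.pmf(k, -np.expm1(-lam), loc=-1))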


class boltzmann_gen(rv_discrete):
    r"""A Boltzmann (Truncated Discrete Exponential) random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `boltzmann` is:

    .. math::

        f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N))

    for :math:`k = 0,..., N-1`.

    `boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("lambda_", False, (0, np.inf), (False, False)),
                _ShapeInfo("N", True, (0, np.inf), (False, False))]

    def _argcheck(self, lambda_, N):
        return (lambda_ > 0) & (N > 0) & _isintegral(N)

    def _get_support(self, lambda_, N):
        return self.a, N - 1

    def _pmf(self, k, lambda_, N):
        # boltzmann.pmf(k) =
        #     (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
        fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
        return fact*exp(-lambda_*k)

    def _cdf(self, x, lambda_, N):
        k = floor(x)
        return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))

    def _ppf(self, q, lambda_, N):
        qnew = q*(1-exp(-lambda_*N))
        vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, lambda_, N)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, lambda_, N):
        z = exp(-lambda_)
        zN = exp(-lambda_*N)
        mu = z/(1.0-z)-N*zN/(1-zN)
        var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
        trm = (1-zN)/(1-z)
        trm2 = (z*trm**2 - N*N*zN)
        g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
        g1 = g1 / trm2**(1.5)
        g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
        g2 = g2 / trm2 / trm2
        return mu, var, g1, g2


boltzmann = boltzmann_gen(name='boltzmann', a=0,
                          longname='A truncated discrete exponential ')
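

# Editor's sketch (derived from the PMFs stated above, not a scipy-official
# identity): boltzmann is planck truncated to {0, ..., N-1} and renormalized
# by planck's cdf at N-1, so the two PMFs should agree after rescaling:
def _demo_boltzmann_truncation(lam=1.4, N=19):
    import numpy as np
    k = np.arange(N)
    return np.allclose(boltzmann.pmf(k, lam, N),
                       planck.pmf(k, lam) / planck.cdf(N - 1, lam))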


class randint_gen(rv_discrete):
    r"""A uniform discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `randint` is:

    .. math::

        f(k) = \frac{1}{\texttt{high} - \texttt{low}}

    for :math:`k \in \{\texttt{low}, \dots, \texttt{high} - 1\}`.

    `randint` takes :math:`\texttt{low}` and :math:`\texttt{high}` as shape
    parameters.

    %(after_notes)s

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats import randint
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(1, 1)

    Calculate the first four moments:

    >>> low, high = 7, 31
    >>> mean, var, skew, kurt = randint.stats(low, high, moments='mvsk')

    Display the probability mass function (``pmf``):

    >>> x = np.arange(low - 5, high + 5)
    >>> ax.plot(x, randint.pmf(x, low, high), 'bo', ms=8, label='randint pmf')
    >>> ax.vlines(x, 0, randint.pmf(x, low, high), colors='b', lw=5, alpha=0.5)

    Alternatively, the distribution object can be called (as a function) to
    fix the shape and location. This returns a "frozen" RV object holding the
    given parameters fixed.

    Freeze the distribution and display the frozen ``pmf``:

    >>> rv = randint(low, high)
    >>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-',
    ...           lw=1, label='frozen pmf')
    >>> ax.legend(loc='lower center')
    >>> plt.show()

    Check the relationship between the cumulative distribution function
    (``cdf``) and its inverse, the percent point function (``ppf``):

    >>> q = np.arange(low, high)
    >>> p = randint.cdf(q, low, high)
    >>> np.allclose(q, randint.ppf(p, low, high))
    True

    Generate random numbers:

    >>> r = randint.rvs(low, high, size=1000)

    """

    def _shape_info(self):
        return [_ShapeInfo("low", True, (-np.inf, np.inf), (False, False)),
                _ShapeInfo("high", True, (-np.inf, np.inf), (False, False))]

    def _argcheck(self, low, high):
        return (high > low) & _isintegral(low) & _isintegral(high)

    def _get_support(self, low, high):
        return low, high-1

    def _pmf(self, k, low, high):
        # randint.pmf(k) = 1./(high - low)
        p = np.ones_like(k) / (high - low)
        return np.where((k >= low) & (k < high), p, 0.)

    def _cdf(self, x, low, high):
        k = floor(x)
        return (k - low + 1.) / (high - low)

    def _ppf(self, q, low, high):
        vals = ceil(q * (high - low) + low) - 1
        vals1 = (vals - 1).clip(low, high)
        temp = self._cdf(vals1, low, high)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, low, high):
        m2, m1 = np.asarray(high), np.asarray(low)
        mu = (m2 + m1 - 1.0) / 2
        d = m2 - m1
        var = (d*d - 1) / 12.0
        g1 = 0.0
        g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
        return mu, var, g1, g2

    def _rvs(self, low, high, size=None, random_state=None):
        """An array of *size* random integers >= ``low`` and < ``high``."""
        if np.asarray(low).size == 1 and np.asarray(high).size == 1:
            # no need to vectorize in that case
            return rng_integers(random_state, low, high, size=size)

        if size is not None:
            # NumPy's RandomState.randint() doesn't broadcast its arguments.
            # Use `broadcast_to()` to extend the shapes of low and high
            # up to size. Then we can use the numpy.vectorize'd
            # randint without needing to pass it a `size` argument.
            low = np.broadcast_to(low, size)
            high = np.broadcast_to(high, size)
        randint = np.vectorize(partial(rng_integers, random_state),
                               otypes=[np.dtype(int)])
        return randint(low, high)

    def _entropy(self, low, high):
        return log(high - low)


randint = randint_gen(name='randint', longname='A discrete uniform '
                                               '(random integer)')


# FIXME: problems sampling.
class zipf_gen(rv_discrete):
    r"""A Zipf (Zeta) discrete random variable.

    %(before_notes)s

    See Also
    --------
    zipfian

    Notes
    -----
    The probability mass function for `zipf` is:

    .. math::

        f(k, a) = \frac{1}{\zeta(a) k^a}

    for :math:`k \ge 1`, :math:`a > 1`.

    `zipf` takes :math:`a > 1` as shape parameter. :math:`\zeta` is the
    Riemann zeta function (`scipy.special.zeta`).

    The Zipf distribution is also known as the zeta distribution, which is
    a special case of the Zipfian distribution (`zipfian`).

    %(after_notes)s

    References
    ----------
    .. [1] "Zeta Distribution", Wikipedia,
           https://en.wikipedia.org/wiki/Zeta_distribution

    %(example)s

    Confirm that `zipf` is the large `n` limit of `zipfian`.

    >>> import numpy as np
    >>> from scipy.stats import zipf, zipfian
    >>> k = np.arange(11)
    >>> a = 3.5
    >>> np.allclose(zipf.pmf(k, a), zipfian.pmf(k, a, n=10000000))
    True

    """

    def _shape_info(self):
        return [_ShapeInfo("a", False, (1, np.inf), (False, False))]

    def _rvs(self, a, size=None, random_state=None):
        return random_state.zipf(a, size=size)

    def _argcheck(self, a):
        return a > 1

    def _pmf(self, k, a):
        # zipf.pmf(k, a) = 1/(zeta(a) * k**a)
        Pk = 1.0 / special.zeta(a, 1) / k**a
        return Pk

    def _munp(self, n, a):
        return _lazywhere(
            a > n + 1, (a, n),
            lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
            np.inf)


zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
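

# Editor's sketch (illustrative, not scipy API): `_munp` above uses
# E[X**m] = zeta(a - m) / zeta(a) for a > m + 1; a truncated-series check:
def _demo_zipf_moment(a=6.5, m=2, kmax=100000):
    import numpy as np
    from scipy.special import zeta
    k = np.arange(1, kmax + 1, dtype=float)
    approx = np.sum(k**m / (zeta(a, 1) * k**a))
    return np.isclose(approx, zeta(a - m, 1) / zeta(a, 1))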


def _gen_harmonic_gt1(n, a):
    """Generalized harmonic number, a > 1"""
    # See https://en.wikipedia.org/wiki/Harmonic_number; search for "hurwitz"
    return zeta(a, 1) - zeta(a, n+1)


def _gen_harmonic_leq1(n, a):
    """Generalized harmonic number, a <= 1"""
    if not np.size(n):
        return n
    n_max = np.max(n)  # loop starts at maximum of all n
    out = np.zeros_like(a, dtype=float)
    # add terms of harmonic series; starting from smallest to avoid roundoff
    for i in np.arange(n_max, 0, -1, dtype=float):
        mask = i <= n  # don't add terms after nth
        out[mask] += 1/i**a[mask]
    return out


def _gen_harmonic(n, a):
    """Generalized harmonic number"""
    n, a = np.broadcast_arrays(n, a)
    return _lazywhere(a > 1, (n, a),
                      f=_gen_harmonic_gt1, f2=_gen_harmonic_leq1)
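

# Editor's sketch: `_gen_harmonic_gt1` rests on the Hurwitz-zeta identity
# H_{n,a} = zeta(a, 1) - zeta(a, n+1) referenced in its comment; compare it
# against the direct partial sum:
def _demo_gen_harmonic_identity(n=50, a=2.5):
    import numpy as np
    from scipy.special import zeta as _zeta
    direct = np.sum(1.0 / np.arange(1, n + 1) ** a)
    return np.isclose(direct, _zeta(a, 1) - _zeta(a, n + 1))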


class zipfian_gen(rv_discrete):
    r"""A Zipfian discrete random variable.

    %(before_notes)s

    See Also
    --------
    zipf

    Notes
    -----
    The probability mass function for `zipfian` is:

    .. math::

        f(k, a, n) = \frac{1}{H_{n,a} k^a}

    for :math:`k \in \{1, 2, \dots, n-1, n\}`, :math:`a \ge 0`,
    :math:`n \in \{1, 2, 3, \dots\}`.

    `zipfian` takes :math:`a` and :math:`n` as shape parameters.
    :math:`H_{n,a}` is the :math:`n`:sup:`th` generalized harmonic
    number of order :math:`a`.

    The Zipfian distribution reduces to the Zipf (zeta) distribution as
    :math:`n \rightarrow \infty`.

    %(after_notes)s

    References
    ----------
    .. [1] "Zipf's Law", Wikipedia, https://en.wikipedia.org/wiki/Zipf's_law
    .. [2] Larry Leemis, "Zipf Distribution", Univariate Distribution
           Relationships. http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf

    %(example)s

    Confirm that `zipfian` reduces to `zipf` for large `n`, `a > 1`.

    >>> import numpy as np
    >>> from scipy.stats import zipf, zipfian
    >>> k = np.arange(11)
    >>> np.allclose(zipfian.pmf(k, a=3.5, n=10000000), zipf.pmf(k, a=3.5))
    True

    """

    def _shape_info(self):
        return [_ShapeInfo("a", False, (0, np.inf), (True, False)),
                _ShapeInfo("n", True, (0, np.inf), (False, False))]

    def _argcheck(self, a, n):
        # we need np.asarray here because moment (maybe others) don't convert
        return (a >= 0) & (n > 0) & (n == np.asarray(n, dtype=int))

    def _get_support(self, a, n):
        return 1, n

    def _pmf(self, k, a, n):
        return 1.0 / _gen_harmonic(n, a) / k**a

    def _cdf(self, k, a, n):
        return _gen_harmonic(k, a) / _gen_harmonic(n, a)

    def _sf(self, k, a, n):
        k = k + 1  # to match SciPy convention
        # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
        return ((k**a*(_gen_harmonic(n, a) - _gen_harmonic(k, a)) + 1)
                / (k**a*_gen_harmonic(n, a)))

    def _stats(self, a, n):
        # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
        Hna = _gen_harmonic(n, a)
        Hna1 = _gen_harmonic(n, a-1)
        Hna2 = _gen_harmonic(n, a-2)
        Hna3 = _gen_harmonic(n, a-3)
        Hna4 = _gen_harmonic(n, a-4)
        mu1 = Hna1/Hna
        mu2n = (Hna2*Hna - Hna1**2)
        mu2d = Hna**2
        mu2 = mu2n / mu2d
        g1 = (Hna3/Hna - 3*Hna1*Hna2/Hna**2 + 2*Hna1**3/Hna**3)/mu2**(3/2)
        g2 = (Hna**3*Hna4 - 4*Hna**2*Hna1*Hna3 + 6*Hna*Hna1**2*Hna2
              - 3*Hna1**4) / mu2n**2
        g2 -= 3
        return mu1, mu2, g1, g2


zipfian = zipfian_gen(a=1, name='zipfian', longname='A Zipfian')


class dlaplace_gen(rv_discrete):
    r"""A Laplacian discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `dlaplace` is:

    .. math::

        f(k) = \tanh(a/2) \exp(-a |k|)

    for integers :math:`k` and :math:`a > 0`.

    `dlaplace` takes :math:`a` as shape parameter.

    %(after_notes)s

    %(example)s

    """

    def _shape_info(self):
        return [_ShapeInfo("a", False, (0, np.inf), (False, False))]

    def _pmf(self, k, a):
        # dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
        return tanh(a/2.0) * exp(-a * abs(k))

    def _cdf(self, x, a):
        k = floor(x)

        def f(k, a):
            return 1.0 - exp(-a * k) / (exp(a) + 1)

        def f2(k, a):
            return exp(a * (k + 1)) / (exp(a) + 1)

        return _lazywhere(k >= 0, (k, a), f=f, f2=f2)

    def _ppf(self, q, a):
        const = 1 + exp(a)
        vals = ceil(np.where(q < 1.0 / (1 + exp(-a)),
                             log(q*const) / a - 1,
                             -log((1-q) * const) / a))
        vals1 = vals - 1
        return np.where(self._cdf(vals1, a) >= q, vals1, vals)

    def _stats(self, a):
        ea = exp(a)
        mu2 = 2.*ea/(ea-1.)**2
        mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
        return 0., mu2, 0., mu4/mu2**2 - 3.

    def _entropy(self, a):
        return a / sinh(a) - log(tanh(a/2.0))

    def _rvs(self, a, size=None, random_state=None):
        # The discrete Laplace is equivalent to the two-sided geometric
        # distribution with PMF:
        #   f(k) = (1 - alpha)/(1 + alpha) * alpha^abs(k)
        # Reference:
        #   https://www.sciencedirect.com/science/
        #   article/abs/pii/S0378375804003519
        # Furthermore, the two-sided geometric distribution is
        # equivalent to the difference between two iid geometric
        # distributions.
        # Reference (page 179):
        #   https://pdfs.semanticscholar.org/61b3/
        #   b99f466815808fd0d03f5d2791eea8b541a1.pdf
        # Thus, we can leverage the following:
        #   1) alpha = e^-a
        #   2) probability_of_success = 1 - alpha (Bernoulli trial)
        probOfSuccess = -np.expm1(-np.asarray(a))
        x = random_state.geometric(probOfSuccess, size=size)
        y = random_state.geometric(probOfSuccess, size=size)
        return x - y


dlaplace = dlaplace_gen(a=-np.inf,
                        name='dlaplace', longname='A discrete Laplacian')
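

# Editor's sketch: the `_rvs` comment above identifies dlaplace with a
# two-sided geometric with alpha = exp(-a); the normalizing constants agree
# because tanh(a/2) == (1 - exp(-a)) / (1 + exp(-a)), checked here:
def _demo_dlaplace_constant(a=0.8):
    import numpy as np
    alpha = np.exp(-a)
    return np.isclose(np.tanh(a / 2), (1 - alpha) / (1 + alpha))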


class skellam_gen(rv_discrete):
    r"""A Skellam discrete random variable.

    %(before_notes)s

    Notes
    -----
    Probability distribution of the difference of two correlated or
    uncorrelated Poisson random variables.

    Let :math:`k_1` and :math:`k_2` be two Poisson-distributed random
    variables with expected values :math:`\lambda_1` and :math:`\lambda_2`.
    Then, :math:`k_1 - k_2` follows a Skellam distribution with parameters
    :math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and
    :math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where
    :math:`\rho` is the correlation coefficient between :math:`k_1` and
    :math:`k_2`. If the two Poisson-distributed random variables are
    independent then :math:`\rho = 0`.

    Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive.

    For details see: https://en.wikipedia.org/wiki/Skellam_distribution

    `skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("mu1", False, (0, np.inf), (False, False)),
                _ShapeInfo("mu2", False, (0, np.inf), (False, False))]

    def _rvs(self, mu1, mu2, size=None, random_state=None):
        n = size
        return (random_state.poisson(mu1, n) -
                random_state.poisson(mu2, n))

    def _pmf(self, x, mu1, mu2):
        with np.errstate(over='ignore'):  # see gh-17432
            px = np.where(x < 0,
                          _boost._ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
                          _boost._ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
        # ncx2.pdf() returns nan's for extremely low probabilities
        return px

    def _cdf(self, x, mu1, mu2):
        x = floor(x)
        with np.errstate(over='ignore'):  # see gh-17432
            px = np.where(x < 0,
                          _boost._ncx2_cdf(2*mu2, -2*x, 2*mu1),
                          1 - _boost._ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
        return px

    def _stats(self, mu1, mu2):
        mean = mu1 - mu2
        var = mu1 + mu2
        g1 = mean / sqrt((var)**3)
        g2 = 1 / var
        return mean, var, g1, g2


skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
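

# Editor's sketch (standard identity from the Wikipedia page cited above,
# not the noncentral-chi-square route scipy actually uses): the Skellam PMF
# can also be written with the modified Bessel function I_k:
def _demo_skellam_bessel(k=3, mu1=15.0, mu2=8.0):
    import numpy as np
    from scipy.special import iv
    pmf = (np.exp(-(mu1 + mu2)) * (mu1 / mu2) ** (k / 2)
           * iv(k, 2 * np.sqrt(mu1 * mu2)))
    return np.isclose(pmf, skellam.pmf(k, mu1, mu2))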


class yulesimon_gen(rv_discrete):
    r"""A Yule-Simon discrete random variable.

    %(before_notes)s

    Notes
    -----

    The probability mass function for `yulesimon` is:

    .. math::

        f(k) = \alpha B(k, \alpha+1)

    for :math:`k=1,2,3,...`, where :math:`\alpha>0`.
    Here :math:`B` refers to the `scipy.special.beta` function.

    The sampling of random variates is based on pg 553, Section 6.3 of [1]_.
    Our notation maps to the referenced logic via :math:`\alpha=a-1`.

    For details see the Wikipedia entry [2]_.

    References
    ----------
    .. [1] Devroye, Luc. "Non-uniform Random Variate Generation",
           (1986) Springer, New York.

    .. [2] https://en.wikipedia.org/wiki/Yule-Simon_distribution

    %(after_notes)s

    %(example)s

    """
    def _shape_info(self):
        return [_ShapeInfo("alpha", False, (0, np.inf), (False, False))]

    def _rvs(self, alpha, size=None, random_state=None):
        E1 = random_state.standard_exponential(size)
        E2 = random_state.standard_exponential(size)
        ans = ceil(-E1 / log1p(-exp(-E2 / alpha)))
        return ans

    def _pmf(self, x, alpha):
        return alpha * special.beta(x, alpha + 1)

    def _argcheck(self, alpha):
        return (alpha > 0)

    def _logpmf(self, x, alpha):
        return log(alpha) + special.betaln(x, alpha + 1)

    def _cdf(self, x, alpha):
        return 1 - x * special.beta(x, alpha + 1)

    def _sf(self, x, alpha):
        return x * special.beta(x, alpha + 1)

    def _logsf(self, x, alpha):
        return log(x) + special.betaln(x, alpha + 1)

    def _stats(self, alpha):
        mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1))
        mu2 = np.where(alpha > 2,
                       alpha**2 / ((alpha - 2.0) * (alpha - 1)**2),
                       np.inf)
        mu2 = np.where(alpha <= 1, np.nan, mu2)
        g1 = np.where(alpha > 3,
                      sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)),
                      np.inf)
        g1 = np.where(alpha <= 2, np.nan, g1)
        g2 = np.where(alpha > 4,
                      alpha + 3 + ((alpha**3 - 49 * alpha - 22) /
                                   (alpha * (alpha - 4) * (alpha - 3))),
                      np.inf)
        g2 = np.where(alpha <= 2, np.nan, g2)
        return mu, mu2, g1, g2


yulesimon = yulesimon_gen(name='yulesimon', a=1)
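

# Editor's sketch (illustrative, not scipy API): the closed-form survival
# function above, x*B(x, alpha+1), should match one minus the accumulated
# PMF; a short numeric check at an integer quantile:
def _demo_yulesimon_sf(alpha=11.0, x=5):
    import numpy as np
    k = np.arange(1, x + 1)
    tail = 1 - np.sum(yulesimon.pmf(k, alpha))
    return np.isclose(tail, yulesimon.sf(x, alpha))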


def _vectorize_rvs_over_shapes(_rvs1):
    """Decorator that vectorizes _rvs method to work on ndarray shapes"""
    # _rvs1 must be a _function_ that accepts _scalar_ args as positional
    # arguments, `size` and `random_state` as keyword arguments.
    # _rvs1 must return a random variate array with shape `size`. If `size` is
    # None, _rvs1 must return a scalar.
    # When applied to _rvs1, this decorator broadcasts ndarray args
    # and loops over them, calling _rvs1 for each set of scalar args.
    # For usage example, see _nchypergeom_gen
    def _rvs(*args, size, random_state):
        _rvs1_size, _rvs1_indices = _check_shape(args[0].shape, size)

        size = np.array(size)
        _rvs1_size = np.array(_rvs1_size)
        _rvs1_indices = np.array(_rvs1_indices)

        if np.all(_rvs1_indices):  # all args are scalars
            return _rvs1(*args, size, random_state)

        out = np.empty(size)

        # out.shape can mix dimensions associated with arg_shape and
        # _rvs1_size. Sort them to arg_shape + _rvs1_size for easy indexing
        # of dimensions corresponding with the different sets of scalar args
        j0 = np.arange(out.ndim)
        j1 = np.hstack((j0[~_rvs1_indices], j0[_rvs1_indices]))
        out = np.moveaxis(out, j1, j0)

        for i in np.ndindex(*size[~_rvs1_indices]):
            # arg can be squeezed because singleton dimensions will be
            # associated with _rvs1_size, not arg_shape per _check_shape
            out[i] = _rvs1(*[np.squeeze(arg)[i] for arg in args],
                           _rvs1_size, random_state)

        return np.moveaxis(out, j0, j1)  # move axes back before returning
    return _rvs


class _nchypergeom_gen(rv_discrete):
    r"""A noncentral hypergeometric discrete random variable.

    For subclassing by nchypergeom_fisher_gen and nchypergeom_wallenius_gen.

    """

    rvs_name = None
    dist = None

    def _shape_info(self):
        return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
                _ShapeInfo("n", True, (0, np.inf), (True, False)),
                _ShapeInfo("N", True, (0, np.inf), (True, False)),
                _ShapeInfo("odds", False, (0, np.inf), (False, False))]

    def _get_support(self, M, n, N, odds):
        N, m1, n = M, n, N  # follow Wikipedia notation
        m2 = N - m1
        x_min = np.maximum(0, n - m2)
        x_max = np.minimum(n, m1)
        return x_min, x_max

    def _argcheck(self, M, n, N, odds):
        M, n = np.asarray(M), np.asarray(n)
        N, odds = np.asarray(N), np.asarray(odds)
        cond1 = (M.astype(int) == M) & (M >= 0)
        cond2 = (n.astype(int) == n) & (n >= 0)
        cond3 = (N.astype(int) == N) & (N >= 0)
        cond4 = odds > 0
        cond5 = N <= M
        cond6 = n <= M
        return cond1 & cond2 & cond3 & cond4 & cond5 & cond6

    def _rvs(self, M, n, N, odds, size=None, random_state=None):

        @_vectorize_rvs_over_shapes
        def _rvs1(M, n, N, odds, size, random_state):
            length = np.prod(size)
            urn = _PyStochasticLib3()
            rv_gen = getattr(urn, self.rvs_name)
            rvs = rv_gen(N, n, M, odds, length, random_state)
            rvs = rvs.reshape(size)
            return rvs

        return _rvs1(M, n, N, odds, size=size, random_state=random_state)

    def _pmf(self, x, M, n, N, odds):

        x, M, n, N, odds = np.broadcast_arrays(x, M, n, N, odds)
        if x.size == 0:  # np.vectorize doesn't work with zero size input
            return np.empty_like(x)

        @np.vectorize
        def _pmf1(x, M, n, N, odds):
            urn = self.dist(N, n, M, odds, 1e-12)
            return urn.probability(x)

        return _pmf1(x, M, n, N, odds)

    def _stats(self, M, n, N, odds, moments):

        @np.vectorize
        def _moments1(M, n, N, odds):
            urn = self.dist(N, n, M, odds, 1e-12)
            return urn.moments()

        m, v = (_moments1(M, n, N, odds) if ("m" in moments or "v" in moments)
                else (None, None))
        s, k = None, None
        return m, v, s, k


class nchypergeom_fisher_gen(_nchypergeom_gen):
    r"""A Fisher's noncentral hypergeometric discrete random variable.

    Fisher's noncentral hypergeometric distribution models drawing objects of
    two types from a bin. `M` is the total number of objects, `n` is the
    number of Type I objects, and `odds` is the odds ratio: the odds of
    selecting a Type I object rather than a Type II object when there is only
    one object of each type.
    The random variate represents the number of Type I objects drawn if we
    take a handful of objects from the bin at once and find out afterwards
    that we took `N` objects.

    %(before_notes)s

    See Also
    --------
    nchypergeom_wallenius, hypergeom, nhypergeom

    Notes
    -----
    Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond
    with parameters `N`, `n`, and `M` (respectively) as defined above.

    The probability mass function is defined as

    .. math::

        p(x; M, n, N, \omega) =
        \frac{\binom{n}{x}\binom{M - n}{N-x}\omega^x}{P_0},

    for
    :math:`x \in [x_l, x_u]`,
    :math:`M \in {\mathbb N}`,
    :math:`n \in [0, M]`,
    :math:`N \in [0, M]`,
    :math:`\omega > 0`,
    where
    :math:`x_l = \max(0, N - (M - n))`,
    :math:`x_u = \min(N, n)`,

    .. math::

        P_0 = \sum_{y=x_l}^{x_u} \binom{n}{y}\binom{M - n}{N-y}\omega^y,

    and the binomial coefficients are defined as

    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.

    `nchypergeom_fisher` uses the BiasedUrn package by Agner Fog with
    permission for it to be distributed under SciPy's license.

    The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not
    universally accepted; they are chosen for consistency with `hypergeom`.

    Note that Fisher's noncentral hypergeometric distribution is distinct
    from Wallenius' noncentral hypergeometric distribution, which models
    drawing a pre-determined number `N` of objects from a bin one by one.
    When the odds ratio is unity, however, both distributions reduce to the
    ordinary hypergeometric distribution.

    %(after_notes)s

    References
    ----------
    .. [1] Agner Fog, "Biased Urn Theory".
           https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf

    .. [2] "Fisher's noncentral hypergeometric distribution", Wikipedia,
           https://en.wikipedia.org/wiki/Fisher's_noncentral_hypergeometric_distribution

    %(example)s

    """

    rvs_name = "rvs_fisher"
    dist = _PyFishersNCHypergeometric


nchypergeom_fisher = nchypergeom_fisher_gen(
    name='nchypergeom_fisher',
    longname="A Fisher's noncentral hypergeometric")


class nchypergeom_wallenius_gen(_nchypergeom_gen):
    r"""A Wallenius' noncentral hypergeometric discrete random variable.

    Wallenius' noncentral hypergeometric distribution models drawing objects of
    two types from a bin. `M` is the total number of objects, `n` is the
    number of Type I objects, and `odds` is the odds ratio: the odds of
    selecting a Type I object rather than a Type II object when there is only
    one object of each type.
    The random variate represents the number of Type I objects drawn if we
    draw a pre-determined number `N` of objects from a bin one by one.

    %(before_notes)s

    See Also
    --------
    nchypergeom_fisher, hypergeom, nhypergeom

    Notes
    -----
    Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond
    with parameters `N`, `n`, and `M` (respectively) as defined above.

    The probability mass function is defined as

    .. math::

        p(x; N, n, M) = \binom{n}{x} \binom{M - n}{N-x}
        \int_0^1 \left(1-t^{\omega/D}\right)^x\left(1-t^{1/D}\right)^{N-x} dt

    for
    :math:`x \in [x_l, x_u]`,
    :math:`M \in {\mathbb N}`,
    :math:`n \in [0, M]`,
    :math:`N \in [0, M]`,
    :math:`\omega > 0`,
    where
    :math:`x_l = \max(0, N - (M - n))`,
    :math:`x_u = \min(N, n)`,

    .. math::

        D = \omega(n - x) + ((M - n)-(N-x)),

    and the binomial coefficients are defined as

    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.

    `nchypergeom_wallenius` uses the BiasedUrn package by Agner Fog with
    permission for it to be distributed under SciPy's license.

    The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not
    universally accepted; they are chosen for consistency with `hypergeom`.

    Note that Wallenius' noncentral hypergeometric distribution is distinct
    from Fisher's noncentral hypergeometric distribution, which models
    taking a handful of objects from the bin at once and finding out
    afterwards that `N` objects were taken.
    When the odds ratio is unity, however, both distributions reduce to the
    ordinary hypergeometric distribution.

    %(after_notes)s

    References
    ----------
    .. [1] Agner Fog, "Biased Urn Theory".
           https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf

    .. [2] "Wallenius' noncentral hypergeometric distribution", Wikipedia,
           https://en.wikipedia.org/wiki/Wallenius'_noncentral_hypergeometric_distribution

    %(example)s

    """

    rvs_name = "rvs_wallenius"
    dist = _PyWalleniusNCHypergeometric


nchypergeom_wallenius = nchypergeom_wallenius_gen(
    name='nchypergeom_wallenius',
    longname="A Wallenius' noncentral hypergeometric")


# Collect names of classes and objects in this module.
pairs = list(globals().copy().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)

__all__ = _distn_names + _distn_gen_names
env-llmeval/lib/python3.10/site-packages/scipy/stats/_distr_params.py
ADDED
@@ -0,0 +1,288 @@
1 |
+
"""
|
2 |
+
Sane parameters for stats.distributions.
|
3 |
+
"""
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
distcont = [
|
7 |
+
['alpha', (3.5704770516650459,)],
|
8 |
+
['anglit', ()],
|
9 |
+
['arcsine', ()],
|
10 |
+
['argus', (1.0,)],
|
11 |
+
['beta', (2.3098496451481823, 0.62687954300963677)],
|
12 |
+
['betaprime', (5, 6)],
|
13 |
+
['bradford', (0.29891359763170633,)],
|
14 |
+
['burr', (10.5, 4.3)],
|
15 |
+
['burr12', (10, 4)],
|
16 |
+
['cauchy', ()],
|
17 |
+
['chi', (78,)],
|
18 |
+
['chi2', (55,)],
|
19 |
+
['cosine', ()],
|
20 |
+
['crystalball', (2.0, 3.0)],
|
21 |
+
['dgamma', (1.1023326088288166,)],
|
22 |
+
['dweibull', (2.0685080649914673,)],
|
23 |
+
['erlang', (10,)],
|
24 |
+
['expon', ()],
|
25 |
+
['exponnorm', (1.5,)],
|
26 |
+
['exponpow', (2.697119160358469,)],
|
27 |
+
['exponweib', (2.8923945291034436, 1.9505288745913174)],
|
28 |
+
['f', (29, 18)],
|
29 |
+
['fatiguelife', (29,)], # correction numargs = 1
|
30 |
+
['fisk', (3.0857548622253179,)],
|
31 |
+
['foldcauchy', (4.7164673455831894,)],
|
32 |
+
['foldnorm', (1.9521253373555869,)],
|
33 |
+
['gamma', (1.9932305483800778,)],
|
34 |
+
['gausshyper', (13.763771604130699, 3.1189636648681431,
|
35 |
+
2.5145980350183019, 5.1811649903971615)], # veryslow
|
36 |
+
['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)],
|
37 |
+
['genextreme', (-0.1,)],
|
38 |
+
['gengamma', (4.4162385429431925, 3.1193091679242761)],
|
39 |
+
['gengamma', (4.4162385429431925, -3.1193091679242761)],
|
40 |
+
['genhalflogistic', (0.77274727809929322,)],
|
41 |
+
['genhyperbolic', (0.5, 1.5, -0.5,)],
|
42 |
+
['geninvgauss', (2.3, 1.5)],
|
43 |
+
['genlogistic', (0.41192440799679475,)],
|
44 |
+
['gennorm', (1.2988442399460265,)],
|
45 |
+
['halfgennorm', (0.6748054997000371,)],
|
46 |
+
['genpareto', (0.1,)], # use case with finite moments
|
47 |
+
['gibrat', ()],
|
48 |
+
['gompertz', (0.94743713075105251,)],
|
49 |
+
['gumbel_l', ()],
|
50 |
+
['gumbel_r', ()],
|
51 |
+
['halfcauchy', ()],
|
52 |
+
['halflogistic', ()],
|
53 |
+
['halfnorm', ()],
|
54 |
+
['hypsecant', ()],
|
55 |
+
['invgamma', (4.0668996136993067,)],
|
56 |
+
['invgauss', (0.14546264555347513,)],
|
57 |
+
['invweibull', (10.58,)],
|
58 |
+
['jf_skew_t', (8, 4)],
|
59 |
+
['johnsonsb', (4.3172675099141058, 3.1837781130785063)],
|
60 |
+
['johnsonsu', (2.554395574161155, 2.2482281679651965)],
|
61 |
+
['kappa4', (0.0, 0.0)],
|
62 |
+
['kappa4', (-0.1, 0.1)],
|
63 |
+
['kappa4', (0.0, 0.1)],
|
64 |
+
['kappa4', (0.1, 0.0)],
|
65 |
+
['kappa3', (1.0,)],
|
66 |
+
['ksone', (1000,)], # replace 22 by 100 to avoid failing range, ticket 956
|
67 |
+
['kstwo', (10,)],
|
68 |
+
['kstwobign', ()],
|
69 |
+
['laplace', ()],
|
70 |
+
['laplace_asymmetric', (2,)],
|
71 |
+
['levy', ()],
|
72 |
+
['levy_l', ()],
|
73 |
+
['levy_stable', (1.8, -0.5)],
|
74 |
+
['loggamma', (0.41411931826052117,)],
|
75 |
+
['logistic', ()],
|
76 |
+
['loglaplace', (3.2505926592051435,)],
|
77 |
+
['lognorm', (0.95368226960575331,)],
|
78 |
+
['loguniform', (0.01, 1.25)],
|
79 |
+
['lomax', (1.8771398388773268,)],
|
80 |
+
['maxwell', ()],
|
81 |
+
['mielke', (10.4, 4.6)],
|
82 |
+
['moyal', ()],
|
83 |
+
['nakagami', (4.9673794866666237,)],
|
84 |
+
['ncf', (27, 27, 0.41578441799226107)],
|
85 |
+
['nct', (14, 0.24045031331198066)],
|
86 |
+
['ncx2', (21, 1.0560465975116415)],
|
87 |
+
['norm', ()],
|
88 |
+
['norminvgauss', (1.25, 0.5)],
|
89 |
+
['pareto', (2.621716532144454,)],
|
90 |
+
['pearson3', (0.1,)],
|
91 |
+
['pearson3', (-2,)],
|
92 |
+
['powerlaw', (1.6591133289905851,)],
|
93 |
+
['powerlaw', (0.6591133289905851,)],
|
94 |
+
['powerlognorm', (2.1413923530064087, 0.44639540782048337)],
|
95 |
+
['powernorm', (4.4453652254590779,)],
|
96 |
+
['rayleigh', ()],
|
97 |
+
['rdist', (1.6,)],
|
98 |
+
['recipinvgauss', (0.63004267809369119,)],
|
99 |
+
['reciprocal', (0.01, 1.25)],
|
100 |
+
['rel_breitwigner', (36.545206797050334, )],
|
101 |
+
['rice', (0.7749725210111873,)],
|
102 |
+
['semicircular', ()],
|
103 |
+
['skewcauchy', (0.5,)],
|
104 |
+
['skewnorm', (4.0,)],
|
105 |
+
['studentized_range', (3.0, 10.0)],
|
106 |
+
['t', (2.7433514990818093,)],
|
107 |
+
['trapezoid', (0.2, 0.8)],
|
108 |
+
['triang', (0.15785029824528218,)],
|
109 |
+
['truncexpon', (4.6907725456810478,)],
|
110 |
+
['truncnorm', (-1.0978730080013919, 2.7306754109031979)],
|
111 |
+
['truncnorm', (0.1, 2.)],
|
112 |
+
['truncpareto', (1.8, 5.3)],
|
113 |
+
['truncpareto', (2, 5)],
|
114 |
+
    ['truncweibull_min', (2.5, 0.25, 1.75)],
    ['tukeylambda', (3.1321477856738267,)],
    ['uniform', ()],
    ['vonmises', (3.9939042581071398,)],
    ['vonmises_line', (3.9939042581071398,)],
    ['wald', ()],
    ['weibull_max', (2.8687961709100187,)],
    ['weibull_min', (1.7866166930421596,)],
    ['wrapcauchy', (0.031071279018614728,)]]


distdiscrete = [
    ['bernoulli', (0.3,)],
    ['betabinom', (5, 2.3, 0.63)],
    ['betanbinom', (5, 9.3, 1)],
    ['binom', (5, 0.4)],
    ['boltzmann', (1.4, 19)],
    ['dlaplace', (0.8,)],  # 0.5
    ['geom', (0.5,)],
    ['hypergeom', (30, 12, 6)],
    ['hypergeom', (21, 3, 12)],  # numpy.random (3, 18, 12) numpy ticket:921
    ['hypergeom', (21, 18, 11)],  # numpy.random (18, 3, 11) numpy ticket:921
    ['nchypergeom_fisher', (140, 80, 60, 0.5)],
    ['nchypergeom_wallenius', (140, 80, 60, 0.5)],
    ['logser', (0.6,)],  # re-enabled, numpy ticket:921
    ['nbinom', (0.4, 0.4)],  # from tickets: 583
    ['nbinom', (5, 0.5)],
    ['planck', (0.51,)],  # 4.1
    ['poisson', (0.6,)],
    ['randint', (7, 31)],
    ['skellam', (15, 8)],
    ['zipf', (6.5,)],
    ['zipfian', (0.75, 15)],
    ['zipfian', (1.25, 10)],
    ['yulesimon', (11.0,)],
    ['nhypergeom', (20, 7, 1)]
]


invdistdiscrete = [
    # In each of the following, at least one shape parameter is invalid
    ['hypergeom', (3, 3, 4)],
    ['nhypergeom', (5, 2, 8)],
    ['nchypergeom_fisher', (3, 3, 4, 1)],
    ['nchypergeom_wallenius', (3, 3, 4, 1)],
    ['bernoulli', (1.5, )],
    ['binom', (10, 1.5)],
    ['betabinom', (10, -0.4, -0.5)],
    ['betanbinom', (10, -0.4, -0.5)],
    ['boltzmann', (-1, 4)],
    ['dlaplace', (-0.5, )],
    ['geom', (1.5, )],
    ['logser', (1.5, )],
    ['nbinom', (10, 1.5)],
    ['planck', (-0.5, )],
    ['poisson', (-0.5, )],
    ['randint', (5, 2)],
    ['skellam', (-5, -2)],
    ['zipf', (-2, )],
    ['yulesimon', (-2, )],
    ['zipfian', (-0.75, 15)]
]


invdistcont = [
    # In each of the following, at least one shape parameter is invalid
    ['alpha', (-1, )],
    ['anglit', ()],
    ['arcsine', ()],
    ['argus', (-1, )],
    ['beta', (-2, 2)],
    ['betaprime', (-2, 2)],
    ['bradford', (-1, )],
    ['burr', (-1, 1)],
    ['burr12', (-1, 1)],
    ['cauchy', ()],
    ['chi', (-1, )],
    ['chi2', (-1, )],
    ['cosine', ()],
    ['crystalball', (-1, 2)],
    ['dgamma', (-1, )],
    ['dweibull', (-1, )],
    ['erlang', (-1, )],
    ['expon', ()],
    ['exponnorm', (-1, )],
    ['exponweib', (1, -1)],
    ['exponpow', (-1, )],
    ['f', (10, -10)],
    ['fatiguelife', (-1, )],
    ['fisk', (-1, )],
    ['foldcauchy', (-1, )],
    ['foldnorm', (-1, )],
    ['genlogistic', (-1, )],
    ['gennorm', (-1, )],
    ['genpareto', (np.inf, )],
    ['genexpon', (1, 2, -3)],
    ['genextreme', (np.inf, )],
    ['genhyperbolic', (0.5, -0.5, -1.5,)],
    ['gausshyper', (1, 2, 3, -4)],
    ['gamma', (-1, )],
    ['gengamma', (-1, 0)],
    ['genhalflogistic', (-1, )],
    ['geninvgauss', (1, 0)],
    ['gibrat', ()],
    ['gompertz', (-1, )],
    ['gumbel_r', ()],
    ['gumbel_l', ()],
    ['halfcauchy', ()],
    ['halflogistic', ()],
    ['halfnorm', ()],
    ['halfgennorm', (-1, )],
    ['hypsecant', ()],
    ['invgamma', (-1, )],
    ['invgauss', (-1, )],
    ['invweibull', (-1, )],
    ['jf_skew_t', (-1, 0)],
    ['johnsonsb', (1, -2)],
    ['johnsonsu', (1, -2)],
    ['kappa4', (np.nan, 0)],
    ['kappa3', (-1, )],
    ['ksone', (-1, )],
    ['kstwo', (-1, )],
    ['kstwobign', ()],
    ['laplace', ()],
    ['laplace_asymmetric', (-1, )],
    ['levy', ()],
    ['levy_l', ()],
    ['levy_stable', (-1, 1)],
    ['logistic', ()],
    ['loggamma', (-1, )],
    ['loglaplace', (-1, )],
    ['lognorm', (-1, )],
    ['loguniform', (10, 5)],
    ['lomax', (-1, )],
    ['maxwell', ()],
    ['mielke', (1, -2)],
    ['moyal', ()],
    ['nakagami', (-1, )],
    ['ncx2', (-1, 2)],
    ['ncf', (10, 20, -1)],
    ['nct', (-1, 2)],
    ['norm', ()],
    ['norminvgauss', (5, -10)],
    ['pareto', (-1, )],
    ['pearson3', (np.nan, )],
    ['powerlaw', (-1, )],
    ['powerlognorm', (1, -2)],
    ['powernorm', (-1, )],
    ['rdist', (-1, )],
    ['rayleigh', ()],
    ['rice', (-1, )],
    ['recipinvgauss', (-1, )],
    ['semicircular', ()],
    ['skewnorm', (np.inf, )],
    ['studentized_range', (-1, 1)],
    ['rel_breitwigner', (-2, )],
    ['t', (-1, )],
    ['trapezoid', (0, 2)],
    ['triang', (2, )],
    ['truncexpon', (-1, )],
    ['truncnorm', (10, 5)],
    ['truncpareto', (-1, 5)],
    ['truncpareto', (1.8, .5)],
    ['truncweibull_min', (-2.5, 0.25, 1.75)],
    ['tukeylambda', (np.nan, )],
    ['uniform', ()],
    ['vonmises', (-1, )],
    ['vonmises_line', (-1, )],
    ['wald', ()],
    ['weibull_min', (-1, )],
    ['weibull_max', (-1, )],
    ['wrapcauchy', (2, )],
    ['reciprocal', (15, 10)],
    ['skewcauchy', (2, )]
]
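A minimal usage sketch for these tables (illustrative, not part of the file): each entry in the `invdist*` lists pairs a distribution name with shapes that should be rejected by the distribution's own argument validation. Assuming SciPy is installed, this can be checked with the private `_argcheck` method (a SciPy-internal API):

from scipy import stats

# Hypothetical check (not in the module): every invalid-parameter entry
# should fail the distribution's argument check.
for name, shapes in [['alpha', (-1, )], ['geom', (1.5, )]]:
    dist = getattr(stats, name)
    assert not dist._argcheck(*shapes)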
env-llmeval/lib/python3.10/site-packages/scipy/stats/_fit.py
ADDED
@@ -0,0 +1,1351 @@
import warnings
from collections import namedtuple
import numpy as np
from scipy import optimize, stats
from scipy._lib._util import check_random_state


def _combine_bounds(name, user_bounds, shape_domain, integral):
    """Intersection of user-defined bounds and distribution PDF/PMF domain"""

    user_bounds = np.atleast_1d(user_bounds)

    if user_bounds[0] > user_bounds[1]:
        message = (f"There are no values for `{name}` on the interval "
                   f"{list(user_bounds)}.")
        raise ValueError(message)

    bounds = (max(user_bounds[0], shape_domain[0]),
              min(user_bounds[1], shape_domain[1]))

    if integral and (np.ceil(bounds[0]) > np.floor(bounds[1])):
        message = (f"There are no integer values for `{name}` on the "
                   "interval defined by the user-provided bounds and the "
                   "domain of the distribution.")
        raise ValueError(message)
    elif not integral and (bounds[0] > bounds[1]):
        message = (f"There are no values for `{name}` on the interval "
                   "defined by the user-provided bounds and the domain "
                   "of the distribution.")
        raise ValueError(message)

    if not np.all(np.isfinite(bounds)):
        message = (f"The intersection of user-provided bounds for `{name}` "
                   "and the domain of the distribution is not finite. Please "
                   f"provide finite bounds for shape `{name}` in `bounds`.")
        raise ValueError(message)

    return bounds

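
# Illustrative behavior sketch for _combine_bounds (comments only, not part
# of the original file): the returned interval is the intersection of the
# user bounds and the parameter domain, e.g.
#
#     >>> _combine_bounds('a', (-3, 5), (0, np.inf), integral=False)
#     (0, 5)
#
# and a ValueError is raised when the intersection is empty (or contains
# no integers when `integral` is True).
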
class FitResult:
    r"""Result of fitting a discrete or continuous distribution to data

    Attributes
    ----------
    params : namedtuple
        A namedtuple containing the maximum likelihood estimates of the
        shape parameters, location, and (if applicable) scale of the
        distribution.
    success : bool or None
        Whether the optimizer considered the optimization to terminate
        successfully or not.
    message : str or None
        Any status message provided by the optimizer.

    """

    def __init__(self, dist, data, discrete, res):
        self._dist = dist
        self._data = data
        self.discrete = discrete
        self.pxf = getattr(dist, "pmf", None) or getattr(dist, "pdf", None)

        shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
        if not discrete:
            FitParams = namedtuple('FitParams', shape_names + ['loc', 'scale'])
        else:
            FitParams = namedtuple('FitParams', shape_names + ['loc'])

        self.params = FitParams(*res.x)

        # Optimizer can report success even when nllf is infinite
        if res.success and not np.isfinite(self.nllf()):
            res.success = False
            res.message = ("Optimization converged to parameter values that "
                           "are inconsistent with the data.")
        self.success = getattr(res, "success", None)
        self.message = getattr(res, "message", None)

    def __repr__(self):
        keys = ["params", "success", "message"]
        m = max(map(len, keys)) + 1
        return '\n'.join([key.rjust(m) + ': ' + repr(getattr(self, key))
                          for key in keys if getattr(self, key) is not None])

    def nllf(self, params=None, data=None):
        """Negative log-likelihood function

        Evaluates the negative of the log-likelihood function of the provided
        data at the provided parameters.

        Parameters
        ----------
        params : tuple, optional
            The shape parameters, location, and (if applicable) scale of the
            distribution as a single tuple. Default is the maximum likelihood
            estimates (``self.params``).
        data : array_like, optional
            The data for which the log-likelihood function is to be evaluated.
            Default is the data to which the distribution was fit.

        Returns
        -------
        nllf : float
            The negative of the log-likelihood function.

        """
        params = params if params is not None else self.params
        data = data if data is not None else self._data
        return self._dist.nnlf(theta=params, x=data)

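
    # Usage sketch (illustrative comment, not in the original source):
    # given a result `res` from `stats.fit`, `res.nllf()` evaluates the
    # objective at the fitted parameters, while alternative parameters or
    # data may be supplied, e.g. `res.nllf(params=(5, 0.5, 0))`.
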
def plot(self, ax=None, *, plot_type="hist"):
|
113 |
+
"""Visually compare the data against the fitted distribution.
|
114 |
+
|
115 |
+
Available only if `matplotlib` is installed.
|
116 |
+
|
117 |
+
Parameters
|
118 |
+
----------
|
119 |
+
ax : `matplotlib.axes.Axes`
|
120 |
+
Axes object to draw the plot onto, otherwise uses the current Axes.
|
121 |
+
plot_type : {"hist", "qq", "pp", "cdf"}
|
122 |
+
Type of plot to draw. Options include:
|
123 |
+
|
124 |
+
- "hist": Superposes the PDF/PMF of the fitted distribution
|
125 |
+
over a normalized histogram of the data.
|
126 |
+
- "qq": Scatter plot of theoretical quantiles against the
|
127 |
+
empirical quantiles. Specifically, the x-coordinates are the
|
128 |
+
values of the fitted distribution PPF evaluated at the
|
129 |
+
percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is the
|
130 |
+
number of data points, and the y-coordinates are the sorted
|
131 |
+
data points.
|
132 |
+
- "pp": Scatter plot of theoretical percentiles against the
|
133 |
+
observed percentiles. Specifically, the x-coordinates are the
|
134 |
+
percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is
|
135 |
+
the number of data points, and the y-coordinates are the values
|
136 |
+
of the fitted distribution CDF evaluated at the sorted
|
137 |
+
data points.
|
138 |
+
- "cdf": Superposes the CDF of the fitted distribution over the
|
139 |
+
empirical CDF. Specifically, the x-coordinates of the empirical
|
140 |
+
CDF are the sorted data points, and the y-coordinates are the
|
141 |
+
percentiles ``(np.arange(1, n) - 0.5)/n``, where ``n`` is
|
142 |
+
the number of data points.
|
143 |
+
|
144 |
+
Returns
|
145 |
+
-------
|
146 |
+
ax : `matplotlib.axes.Axes`
|
147 |
+
The matplotlib Axes object on which the plot was drawn.
|
148 |
+
|
149 |
+
Examples
|
150 |
+
--------
|
151 |
+
>>> import numpy as np
|
152 |
+
>>> from scipy import stats
|
153 |
+
>>> import matplotlib.pyplot as plt # matplotlib must be installed
|
154 |
+
>>> rng = np.random.default_rng()
|
155 |
+
>>> data = stats.nbinom(5, 0.5).rvs(size=1000, random_state=rng)
|
156 |
+
>>> bounds = [(0, 30), (0, 1)]
|
157 |
+
>>> res = stats.fit(stats.nbinom, data, bounds)
|
158 |
+
>>> ax = res.plot() # save matplotlib Axes object
|
159 |
+
|
160 |
+
The `matplotlib.axes.Axes` object can be used to customize the plot.
|
161 |
+
See `matplotlib.axes.Axes` documentation for details.
|
162 |
+
|
163 |
+
>>> ax.set_xlabel('number of trials') # customize axis label
|
164 |
+
>>> ax.get_children()[0].set_linewidth(5) # customize line widths
|
165 |
+
>>> ax.legend()
|
166 |
+
>>> plt.show()
|
167 |
+
"""
|
168 |
+
try:
|
169 |
+
import matplotlib # noqa: F401
|
170 |
+
except ModuleNotFoundError as exc:
|
171 |
+
message = "matplotlib must be installed to use method `plot`."
|
172 |
+
raise ModuleNotFoundError(message) from exc
|
173 |
+
|
174 |
+
plots = {'histogram': self._hist_plot, 'qq': self._qq_plot,
|
175 |
+
'pp': self._pp_plot, 'cdf': self._cdf_plot,
|
176 |
+
'hist': self._hist_plot}
|
177 |
+
if plot_type.lower() not in plots:
|
178 |
+
message = f"`plot_type` must be one of {set(plots.keys())}"
|
179 |
+
raise ValueError(message)
|
180 |
+
plot = plots[plot_type.lower()]
|
181 |
+
|
182 |
+
if ax is None:
|
183 |
+
import matplotlib.pyplot as plt
|
184 |
+
ax = plt.gca()
|
185 |
+
|
186 |
+
fit_params = np.atleast_1d(self.params)
|
187 |
+
|
188 |
+
return plot(ax=ax, fit_params=fit_params)
|
189 |
+
|
190 |
+
    def _hist_plot(self, ax, fit_params):
        from matplotlib.ticker import MaxNLocator

        support = self._dist.support(*fit_params)
        lb = support[0] if np.isfinite(support[0]) else min(self._data)
        ub = support[1] if np.isfinite(support[1]) else max(self._data)
        pxf = "PMF" if self.discrete else "PDF"

        if self.discrete:
            x = np.arange(lb, ub + 2)
            y = self.pxf(x, *fit_params)
            ax.vlines(x[:-1], 0, y[:-1], label='Fitted Distribution PMF',
                      color='C0')
            options = dict(density=True, bins=x, align='left', color='C1')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.set_xlabel('k')
            ax.set_ylabel('PMF')
        else:
            x = np.linspace(lb, ub, 200)
            y = self.pxf(x, *fit_params)
            ax.plot(x, y, '--', label='Fitted Distribution PDF', color='C0')
            options = dict(density=True, bins=50, align='mid', color='C1')
            ax.set_xlabel('x')
            ax.set_ylabel('PDF')

        if len(self._data) > 50 or self.discrete:
            ax.hist(self._data, label="Histogram of Data", **options)
        else:
            ax.plot(self._data, np.zeros_like(self._data), "*",
                    label='Data', color='C1')

        ax.set_title(rf"Fitted $\tt {self._dist.name}$ {pxf} and Histogram")
        ax.legend(*ax.get_legend_handles_labels())
        return ax

    def _qp_plot(self, ax, fit_params, qq):
        data = np.sort(self._data)
        ps = self._plotting_positions(len(self._data))

        if qq:
            qp = "Quantiles"
            plot_type = 'Q-Q'
            x = self._dist.ppf(ps, *fit_params)
            y = data
        else:
            qp = "Percentiles"
            plot_type = 'P-P'
            x = ps
            y = self._dist.cdf(data, *fit_params)

        ax.plot(x, y, '.', label=f'Fitted Distribution {plot_type}',
                color='C0', zorder=1)
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        lim = [min(xlim[0], ylim[0]), max(xlim[1], ylim[1])]
        if not qq:
            lim = max(lim[0], 0), min(lim[1], 1)

        if self.discrete and qq:
            q_min, q_max = int(lim[0]), int(lim[1] + 1)
            q_ideal = np.arange(q_min, q_max)
            # q_ideal = np.unique(self._dist.ppf(ps, *fit_params))
            ax.plot(q_ideal, q_ideal, 'o', label='Reference', color='k',
                    alpha=0.25, markerfacecolor='none', clip_on=True)
        elif self.discrete and not qq:
            # The intent of this is to match the plot that would be produced
            # if x were continuous on [0, 1] and y were cdf(ppf(x)).
            # It can be approximated by letting x = np.linspace(0, 1, 1000),
            # but this might not look great when zooming in. The vertical
            # portions are included to indicate where the transition occurs,
            # in case the data completely obscure the horizontal portions.
            p_min, p_max = lim
            a, b = self._dist.support(*fit_params)
            p_min = max(p_min, 0 if np.isfinite(a) else 1e-3)
            p_max = min(p_max, 1 if np.isfinite(b) else 1 - 1e-3)
            q_min, q_max = self._dist.ppf([p_min, p_max], *fit_params)
            qs = np.arange(q_min - 1, q_max + 1)
            ps = self._dist.cdf(qs, *fit_params)
            ax.step(ps, ps, '-', label='Reference', color='k', alpha=0.25,
                    clip_on=True)
        else:
            ax.plot(lim, lim, '-', label='Reference', color='k', alpha=0.25,
                    clip_on=True)

        ax.set_xlim(lim)
        ax.set_ylim(lim)
        ax.set_xlabel(rf"Fitted $\tt {self._dist.name}$ Theoretical {qp}")
        ax.set_ylabel(f"Data {qp}")
        ax.set_title(rf"Fitted $\tt {self._dist.name}$ {plot_type} Plot")
        ax.legend(*ax.get_legend_handles_labels())
        ax.set_aspect('equal')
        return ax

    def _qq_plot(self, **kwargs):
        return self._qp_plot(qq=True, **kwargs)

    def _pp_plot(self, **kwargs):
        return self._qp_plot(qq=False, **kwargs)

    def _plotting_positions(self, n, a=.5):
        # See https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot#Plotting_positions
        k = np.arange(1, n + 1)
        return (k - a) / (n + 1 - 2*a)

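
    # Worked example (illustrative comment, not in the original source):
    # with n=4 and the default a=0.5, _plotting_positions returns
    # (k - 0.5) / 4 for k = 1..4, i.e. [0.125, 0.375, 0.625, 0.875].
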
    def _cdf_plot(self, ax, fit_params):
        data = np.sort(self._data)
        ecdf = self._plotting_positions(len(self._data))
        ls = '--' if len(np.unique(data)) < 30 else '.'
        xlabel = 'k' if self.discrete else 'x'
        ax.step(data, ecdf, ls, label='Empirical CDF', color='C1', zorder=0)

        xlim = ax.get_xlim()
        q = np.linspace(*xlim, 300)
        tcdf = self._dist.cdf(q, *fit_params)

        ax.plot(q, tcdf, label='Fitted Distribution CDF', color='C0',
                zorder=1)
        ax.set_xlim(xlim)
        ax.set_ylim(0, 1)
        ax.set_xlabel(xlabel)
        ax.set_ylabel("CDF")
        ax.set_title(rf"Fitted $\tt {self._dist.name}$ and Empirical CDF")
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles[::-1], labels[::-1])
        return ax


def fit(dist, data, bounds=None, *, guess=None, method='mle',
        optimizer=optimize.differential_evolution):
    r"""Fit a discrete or continuous distribution to data

    Given a distribution, data, and bounds on the parameters of the
    distribution, return maximum likelihood estimates of the parameters.

    Parameters
    ----------
    dist : `scipy.stats.rv_continuous` or `scipy.stats.rv_discrete`
        The object representing the distribution to be fit to the data.
    data : 1D array_like
        The data to which the distribution is to be fit. If the data contain
        any of ``np.nan``, ``np.inf``, or ``-np.inf``, the fit method will
        raise a ``ValueError``.
    bounds : dict or sequence of tuples, optional
        If a dictionary, each key is the name of a parameter of the
        distribution, and the corresponding value is a tuple containing the
        lower and upper bound on that parameter. If the distribution is
        defined only for a finite range of values of that parameter, no entry
        for that parameter is required; e.g., some distributions have
        parameters which must be on the interval [0, 1]. Bounds for parameters
        location (``loc``) and scale (``scale``) are optional; by default,
        they are fixed to 0 and 1, respectively.

        If a sequence, element *i* is a tuple containing the lower and upper
        bound on the *i*\ th parameter of the distribution. In this case,
        bounds for *all* distribution shape parameters must be provided.
        Optionally, bounds for location and scale may follow the
        distribution shape parameters.

        If a shape is to be held fixed (e.g. if it is known), the
        lower and upper bounds may be equal. If a user-provided lower or upper
        bound is beyond a bound of the domain for which the distribution is
        defined, the bound of the distribution's domain will replace the
        user-provided value. Similarly, parameters which must be integral
        will be constrained to integral values within the user-provided
        bounds.
    guess : dict or array_like, optional
        If a dictionary, each key is the name of a parameter of the
        distribution, and the corresponding value is a guess for the value
        of the parameter.

        If a sequence, element *i* is a guess for the *i*\ th parameter of the
        distribution. In this case, guesses for *all* distribution shape
        parameters must be provided.

        If `guess` is not provided, guesses for the decision variables will
        not be passed to the optimizer. If `guess` is provided, guesses for
        any missing parameters will be set at the mean of the lower and
        upper bounds. Guesses for parameters which must be integral will be
        rounded to integral values, and guesses that lie outside the
        intersection of the user-provided bounds and the domain of the
        distribution will be clipped.
    method : {'mle', 'mse'}
        With ``method="mle"`` (default), the fit is computed by minimizing
        the negative log-likelihood function. A large, finite penalty
        (rather than infinite negative log-likelihood) is applied for
        observations beyond the support of the distribution.
        With ``method="mse"``, the fit is computed by minimizing
        the negative log-product spacing function. The same penalty is
        applied for observations beyond the support. We follow the approach
        of [1]_, which is generalized for samples with repeated observations.
    optimizer : callable, optional
        `optimizer` is a callable that accepts the following positional
        argument.

        fun : callable
            The objective function to be optimized. `fun` accepts one
            argument ``x``, candidate shape parameters of the distribution,
            and returns the objective function value given ``x``, `dist`,
            and the provided `data`.
            The job of `optimizer` is to find values of the decision
            variables that minimize `fun`.

        `optimizer` must also accept the following keyword argument.

        bounds : sequence of tuples
            The bounds on values of the decision variables; each element
            will be a tuple containing the lower and upper bound on a
            decision variable.

        If `guess` is provided, `optimizer` must also accept the following
        keyword argument.

        x0 : array_like
            The guesses for each decision variable.

        If the distribution has any shape parameters that must be integral or
        if the distribution is discrete and the location parameter is not
        fixed, `optimizer` must also accept the following keyword argument.

        integrality : array_like of bools
            For each decision variable, True if the decision variable
            must be constrained to integer values and False if the decision
            variable is continuous.

        `optimizer` must return an object, such as an instance of
        `scipy.optimize.OptimizeResult`, which holds the optimal values of
        the decision variables in an attribute ``x``. If attributes
        ``fun``, ``status``, or ``message`` are provided, they will be
        included in the result object returned by `fit`.

    Returns
    -------
    result : `~scipy.stats._result_classes.FitResult`
        An object with the following fields.

        params : namedtuple
            A namedtuple containing the maximum likelihood estimates of the
            shape parameters, location, and (if applicable) scale of the
            distribution.
        success : bool or None
            Whether the optimizer considered the optimization to terminate
            successfully or not.
        message : str or None
            Any status message provided by the optimizer.

        The object has the following method:

        nllf(params=None, data=None)
            By default, the negative log-likelihood function at the fitted
            `params` for the given `data`. Accepts a tuple containing
            alternative shapes, location, and scale of the distribution and
            an array of alternative data.

        plot(ax=None)
            Superposes the PDF/PMF of the fitted distribution over a
            normalized histogram of the data.

    See Also
    --------
    rv_continuous, rv_discrete

    Notes
    -----
    Optimization is more likely to converge to the maximum likelihood
    estimate when the user provides tight bounds containing the maximum
    likelihood estimate. For example, when fitting a binomial distribution
    to data, the number of experiments underlying each sample may be known,
    in which case the corresponding shape parameter ``n`` can be fixed.

    References
    ----------
    .. [1] Shao, Yongzhao, and Marjorie G. Hahn. "Maximum product of spacings
           method: a unified formulation with illustration of strong
           consistency." Illinois Journal of Mathematics 43.3 (1999):
           489-499.

    Examples
    --------
    Suppose we wish to fit a distribution to the following data.

    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> dist = stats.nbinom
    >>> shapes = (5, 0.5)
    >>> data = dist.rvs(*shapes, size=1000, random_state=rng)

    Suppose we do not know how the data were generated, but we suspect that
    it follows a negative binomial distribution with parameters *n* and
    *p*\. (See `scipy.stats.nbinom`.) We believe that the parameter *n* is
    less than 30, and we know that the parameter *p* must lie on the
    interval [0, 1]. We record this information in a variable `bounds` and
    pass this information to `fit`.

    >>> bounds = [(0, 30), (0, 1)]
    >>> res = stats.fit(dist, data, bounds)

    `fit` searches within the user-specified `bounds` for the
    values that best match the data (in the sense of maximum likelihood
    estimation). In this case, it found shape values similar to those
    from which the data were actually generated.

    >>> res.params
    FitParams(n=5.0, p=0.5028157644634368, loc=0.0)  # may vary

    We can visualize the results by superposing the probability mass
    function of the distribution (with the shapes fit to the data) over a
    normalized histogram of the data.

    >>> import matplotlib.pyplot as plt  # matplotlib must be installed to plot
    >>> res.plot()
    >>> plt.show()

    Note that the estimate for *n* was exactly integral; this is because
    the domain of the `nbinom` PMF includes only integral *n*, and the
    `nbinom` object "knows" that. `nbinom` also knows that the shape *p*
    must be a value between 0 and 1. In such a case - when the domain of the
    distribution with respect to a parameter is finite - we are not required
    to specify bounds for the parameter.

    >>> bounds = {'n': (0, 30)}  # omit parameter p using a `dict`
    >>> res2 = stats.fit(dist, data, bounds)
    >>> res2.params
    FitParams(n=5.0, p=0.5016492009232932, loc=0.0)  # may vary

    If we wish to force the distribution to be fit with *n* fixed at 6, we
    can set both the lower and upper bounds on *n* to 6. Note, however, that
    the value of the objective function being optimized is typically worse
    (higher) in this case.

    >>> bounds = {'n': (6, 6)}  # fix parameter `n`
    >>> res3 = stats.fit(dist, data, bounds)
    >>> res3.params
    FitParams(n=6.0, p=0.5486556076755706, loc=0.0)  # may vary
    >>> res3.nllf() > res.nllf()
    True  # may vary

    Note that the numerical results of the previous examples are typical,
    but they may vary because the default optimizer used by `fit`,
    `scipy.optimize.differential_evolution`, is stochastic. However, we can
    customize the settings used by the optimizer to ensure reproducibility -
    or even use a different optimizer entirely - using the `optimizer`
    parameter.

    >>> from scipy.optimize import differential_evolution
    >>> rng = np.random.default_rng(767585560716548)
    >>> def optimizer(fun, bounds, *, integrality):
    ...     return differential_evolution(fun, bounds, strategy='best2bin',
    ...                                   seed=rng, integrality=integrality)
    >>> bounds = [(0, 30), (0, 1)]
    >>> res4 = stats.fit(dist, data, bounds, optimizer=optimizer)
    >>> res4.params
    FitParams(n=5.0, p=0.5015183149259951, loc=0.0)

    """
    # --- Input Validation / Standardization --- #
    user_bounds = bounds
    user_guess = guess

    # distribution input validation and information collection
    if hasattr(dist, "pdf"):  # can't use isinstance for types
        default_bounds = {'loc': (0, 0), 'scale': (1, 1)}
        discrete = False
    elif hasattr(dist, "pmf"):
        default_bounds = {'loc': (0, 0)}
        discrete = True
    else:
        message = ("`dist` must be an instance of `rv_continuous` "
                   "or `rv_discrete`.")
        raise ValueError(message)

    try:
        param_info = dist._param_info()
    except AttributeError as e:
        message = (f"Distribution `{dist.name}` is not yet supported by "
                   "`scipy.stats.fit` because shape information has "
                   "not been defined.")
        raise ValueError(message) from e

    # data input validation
    data = np.asarray(data)
    if data.ndim != 1:
        message = "`data` must be exactly one-dimensional."
        raise ValueError(message)
    if not (np.issubdtype(data.dtype, np.number)
            and np.all(np.isfinite(data))):
        message = "All elements of `data` must be finite numbers."
        raise ValueError(message)

    # bounds input validation and information collection
    n_params = len(param_info)
    n_shapes = n_params - (1 if discrete else 2)
    param_list = [param.name for param in param_info]
    param_names = ", ".join(param_list)
    shape_names = ", ".join(param_list[:n_shapes])

    if user_bounds is None:
        user_bounds = {}

    if isinstance(user_bounds, dict):
        default_bounds.update(user_bounds)
        user_bounds = default_bounds
        user_bounds_array = np.empty((n_params, 2))
        for i in range(n_params):
            param_name = param_info[i].name
            user_bound = user_bounds.pop(param_name, None)
            if user_bound is None:
                user_bound = param_info[i].domain
            user_bounds_array[i] = user_bound
        if user_bounds:
            message = ("Bounds provided for the following unrecognized "
                       f"parameters will be ignored: {set(user_bounds)}")
            warnings.warn(message, RuntimeWarning, stacklevel=2)

    else:
        try:
            user_bounds = np.asarray(user_bounds, dtype=float)
            if user_bounds.size == 0:
                user_bounds = np.empty((0, 2))
        except ValueError as e:
            message = ("Each element of a `bounds` sequence must be a tuple "
                       "containing two elements: the lower and upper bound "
                       "of a distribution parameter.")
            raise ValueError(message) from e
        if (user_bounds.ndim != 2 or user_bounds.shape[1] != 2):
            message = ("Each element of `bounds` must be a tuple specifying "
                       "the lower and upper bounds of a shape parameter")
            raise ValueError(message)
        if user_bounds.shape[0] < n_shapes:
            message = (f"A `bounds` sequence must contain at least "
                       f"{n_shapes} elements: tuples specifying the lower "
                       f"and upper bounds of all shape parameters "
                       f"{shape_names}.")
            raise ValueError(message)
        if user_bounds.shape[0] > n_params:
            message = ("A `bounds` sequence may not contain more than "
                       f"{n_params} elements: tuples specifying the lower "
                       "and upper bounds of distribution parameters "
                       f"{param_names}.")
            raise ValueError(message)

        user_bounds_array = np.empty((n_params, 2))
        user_bounds_array[n_shapes:] = list(default_bounds.values())
        user_bounds_array[:len(user_bounds)] = user_bounds

    user_bounds = user_bounds_array
    validated_bounds = []
    for i in range(n_params):
        name = param_info[i].name
        user_bound = user_bounds_array[i]
        param_domain = param_info[i].domain
        integral = param_info[i].integrality
        combined = _combine_bounds(name, user_bound, param_domain, integral)
        validated_bounds.append(combined)

    bounds = np.asarray(validated_bounds)
    integrality = [param.integrality for param in param_info]

    # guess input validation

    if user_guess is None:
        guess_array = None
    elif isinstance(user_guess, dict):
        default_guess = {param.name: np.mean(bound)
                         for param, bound in zip(param_info, bounds)}
        unrecognized = set(user_guess) - set(default_guess)
        if unrecognized:
            message = ("Guesses provided for the following unrecognized "
                       f"parameters will be ignored: {unrecognized}")
            warnings.warn(message, RuntimeWarning, stacklevel=2)
        default_guess.update(user_guess)

        message = ("Each element of `guess` must be a scalar "
                   "guess for a distribution parameter.")
        try:
            guess_array = np.asarray([default_guess[param.name]
                                      for param in param_info], dtype=float)
        except ValueError as e:
            raise ValueError(message) from e

    else:
        message = ("Each element of `guess` must be a scalar "
                   "guess for a distribution parameter.")
        try:
            user_guess = np.asarray(user_guess, dtype=float)
        except ValueError as e:
            raise ValueError(message) from e
        if user_guess.ndim != 1:
            raise ValueError(message)
        if user_guess.shape[0] < n_shapes:
            message = (f"A `guess` sequence must contain at least "
                       f"{n_shapes} elements: scalar guesses for the "
                       f"distribution shape parameters {shape_names}.")
            raise ValueError(message)
        if user_guess.shape[0] > n_params:
            message = ("A `guess` sequence may not contain more than "
                       f"{n_params} elements: scalar guesses for the "
                       f"distribution parameters {param_names}.")
            raise ValueError(message)

        guess_array = np.mean(bounds, axis=1)
        guess_array[:len(user_guess)] = user_guess

    if guess_array is not None:
        guess_rounded = guess_array.copy()

        guess_rounded[integrality] = np.round(guess_rounded[integrality])
        rounded = np.where(guess_rounded != guess_array)[0]
        for i in rounded:
            message = (f"Guess for parameter `{param_info[i].name}` "
                       f"rounded from {guess_array[i]} to "
                       f"{guess_rounded[i]}.")
            warnings.warn(message, RuntimeWarning, stacklevel=2)

        guess_clipped = np.clip(guess_rounded, bounds[:, 0], bounds[:, 1])
        clipped = np.where(guess_clipped != guess_rounded)[0]
        for i in clipped:
            message = (f"Guess for parameter `{param_info[i].name}` "
                       f"clipped from {guess_rounded[i]} to "
                       f"{guess_clipped[i]}.")
            warnings.warn(message, RuntimeWarning, stacklevel=2)

        guess = guess_clipped
    else:
        guess = None

    # --- Fitting --- #
    def nllf(free_params, data=data):  # bind data NOW
        with np.errstate(invalid='ignore', divide='ignore'):
            return dist._penalized_nnlf(free_params, data)

    def nlpsf(free_params, data=data):  # bind data NOW
        with np.errstate(invalid='ignore', divide='ignore'):
            return dist._penalized_nlpsf(free_params, data)

    methods = {'mle': nllf, 'mse': nlpsf}
    objective = methods[method.lower()]

    with np.errstate(invalid='ignore', divide='ignore'):
        kwds = {}
        if bounds is not None:
            kwds['bounds'] = bounds
        if np.any(integrality):
            kwds['integrality'] = integrality
        if guess is not None:
            kwds['x0'] = guess
        res = optimizer(objective, **kwds)

    return FitResult(dist, data, discrete, res)
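

# Call-pattern sketch (illustrative comment, not in the original source):
# with the default optimizer, no integer shapes, and no guess, the
# `optimizer(objective, **kwds)` call above reduces to
#
#     res = optimize.differential_evolution(objective, bounds=bounds)
#
# while integer shapes and a guess add `integrality=...` and `x0=...`.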


GoodnessOfFitResult = namedtuple('GoodnessOfFitResult',
                                 ('fit_result', 'statistic', 'pvalue',
                                  'null_distribution'))


def goodness_of_fit(dist, data, *, known_params=None, fit_params=None,
                    guessed_params=None, statistic='ad', n_mc_samples=9999,
                    random_state=None):
    r"""
    Perform a goodness of fit test comparing data to a distribution family.

    Given a distribution family and data, perform a test of the null
    hypothesis that the data were drawn from a distribution in that family.
    Any known parameters of the distribution may be specified. Remaining
    parameters of the distribution will be fit to the data, and the p-value
    of the test is computed accordingly. Several statistics for comparing
    the distribution to data are available.

    Parameters
    ----------
    dist : `scipy.stats.rv_continuous`
        The object representing the distribution family under the null
        hypothesis.
    data : 1D array_like
        Finite, uncensored data to be tested.
    known_params : dict, optional
        A dictionary containing name-value pairs of known distribution
        parameters. Monte Carlo samples are randomly drawn from the
        null-hypothesized distribution with these values of the parameters.
        Before the statistic is evaluated for each Monte Carlo sample, only
        remaining unknown parameters of the null-hypothesized distribution
        family are fit to the samples; the known parameters are held fixed.
        If all parameters of the distribution family are known, then the
        step of fitting the distribution family to each sample is omitted.
    fit_params : dict, optional
        A dictionary containing name-value pairs of distribution parameters
        that have already been fit to the data, e.g. using `scipy.stats.fit`
        or the ``fit`` method of `dist`. Monte Carlo samples are drawn from
        the null-hypothesized distribution with these specified values of
        the parameter. On those Monte Carlo samples, however, these and all
        other unknown parameters of the null-hypothesized distribution
        family are fit before the statistic is evaluated.
    guessed_params : dict, optional
        A dictionary containing name-value pairs of distribution parameters
        which have been guessed. These parameters are always considered as
        free parameters and are fit both to the provided `data` as well as
        to the Monte Carlo samples drawn from the null-hypothesized
        distribution. The purpose of these `guessed_params` is to be used as
        initial values for the numerical fitting procedure.
    statistic : {"ad", "ks", "cvm", "filliben"} or callable, optional
        The statistic used to compare data to a distribution after fitting
        unknown parameters of the distribution family to the data. The
        Anderson-Darling ("ad") [1]_, Kolmogorov-Smirnov ("ks") [1]_,
        Cramer-von Mises ("cvm") [1]_, and Filliben ("filliben") [7]_
        statistics are available. Alternatively, a callable with signature
        ``(dist, data, axis)`` may be supplied to compute the statistic.
        Here ``dist`` is a frozen distribution object (potentially with
        array parameters), ``data`` is an array of Monte Carlo samples (of
        compatible shape), and ``axis`` is the axis of ``data`` along which
        the statistic must be computed.
    n_mc_samples : int, default: 9999
        The number of Monte Carlo samples drawn from the null hypothesized
        distribution to form the null distribution of the statistic. The
        sample size of each is the same as the given `data`.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate the Monte
        Carlo samples.

        If `random_state` is ``None`` (default), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance, then the provided instance is used.

    Returns
    -------
    res : GoodnessOfFitResult
        An object with the following attributes.

        fit_result : `~scipy.stats._result_classes.FitResult`
            An object representing the fit of the provided `dist` to `data`.
            This object includes the values of distribution family
            parameters that fully define the null-hypothesized distribution,
            that is, the distribution from which Monte Carlo samples are
            drawn.
        statistic : float
            The value of the statistic comparing provided `data` to the
            null-hypothesized distribution.
        pvalue : float
            The proportion of elements in the null distribution with
            statistic values at least as extreme as the statistic value of
            the provided `data`.
        null_distribution : ndarray
            The value of the statistic for each Monte Carlo sample
            drawn from the null-hypothesized distribution.

    Notes
    -----
    This is a generalized Monte Carlo goodness-of-fit procedure, special
    cases of which correspond with various Anderson-Darling tests,
    Lilliefors' test, etc. The test is described in [2]_, [3]_, and [4]_ as
    a parametric bootstrap test. This is a Monte Carlo test in which
    parameters that specify the distribution from which samples are drawn
    have been estimated from the data. We describe the test using "Monte
    Carlo" rather than "parametric bootstrap" throughout to avoid confusion
    with the more familiar nonparametric bootstrap, and describe how the
    test is performed below.

    *Traditional goodness of fit tests*

    Traditionally, critical values corresponding with a fixed set of
    significance levels are pre-calculated using Monte Carlo methods. Users
    perform the test by calculating the value of the test statistic only
    for their observed `data` and comparing this value to tabulated
    critical values. This practice is not very flexible, as tables are not
    available for all distributions and combinations of known and unknown
    parameter values. Also, results can be inaccurate when critical values
    are interpolated from limited tabulated data to correspond with the
    user's sample size and fitted parameter values. To overcome these
    shortcomings, this function allows the user to perform the Monte Carlo
    trials adapted to their particular data.

    *Algorithmic overview*

    In brief, this routine executes the following steps:

    1. Fit unknown parameters to the given `data`, thereby forming the
       "null-hypothesized" distribution, and compute the statistic of
       this pair of data and distribution.
    2. Draw random samples from this null-hypothesized distribution.
    3. Fit the unknown parameters to each random sample.
    4. Calculate the statistic between each sample and the distribution
       that has been fit to the sample.
    5. Compare the value of the statistic corresponding with `data` from
       (1) against the values of the statistic corresponding with the
       random samples from (4). The p-value is the proportion of samples
       with a statistic value greater than or equal to the statistic of
       the observed data.

    In more detail, the steps are as follows.

    First, any unknown parameters of the distribution family specified by
    `dist` are fit to the provided `data` using maximum likelihood
    estimation. (One exception is the normal distribution with unknown
    location and scale: we use the bias-corrected standard deviation
    ``np.std(data, ddof=1)`` for the scale as recommended in [1]_.)
    These values of the parameters specify a particular member of the
    distribution family referred to as the "null-hypothesized
    distribution", that is, the distribution from which the data were
    sampled under the null hypothesis. The `statistic`, which compares data
    to a distribution, is computed between `data` and the null-hypothesized
    distribution.

    Next, many (specifically `n_mc_samples`) new samples, each containing
    the same number of observations as `data`, are drawn from the
    null-hypothesized distribution. All unknown parameters of the
    distribution family `dist` are fit to *each resample*, and the
    `statistic` is computed between each sample and its corresponding
    fitted distribution. These values of the statistic form the Monte Carlo
    null distribution (not to be confused with the "null-hypothesized
    distribution" above).

    The p-value of the test is the proportion of statistic values in the
    Monte Carlo null distribution that are at least as extreme as the
    statistic value of the provided `data`. More precisely, the p-value is
    given by

    .. math::

        p = \frac{b + 1}{m + 1}

    where :math:`b` is the number of statistic values in the Monte Carlo
    null distribution that are greater than or equal to the statistic value
    calculated for `data`, and :math:`m` is the number of elements in the
    Monte Carlo null distribution (`n_mc_samples`). The addition of
    :math:`1` to the numerator and denominator can be thought of as
    including the value of the statistic corresponding with `data` in the
    null distribution, but a more formal explanation is given in [5]_.
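
    As an illustrative sketch (schematic pseudocode with hypothetical
    helper names, not the actual implementation), the procedure is::

        null_hypothesized = fit_unknown_params(dist, data)        # step 1
        observed = statistic(null_hypothesized, data)
        null_distribution = []
        for _ in range(n_mc_samples):                             # steps 2-4
            sample = null_hypothesized.rvs(size=len(data))
            refit = fit_unknown_params(dist, sample)
            null_distribution.append(statistic(refit, sample))
        b = sum(s >= observed for s in null_distribution)         # step 5
        pvalue = (b + 1) / (n_mc_samples + 1)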

    *Limitations*

    The test can be very slow for some distribution families because
    unknown parameters of the distribution family must be fit to each of
    the Monte Carlo samples, and for most distributions in SciPy,
    distribution fitting is performed via numerical optimization.

    *Anti-Pattern*

    For this reason, it may be tempting
    to treat parameters of the distribution pre-fit to `data` (by the user)
    as though they were `known_params`, as specification of all parameters
    of the distribution precludes the need to fit the distribution to each
    Monte Carlo sample. (This is essentially how the original
    Kolmogorov-Smirnov test is performed.) Although such a test can provide
    evidence against the null hypothesis, the test is conservative in the
    sense that small p-values will tend to (greatly) *overestimate* the
    probability of making a type I error (that is, rejecting the null
    hypothesis although it is true), and the power of the test is low (that
    is, it is less likely to reject the null hypothesis even when the null
    hypothesis is false).
    This is because the Monte Carlo samples are less likely to agree with
    the null-hypothesized distribution as well as `data`. This tends to
    increase the values of the statistic recorded in the null distribution,
    so that a larger number of them exceed the value of the statistic for
    `data`, thereby inflating the p-value.

    References
    ----------
    .. [1] M. A. Stephens (1974). "EDF Statistics for Goodness of Fit and
           Some Comparisons." Journal of the American Statistical
           Association, Vol. 69, pp. 730-737.
    .. [2] W. Stute, W. G. Manteiga, and M. P. Quindimil (1993).
           "Bootstrap based goodness-of-fit-tests." Metrika 40.1: 243-256.
    .. [3] C. Genest and B. Rémillard (2008). "Validity of the parametric
           bootstrap for goodness-of-fit testing in semiparametric models."
           Annales de l'IHP Probabilités et statistiques. Vol. 44. No. 6.
    .. [4] I. Kojadinovic and J. Yan (2012). "Goodness-of-fit testing based
           on a weighted bootstrap: A fast large-sample alternative to the
           parametric bootstrap." Canadian Journal of Statistics 40.3:
           480-500.
    .. [5] B. Phipson and G. K. Smyth (2010). "Permutation P-values Should
           Never Be Zero: Calculating Exact P-values When Permutations Are
           Randomly Drawn." Statistical Applications in Genetics and
           Molecular Biology 9.1.
    .. [6] H. W. Lilliefors (1967). "On the Kolmogorov-Smirnov test for
           normality with mean and variance unknown." Journal of the
           American Statistical Association 62.318: 399-402.
    .. [7] Filliben, James J. "The probability plot correlation coefficient
           test for normality." Technometrics 17.1 (1975): 111-117.

Examples
|
964 |
+
--------
|
965 |
+
A well-known test of the null hypothesis that data were drawn from a
|
966 |
+
given distribution is the Kolmogorov-Smirnov (KS) test, available in SciPy
|
967 |
+
as `scipy.stats.ks_1samp`. Suppose we wish to test whether the following
|
968 |
+
data:
|
969 |
+
|
970 |
+
>>> import numpy as np
|
971 |
+
>>> from scipy import stats
|
972 |
+
>>> rng = np.random.default_rng()
|
973 |
+
>>> x = stats.uniform.rvs(size=75, random_state=rng)
|
974 |
+
|
975 |
+
were sampled from a normal distribution. To perform a KS test, the
|
976 |
+
empirical distribution function of the observed data will be compared
|
977 |
+
against the (theoretical) cumulative distribution function of a normal
|
978 |
+
distribution. Of course, to do this, the normal distribution under the null
|
979 |
+
hypothesis must be fully specified. This is commonly done by first fitting
|
980 |
+
the ``loc`` and ``scale`` parameters of the distribution to the observed
|
981 |
+
data, then performing the test.
|
982 |
+
|
983 |
+
>>> loc, scale = np.mean(x), np.std(x, ddof=1)
|
984 |
+
>>> cdf = stats.norm(loc, scale).cdf
|
985 |
+
>>> stats.ks_1samp(x, cdf)
|
986 |
+
KstestResult(statistic=0.1119257570456813, pvalue=0.2827756409939257)
|
987 |
+
|
988 |
+
An advantage of the KS-test is that the p-value - the probability of
|
989 |
+
obtaining a value of the test statistic under the null hypothesis as
|
990 |
+
extreme as the value obtained from the observed data - can be calculated
|
991 |
+
exactly and efficiently. `goodness_of_fit` can only approximate these
|
992 |
+
results.
|
993 |
+
|
994 |
+
>>> known_params = {'loc': loc, 'scale': scale}
|
995 |
+
>>> res = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
|
996 |
+
... statistic='ks', random_state=rng)
|
997 |
+
>>> res.statistic, res.pvalue
|
998 |
+
(0.1119257570456813, 0.2788)
|
999 |
+
|
1000 |
+
The statistic matches exactly, but the p-value is estimated by forming
|
1001 |
+
a "Monte Carlo null distribution", that is, by explicitly drawing random
|
1002 |
+
samples from `scipy.stats.norm` with the provided parameters and
|
1003 |
+
calculating the stastic for each. The fraction of these statistic values
|
1004 |
+
at least as extreme as ``res.statistic`` approximates the exact p-value
|
1005 |
+
calculated by `scipy.stats.ks_1samp`.

However, in many cases, we would prefer to test only that the data were
sampled from one of *any* member of the normal distribution family, not
specifically from the normal distribution with the location and scale
fitted to the observed sample. In this case, Lilliefors [6]_ argued that
the KS test is far too conservative (that is, the p-value overstates
the actual probability of rejecting a true null hypothesis) and thus lacks
power - the ability to reject the null hypothesis when the null hypothesis
is actually false.
Indeed, our p-value above is approximately 0.28, which is far too large
to reject the null hypothesis at any common significance level.

Consider why this might be. Note that in the KS test above, the statistic
always compares data against the CDF of a normal distribution fitted to the
*observed data*. This tends to reduce the value of the statistic for the
observed data, but it is "unfair" when computing the statistic for other
samples, such as those we randomly draw to form the Monte Carlo null
distribution. It is easy to correct for this: whenever we compute the KS
statistic of a sample, we use the CDF of a normal distribution fitted
to *that sample*. The null distribution in this case has not been
calculated exactly and is typically approximated using Monte Carlo methods
as described above. This is where `goodness_of_fit` excels.

>>> res = stats.goodness_of_fit(stats.norm, x, statistic='ks',
...                             random_state=rng)
>>> res.statistic, res.pvalue
(0.1119257570456813, 0.0196)

Indeed, this p-value is much smaller, and small enough to (correctly)
reject the null hypothesis at common significance levels, including 5% and
2.5%.

However, the KS statistic is not very sensitive to all deviations from
normality. The original advantage of the KS statistic was the ability
to compute the null distribution theoretically, but a more sensitive
statistic - resulting in higher test power - can be used now that we can
approximate the null distribution computationally. The Anderson-Darling
statistic [1]_ tends to be more sensitive, and critical values of this
statistic have been tabulated for various significance levels and sample
sizes using Monte Carlo methods.

>>> res = stats.anderson(x, 'norm')
>>> print(res.statistic)
1.2139573337497467
>>> print(res.critical_values)
[0.549 0.625 0.75  0.875 1.041]
>>> print(res.significance_level)
[15.  10.   5.   2.5  1. ]

Here, the observed value of the statistic exceeds the critical value
corresponding with a 1% significance level. This tells us that the p-value
of the observed data is less than 1%, but what is it? We could interpolate
from these (already-interpolated) values, but `goodness_of_fit` can
estimate it directly.

>>> res = stats.goodness_of_fit(stats.norm, x, statistic='ad',
...                             random_state=rng)
>>> res.statistic, res.pvalue
(1.2139573337497467, 0.0034)

A further advantage is that use of `goodness_of_fit` is not limited to
a particular set of distributions or conditions on which parameters
are known versus which must be estimated from data. Instead,
`goodness_of_fit` can estimate p-values relatively quickly for any
distribution with a sufficiently fast and reliable ``fit`` method. For
instance, here we perform a goodness of fit test using the Cramer-von Mises
statistic against the Rayleigh distribution with known location and unknown
scale.

>>> rng = np.random.default_rng()
>>> x = stats.chi(df=2.2, loc=0, scale=2).rvs(size=1000, random_state=rng)
>>> res = stats.goodness_of_fit(stats.rayleigh, x, statistic='cvm',
...                             known_params={'loc': 0}, random_state=rng)

This executes fairly quickly, but to check the reliability of the ``fit``
method, we should inspect the fit result.

>>> res.fit_result  # location is as specified, and scale is reasonable
  params: FitParams(loc=0.0, scale=2.1026719844231243)
 success: True
 message: 'The fit was performed successfully.'
>>> import matplotlib.pyplot as plt  # matplotlib must be installed to plot
>>> res.fit_result.plot()
>>> plt.show()

If the distribution is not fit to the observed data as well as possible,
the test may not control the type I error rate, that is, the chance of
rejecting the null hypothesis even when it is true.

We should also look for extreme outliers in the null distribution that
may be caused by unreliable fitting. These do not necessarily invalidate
the result, but they tend to reduce the test's power.

>>> _, ax = plt.subplots()
>>> ax.hist(np.log10(res.null_distribution))
>>> ax.set_xlabel("log10 of CVM statistic under the null hypothesis")
>>> ax.set_ylabel("Frequency")
>>> ax.set_title("Histogram of the Monte Carlo null distribution")
>>> plt.show()

This plot seems reassuring.

If the ``fit`` method is working reliably, and if the distribution of the
test statistic is not particularly sensitive to the values of the fitted
parameters, then the p-value provided by `goodness_of_fit` is expected to
be a good approximation.

>>> res.statistic, res.pvalue
(0.2231991510248692, 0.0525)

"""
    args = _gof_iv(dist, data, known_params, fit_params, guessed_params,
                   statistic, n_mc_samples, random_state)
    (dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params,
     guessed_rfd_params, statistic, n_mc_samples_int, random_state) = args

    # Fit null hypothesis distribution to data
    nhd_fit_fun = _get_fit_fun(dist, data, guessed_nhd_params,
                               fixed_nhd_params)
    nhd_vals = nhd_fit_fun(data)
    nhd_dist = dist(*nhd_vals)

    def rvs(size):
        return nhd_dist.rvs(size=size, random_state=random_state)

    # Define statistic
    fit_fun = _get_fit_fun(dist, data, guessed_rfd_params, fixed_rfd_params)
    if callable(statistic):
        compare_fun = statistic
    else:
        compare_fun = _compare_dict[statistic]
    alternative = getattr(compare_fun, 'alternative', 'greater')

    def statistic_fun(data, axis):
        # Make things simple by always working along the last axis.
        data = np.moveaxis(data, axis, -1)
        rfd_vals = fit_fun(data)
        rfd_dist = dist(*rfd_vals)
        return compare_fun(rfd_dist, data, axis=-1)

    res = stats.monte_carlo_test(data, rvs, statistic_fun, vectorized=True,
                                 n_resamples=n_mc_samples, axis=-1,
                                 alternative=alternative)
    opt_res = optimize.OptimizeResult()
    opt_res.success = True
    opt_res.message = "The fit was performed successfully."
    opt_res.x = nhd_vals
    # Only continuous distributions for now, hence discrete=False.
    # There's no fundamental limitation; it's just that we're not using
    # stats.fit, discrete distributions don't have a `fit` method, and
    # we haven't written any vectorized fit functions for a discrete
    # distribution yet.
    return GoodnessOfFitResult(FitResult(dist, data, False, opt_res),
                               res.statistic, res.pvalue,
                               res.null_distribution)


def _get_fit_fun(dist, data, guessed_params, fixed_params):

    shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
    param_names = shape_names + ['loc', 'scale']
    fparam_names = ['f' + name for name in param_names]
    all_fixed = not set(fparam_names).difference(fixed_params)
    guessed_shapes = [guessed_params.pop(x, None)
                      for x in shape_names if x in guessed_params]

    if all_fixed:
        def fit_fun(data):
            return [fixed_params[name] for name in fparam_names]
    # Define statistic, including fitting distribution to data
    elif dist in _fit_funs:
        def fit_fun(data):
            params = _fit_funs[dist](data, **fixed_params)
            params = np.asarray(np.broadcast_arrays(*params))
            if params.ndim > 1:
                params = params[..., np.newaxis]
            return params
    else:
        def fit_fun_1d(data):
            return dist.fit(data, *guessed_shapes, **guessed_params,
                            **fixed_params)

        def fit_fun(data):
            params = np.apply_along_axis(fit_fun_1d, axis=-1, arr=data)
            if params.ndim > 1:
                params = params.T[..., np.newaxis]
            return params

    return fit_fun


# Vectorized fitting functions. These accept ND `data` in which each row
# (slice along the last axis) is a sample to fit, plus scalar fixed
# parameters. They return a tuple of shape parameter arrays, each of shape
# data.shape[:-1].
def _fit_norm(data, floc=None, fscale=None):
    loc = floc
    scale = fscale
    if loc is None and scale is None:
        loc = np.mean(data, axis=-1)
        scale = np.std(data, ddof=1, axis=-1)
    elif loc is None:
        loc = np.mean(data, axis=-1)
    elif scale is None:
        scale = np.sqrt(((data - loc)**2).mean(axis=-1))
    return loc, scale
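
# A quick illustration of the vectorized contract above (a sketch, not part
# of the module's behavior): for `data` of shape (m, n), i.e. m samples of
# length n each, `_fit_norm` returns per-sample estimates of shape (m,):
#
#     rng = np.random.default_rng()
#     data = rng.normal(loc=1.0, scale=2.0, size=(4, 100))
#     loc, scale = _fit_norm(data)   # loc.shape == scale.shape == (4,)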


_fit_funs = {stats.norm: _fit_norm}  # type: ignore[attr-defined]


# Vectorized goodness of fit statistic functions. These accept a frozen
# distribution object and `data` in which each row (slice along the last
# axis) is a sample.


def _anderson_darling(dist, data, axis):
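    # The standard Anderson-Darling computing formula, restated here for
    # reference (this is exactly what the lines below evaluate):
    #     A^2 = -n - (1/n) * sum_{i=1..n} (2i - 1) *
    #                        [log F(x_(i)) + log(1 - F(x_(n+1-i)))]
    # `dist.logsf` supplies log(1 - F) directly for numerical stability.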
    x = np.sort(data, axis=-1)
    n = data.shape[-1]
    i = np.arange(1, n+1)
    Si = (2*i - 1)/n * (dist.logcdf(x) + dist.logsf(x[..., ::-1]))
    S = np.sum(Si, axis=-1)
    return -n - S


def _compute_dplus(cdfvals):  # adapted from _stats_py before gh-17062
    n = cdfvals.shape[-1]
    return (np.arange(1.0, n + 1) / n - cdfvals).max(axis=-1)


def _compute_dminus(cdfvals):
    n = cdfvals.shape[-1]
    return (cdfvals - np.arange(0.0, n)/n).max(axis=-1)


def _kolmogorov_smirnov(dist, data, axis):
    x = np.sort(data, axis=-1)
    cdfvals = dist.cdf(x)
    Dplus = _compute_dplus(cdfvals)  # always works along last axis
    Dminus = _compute_dminus(cdfvals)
    return np.maximum(Dplus, Dminus)


def _corr(X, M):
    # Correlation coefficient r, simplified and vectorized as we need it.
    # See [7] Equation (2). Lemma 1/2 are only for distributions symmetric
    # about 0.
    Xm = X.mean(axis=-1, keepdims=True)
    Mm = M.mean(axis=-1, keepdims=True)
    num = np.sum((X - Xm) * (M - Mm), axis=-1)
    den = np.sqrt(np.sum((X - Xm)**2, axis=-1) * np.sum((M - Mm)**2, axis=-1))
    return num/den


def _filliben(dist, data, axis):
    # [7] Section 8 # 1
    X = np.sort(data, axis=-1)

    # [7] Section 8 # 2
    n = data.shape[-1]
    k = np.arange(1, n+1)
    # Filliben used an approximation for the uniform distribution order
    # statistic medians:
    #     m = (k - .3175)/(n + 0.365)
    #     m[-1] = 0.5**(1/n)
    #     m[0] = 1 - m[-1]
    # We can just as easily use the (theoretically) exact values. See e.g.
    # https://en.wikipedia.org/wiki/Order_statistic
    # "Order statistics sampled from a uniform distribution"
    m = stats.beta(k, n + 1 - k).median()

    # [7] Section 8 # 3
    M = dist.ppf(m)

    # [7] Section 8 # 4
    return _corr(X, M)
_filliben.alternative = 'less'  # type: ignore[attr-defined]
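
# Note: `_filliben` is a correlation coefficient, so *small* values of the
# statistic indicate a poor fit; hence the 'less' alternative above, whereas
# the other statistics here grow with lack of fit (default 'greater').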


def _cramer_von_mises(dist, data, axis):
    x = np.sort(data, axis=-1)
    n = data.shape[-1]
    cdfvals = dist.cdf(x)
    u = (2*np.arange(1, n+1) - 1)/(2*n)
    w = 1 / (12*n) + np.sum((u - cdfvals)**2, axis=-1)
    return w


_compare_dict = {"ad": _anderson_darling, "ks": _kolmogorov_smirnov,
                 "cvm": _cramer_von_mises, "filliben": _filliben}


def _gof_iv(dist, data, known_params, fit_params, guessed_params, statistic,
            n_mc_samples, random_state):

    if not isinstance(dist, stats.rv_continuous):
        message = ("`dist` must be a (non-frozen) instance of "
                   "`stats.rv_continuous`.")
        raise TypeError(message)

    data = np.asarray(data, dtype=float)
    if not data.ndim == 1:
        message = "`data` must be a one-dimensional array of numbers."
        raise ValueError(message)

    # Leave validation of these key/value pairs to the `fit` method,
    # but collect them into the dictionaries that will be used below.
    known_params = known_params or dict()
    fit_params = fit_params or dict()
    guessed_params = guessed_params or dict()

    known_params_f = {("f" + key): val for key, val in known_params.items()}
    fit_params_f = {("f" + key): val for key, val in fit_params.items()}

    # These are the values of parameters of the null hypothesis distribution
    # family with which resamples are drawn
    fixed_nhd_params = known_params_f.copy()
    fixed_nhd_params.update(fit_params_f)

    # These are fixed when fitting the distribution family to resamples
    fixed_rfd_params = known_params_f.copy()

    # These are used as guesses when fitting the distribution family to
    # the original data
    guessed_nhd_params = guessed_params.copy()

    # These are used as guesses when fitting the distribution family to
    # resamples
    guessed_rfd_params = fit_params.copy()
    guessed_rfd_params.update(guessed_params)

    if not callable(statistic):
        statistic = statistic.lower()
        statistics = {'ad', 'ks', 'cvm', 'filliben'}
        if statistic not in statistics:
            message = f"`statistic` must be one of {statistics}."
            raise ValueError(message)

    n_mc_samples_int = int(n_mc_samples)
    if n_mc_samples_int != n_mc_samples:
        message = "`n_mc_samples` must be an integer."
        raise TypeError(message)

    random_state = check_random_state(random_state)

    return (dist, data, fixed_nhd_params, fixed_rfd_params,
            guessed_nhd_params, guessed_rfd_params, statistic,
            n_mc_samples_int, random_state)
env-llmeval/lib/python3.10/site-packages/scipy/stats/_hypotests.py
ADDED
@@ -0,0 +1,2021 @@
from collections import namedtuple
from dataclasses import dataclass
from math import comb
import numpy as np
import warnings
from itertools import combinations
import scipy.stats
from scipy.optimize import shgo
from . import distributions
from ._common import ConfidenceInterval
from ._continuous_distns import chi2, norm
from scipy.special import gamma, kv, gammaln
from scipy.fft import ifft
from ._stats_pythran import _a_ij_Aij_Dij2
from ._stats_pythran import (
    _concordant_pairs as _P, _discordant_pairs as _Q
)
from ._axis_nan_policy import _axis_nan_policy_factory
from scipy.stats import _stats_py

__all__ = ['epps_singleton_2samp', 'cramervonmises', 'somersd',
           'barnard_exact', 'boschloo_exact', 'cramervonmises_2samp',
           'tukey_hsd', 'poisson_means_test']

Epps_Singleton_2sampResult = namedtuple('Epps_Singleton_2sampResult',
                                        ('statistic', 'pvalue'))


@_axis_nan_policy_factory(Epps_Singleton_2sampResult, n_samples=2, too_small=4)
def epps_singleton_2samp(x, y, t=(0.4, 0.8)):
    """Compute the Epps-Singleton (ES) test statistic.

    Test the null hypothesis that two samples have the same underlying
    probability distribution.

    Parameters
    ----------
    x, y : array-like
        The two samples of observations to be tested. Input must not have
        more than one dimension. Samples can have different lengths.
    t : array-like, optional
        The points (t1, ..., tn) at which the empirical characteristic
        function is to be evaluated. These should be positive, distinct
        numbers. The default value (0.4, 0.8) is proposed in [1]_. Input
        must not have more than one dimension.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The associated p-value based on the asymptotic chi2-distribution.

    See Also
    --------
    ks_2samp, anderson_ksamp

    Notes
    -----
    Testing whether two samples are generated by the same underlying
    distribution is a classical question in statistics. A widely used test
    is the Kolmogorov-Smirnov (KS) test, which relies on the empirical
    distribution function. Epps and Singleton introduce a test based on the
    empirical characteristic function in [1]_.

    One advantage of the ES test compared to the KS test is that it does
    not assume a continuous distribution. In [1]_, the authors conclude
    that the test also has a higher power than the KS test in many
    examples. They recommend the use of the ES test for discrete samples as
    well as continuous samples with at least 25 observations each, whereas
    `anderson_ksamp` is recommended for smaller sample sizes in the
    continuous case.

    The p-value is computed from the asymptotic distribution of the test
    statistic, which follows a `chi2` distribution. If the sample size of
    both `x` and `y` is below 25, the small sample correction proposed in
    [1]_ is applied to the test statistic.

    The default values of `t` are determined in [1]_ by considering
    various distributions and finding good values that lead to a high power
    of the test in general. Table III in [1]_ gives the optimal values for
    the distributions tested in that study. The values of `t` are scaled by
    the semi-interquartile range in the implementation, see [1]_.
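
    As a minimal usage sketch (illustrative only; the resulting numbers
    depend on the randomly drawn samples, so no outputs are shown):

    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> x = rng.normal(size=100)
    >>> y = rng.normal(loc=0.5, size=120)
    >>> statistic, pvalue = stats.epps_singleton_2samp(x, y)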

    References
    ----------
    .. [1] T. W. Epps and K. J. Singleton, "An omnibus test for the
           two-sample problem using the empirical characteristic function",
           Journal of Statistical Computation and Simulation 26,
           p. 177--203, 1986.

    .. [2] S. J. Goerg and J. Kaiser, "Nonparametric testing of
           distributions - the Epps-Singleton two-sample test using the
           empirical characteristic function", The Stata Journal 9(3),
           p. 454--465, 2009.

    """
    # x and y are converted to arrays by the decorator
    t = np.asarray(t)
    # check if x and y are valid inputs
    nx, ny = len(x), len(y)
    if (nx < 5) or (ny < 5):
        raise ValueError('x and y should have at least 5 elements, but '
                         f'len(x) = {nx} and len(y) = {ny}.')
    if not np.isfinite(x).all():
        raise ValueError('x must not contain nonfinite values.')
    if not np.isfinite(y).all():
        raise ValueError('y must not contain nonfinite values.')
    n = nx + ny

    # check if t is valid
    if t.ndim > 1:
        raise ValueError(f't must be 1d, but t.ndim equals {t.ndim}.')
    if np.less_equal(t, 0).any():
        raise ValueError('t must contain positive elements only.')

    # rescale t with semi-iqr as proposed in [1]; import iqr here to avoid
    # circular import
    from scipy.stats import iqr
    sigma = iqr(np.hstack((x, y))) / 2
    ts = np.reshape(t, (-1, 1)) / sigma

    # covariance estimation of ES test
    gx = np.vstack((np.cos(ts*x), np.sin(ts*x))).T  # shape = (nx, 2*len(t))
    gy = np.vstack((np.cos(ts*y), np.sin(ts*y))).T
    cov_x = np.cov(gx.T, bias=True)  # the test uses biased cov-estimate
    cov_y = np.cov(gy.T, bias=True)
    est_cov = (n/nx)*cov_x + (n/ny)*cov_y
    est_cov_inv = np.linalg.pinv(est_cov)
    r = np.linalg.matrix_rank(est_cov_inv)
    if r < 2*len(t):
        warnings.warn('Estimated covariance matrix does not have full rank. '
                      'This indicates a bad choice of the input t and the '
                      'test might not be consistent.',  # see p. 183 in [1]_
                      stacklevel=2)

    # compute test statistic w distributed asympt. as chisquare with df=r
    g_diff = np.mean(gx, axis=0) - np.mean(gy, axis=0)
    w = n*np.dot(g_diff.T, np.dot(est_cov_inv, g_diff))

    # apply small-sample correction
    if (max(nx, ny) < 25):
        corr = 1.0/(1.0 + n**(-0.45) + 10.1*(nx**(-1.7) + ny**(-1.7)))
        w = corr * w

    p = chi2.sf(w, r)

    return Epps_Singleton_2sampResult(w, p)


def poisson_means_test(k1, n1, k2, n2, *, diff=0, alternative='two-sided'):
    r"""
    Performs the Poisson means test, AKA the "E-test".

    This is a test of the null hypothesis that the difference between means
    of two Poisson distributions is `diff`. The samples are provided as the
    number of events `k1` and `k2` observed within measurement intervals
    (e.g. of time, space, number of observations) of sizes `n1` and `n2`.

    Parameters
    ----------
    k1 : int
        Number of events observed from distribution 1.
    n1 : float
        Size of sample from distribution 1.
    k2 : int
        Number of events observed from distribution 2.
    n2 : float
        Size of sample from distribution 2.
    diff : float, default=0
        The hypothesized difference in means between the distributions
        underlying the samples.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided': the difference between distribution means is not
          equal to `diff`
        * 'less': the difference between distribution means is less than
          `diff`
        * 'greater': the difference between distribution means is greater
          than `diff`

    Returns
    -------
    statistic : float
        The test statistic (see [1]_ equation 3.3).
    pvalue : float
        The probability of achieving such an extreme value of the test
        statistic under the null hypothesis.

    Notes
    -----
    Let:

    .. math:: X_1 \sim \mbox{Poisson}(\mathtt{n1}\lambda_1)

    be a random variable independent of

    .. math:: X_2 \sim \mbox{Poisson}(\mathtt{n2}\lambda_2)

    and let ``k1`` and ``k2`` be the observed values of :math:`X_1`
    and :math:`X_2`, respectively. Then `poisson_means_test` uses the number
    of observed events ``k1`` and ``k2`` from samples of size ``n1`` and
    ``n2``, respectively, to test the null hypothesis that

    .. math::
       H_0: \lambda_1 - \lambda_2 = \mathtt{diff}

    A benefit of the E-test is that it has good power for small sample
    sizes, which can reduce sampling costs [1]_. It has been evaluated and
    determined to be more powerful than the comparable C-test, sometimes
    referred to as the Poisson exact test.

    References
    ----------
    .. [1] Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test
           for comparing two Poisson means. Journal of Statistical Planning
           and Inference, 119(1), 23-35.

    .. [2] Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results
           in testing samples from Poisson series: With an application to
           testing clover seed for dodder. Biometrika, 31(3/4), 313-323.

    Examples
    --------
    Suppose that a gardener wishes to test the number of dodder (weed)
    seeds in a sack of clover seeds that they buy from a seed company. It
    has previously been established that the number of dodder seeds in
    clover follows the Poisson distribution.

    A 100 gram sample is drawn from the sack before being shipped to the
    gardener. The sample is analyzed, and it is found to contain no dodder
    seeds; that is, `k1` is 0. However, upon arrival, the gardener draws
    another 100 gram sample from the sack. This time, three dodder seeds
    are found in the sample; that is, `k2` is 3. The gardener would like to
    know if the difference is significant and not due to chance. The null
    hypothesis is that the difference between the two samples is merely due
    to chance, or that :math:`\lambda_1 - \lambda_2 = \mathtt{diff}` where
    :math:`\mathtt{diff} = 0`. The alternative hypothesis is that the
    difference is not due to chance, or
    :math:`\lambda_1 - \lambda_2 \ne 0`. The gardener selects a
    significance level of 5% to reject the null hypothesis in favor of the
    alternative [2]_.

    >>> import scipy.stats as stats
    >>> res = stats.poisson_means_test(0, 100, 3, 100)
    >>> res.statistic, res.pvalue
    (-1.7320508075688772, 0.08837900929018157)

    The p-value is .088, indicating a near 9% chance of observing a value
    of the test statistic under the null hypothesis. This exceeds 5%, so
    the gardener does not reject the null hypothesis, as the difference
    cannot be regarded as significant at this level.
    """

    _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative)

    # "for a given k_1 and k_2, an estimate of \lambda_2 is given by"
    # [1] (3.4)
    lmbd_hat2 = ((k1 + k2) / (n1 + n2) - diff * n1 / (n1 + n2))

    # "\hat{\lambda_{2k}} may be less than or equal to zero ... and in this
    # case the null hypothesis cannot be rejected ... [and] it is not
    # necessary to compute the p-value". [1] page 26 below eq. (3.6).
    if lmbd_hat2 <= 0:
        return _stats_py.SignificanceResult(0, 1)

    # The unbiased variance estimate [1] (3.2)
    var = k1 / (n1 ** 2) + k2 / (n2 ** 2)

    # The _observed_ pivot statistic from the input. It follows the
    # unnumbered equation after equation (3.3). This is used later in
    # comparison with the computed pivot statistics in an indicator
    # function.
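    # For reference, that pivot statistic (a restatement of the unnumbered
    # equation following (3.3) in [1]; nothing beyond what the line below
    # computes) is
    #     T(k1, k2) = (k1/n1 - k2/n2 - diff) / sqrt(k1/n1**2 + k2/n2**2)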
    t_k1k2 = (k1 / n1 - k2 / n2 - diff) / np.sqrt(var)

    # Equation (3.5) of [1] is lengthy, so it is broken into several parts,
    # beginning here. Note that the probability mass function of the
    # Poisson distribution is exp(-mu)*mu^k/k!, and it is parametrized by
    # the shape mu, denoted here as nlmbd_hat*. The strategy for evaluating
    # the double summation in (3.5) is to create two arrays of the values
    # of the two products inside the summation, broadcast them together
    # into a matrix, and then sum across the entire matrix.

    # Compute constants (as seen in the first and second separated products
    # in (3.5)). (This is the shape (\mu) parameter of the Poisson
    # distribution.)
    nlmbd_hat1 = n1 * (lmbd_hat2 + diff)
    nlmbd_hat2 = n2 * lmbd_hat2

    # Determine summation bounds for tail ends of distribution rather than
    # summing to infinity. `x1*` is for the outer sum and `x2*` is the
    # inner sum.
    x1_lb, x1_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat1)
    x2_lb, x2_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat2)

    # Construct arrays to function as the x_1 and x_2 counters on the
    # summation in (3.5). `x1` is in columns and `x2` is in rows to allow
    # for broadcasting.
    x1 = np.arange(x1_lb, x1_ub + 1)
    x2 = np.arange(x2_lb, x2_ub + 1)[:, None]

    # These are the two products in equation (3.5) with `prob_x1` being the
    # first (left side) and `prob_x2` being the second (right side). (To
    # make as clear as possible: the 1st contains a "+ d" term, the 2nd
    # does not.)
    prob_x1 = distributions.poisson.pmf(x1, nlmbd_hat1)
    prob_x2 = distributions.poisson.pmf(x2, nlmbd_hat2)

    # compute constants for use in the "pivot statistic" per the
    # unnumbered equation following (3.3).
    lmbd_x1 = x1 / n1
    lmbd_x2 = x2 / n2
    lmbds_diff = lmbd_x1 - lmbd_x2 - diff
    var_x1x2 = lmbd_x1 / n1 + lmbd_x2 / n2

    # This is the 'pivot statistic' for use in the indicator of the
    # summation (left side of "I[.]").
    with np.errstate(invalid='ignore', divide='ignore'):
        t_x1x2 = lmbds_diff / np.sqrt(var_x1x2)

    # `indicator` implements the "I[.] ... the indicator function" per
    # the paragraph following equation (3.5).
    if alternative == 'two-sided':
        indicator = np.abs(t_x1x2) >= np.abs(t_k1k2)
    elif alternative == 'less':
        indicator = t_x1x2 <= t_k1k2
    else:
        indicator = t_x1x2 >= t_k1k2

    # Multiply all combinations of the products together, exclude terms
    # based on the `indicator`, and then sum. (3.5)
    pvalue = np.sum((prob_x1 * prob_x2)[indicator])
    return _stats_py.SignificanceResult(t_k1k2, pvalue)


def _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative):
    """Check for valid types and values of input to `poisson_means_test`."""
    if k1 != int(k1) or k2 != int(k2):
        raise TypeError('`k1` and `k2` must be integers.')

    count_err = '`k1` and `k2` must be greater than or equal to 0.'
    if k1 < 0 or k2 < 0:
        raise ValueError(count_err)

    if n1 <= 0 or n2 <= 0:
        raise ValueError('`n1` and `n2` must be greater than 0.')

    if diff < 0:
        raise ValueError('diff must be greater than or equal to 0.')

    alternatives = {'two-sided', 'less', 'greater'}
    if alternative.lower() not in alternatives:
        raise ValueError(f"Alternative must be one of '{alternatives}'.")


class CramerVonMisesResult:
    def __init__(self, statistic, pvalue):
        self.statistic = statistic
        self.pvalue = pvalue

    def __repr__(self):
        return (f"{self.__class__.__name__}(statistic={self.statistic}, "
                f"pvalue={self.pvalue})")


def _psi1_mod(x):
    """
    psi1 is defined in equation 1.10 in Csörgő, S. and Faraway, J. (1996).
    This implements a modified version by excluding the term V(x) / 12
    (here: _cdf_cvm_inf(x) / 12) to avoid evaluating _cdf_cvm_inf(x)
    twice in _cdf_cvm.

    Implementation based on MAPLE code of Julian Faraway and R code of the
    function pCvM in the package goftest (v1.1.1), permission granted
    by Adrian Baddeley. Main difference in the implementation: the code
    here keeps adding terms of the series until the terms are small enough.
    """

    def _ed2(y):
        z = y**2 / 4
        b = kv(1/4, z) + kv(3/4, z)
        return np.exp(-z) * (y/2)**(3/2) * b / np.sqrt(np.pi)

    def _ed3(y):
        z = y**2 / 4
        c = np.exp(-z) / np.sqrt(np.pi)
        return c * (y/2)**(5/2) * (2*kv(1/4, z) + 3*kv(3/4, z) - kv(5/4, z))

    def _Ak(k, x):
        m = 2*k + 1
        sx = 2 * np.sqrt(x)
        y1 = x**(3/4)
        y2 = x**(5/4)

        e1 = m * gamma(k + 1/2) * _ed2((4 * k + 3)/sx) / (9 * y1)
        e2 = gamma(k + 1/2) * _ed3((4 * k + 1) / sx) / (72 * y2)
        e3 = 2 * (m + 2) * gamma(k + 3/2) * _ed3((4 * k + 5) / sx) / (12 * y2)
        e4 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 1) / sx) / (144 * y1)
        e5 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 5) / sx) / (144 * y1)

        return e1 + e2 + e3 + e4 + e5

    x = np.asarray(x)
    tot = np.zeros_like(x, dtype='float')
    cond = np.ones_like(x, dtype='bool')
    k = 0
    while np.any(cond):
        z = -_Ak(k, x[cond]) / (np.pi * gamma(k + 1))
        tot[cond] = tot[cond] + z
        cond[cond] = np.abs(z) >= 1e-7
        k += 1

    return tot


def _cdf_cvm_inf(x):
    """
    Calculate the cdf of the Cramér-von Mises statistic (infinite sample
    size).

    See equation 1.2 in Csörgő, S. and Faraway, J. (1996).

    Implementation based on MAPLE code of Julian Faraway and R code of the
    function pCvM in the package goftest (v1.1.1), permission granted
    by Adrian Baddeley. Main difference in the implementation: the code
    here keeps adding terms of the series until the terms are small enough.

    The function is not expected to be accurate for large values of x, say
    x > 4, when the cdf is very close to 1.
    """
    x = np.asarray(x)

    def term(x, k):
        # this expression can be found in [2], second line of (1.3)
        u = np.exp(gammaln(k + 0.5) - gammaln(k+1)) / (np.pi**1.5 * np.sqrt(x))
        y = 4*k + 1
        q = y**2 / (16*x)
        b = kv(0.25, q)
        return u * np.sqrt(y) * np.exp(-q) * b

    tot = np.zeros_like(x, dtype='float')
    cond = np.ones_like(x, dtype='bool')
    k = 0
    while np.any(cond):
        z = term(x[cond], k)
        tot[cond] = tot[cond] + z
        cond[cond] = np.abs(z) >= 1e-7
        k += 1

    return tot


def _cdf_cvm(x, n=None):
    """
    Calculate the cdf of the Cramér-von Mises statistic for a finite sample
    size n. If n is None, use the asymptotic cdf (n=inf).

    See equation 1.8 in Csörgő, S. and Faraway, J. (1996) for finite
    samples, 1.2 for the asymptotic cdf.

    The function is not expected to be accurate for large values of x, say
    x > 2, when the cdf is very close to 1 and it might return values > 1
    in that case, e.g. _cdf_cvm(2.0, 12) = 1.0000027556716846. Moreover, it
    is not accurate for small values of n, especially close to the bounds
    of the distribution's domain, [1/(12*n), n/3], where the value jumps to
    0 and 1, respectively. These are limitations of the approximation by
    Csörgő and Faraway (1996) implemented in this function.
    """
    x = np.asarray(x)
    if n is None:
        y = _cdf_cvm_inf(x)
    else:
        # support of the test statistic is [1/(12*n), n/3], see 1.1 in [2]
        y = np.zeros_like(x, dtype='float')
        sup = (1./(12*n) < x) & (x < n/3.)
        # note: _psi1_mod does not include the term _cdf_cvm_inf(x) / 12,
        # therefore we need to add it here
        y[sup] = _cdf_cvm_inf(x[sup]) * (1 + 1./(12*n)) + _psi1_mod(x[sup]) / n
        y[x >= n/3] = 1

    if y.ndim == 0:
        return y[()]
    return y


def _cvm_result_to_tuple(res):
    return res.statistic, res.pvalue


@_axis_nan_policy_factory(CramerVonMisesResult, n_samples=1, too_small=1,
                          result_to_tuple=_cvm_result_to_tuple)
def cramervonmises(rvs, cdf, args=()):
    """Perform the one-sample Cramér-von Mises test for goodness of fit.

    This performs a test of the goodness of fit of a cumulative
    distribution function (cdf) :math:`F` compared to the empirical
    distribution function :math:`F_n` of observed random variates
    :math:`X_1, ..., X_n` that are assumed to be independent and
    identically distributed ([1]_). The null hypothesis is that the
    :math:`X_i` have cumulative distribution :math:`F`.

    Parameters
    ----------
    rvs : array_like
        A 1-D array of observed values of the random variables
        :math:`X_i`.
    cdf : str or callable
        The cumulative distribution function :math:`F` to test the
        observations against. If a string, it should be the name of a
        distribution in `scipy.stats`. If a callable, that callable is
        used to calculate the cdf: ``cdf(x, *args) -> float``.
    args : tuple, optional
        Distribution parameters. These are assumed to be known; see Notes.

    Returns
    -------
    res : object with attributes
        statistic : float
            Cramér-von Mises statistic.
        pvalue : float
            The p-value.

    See Also
    --------
    kstest, cramervonmises_2samp

    Notes
    -----
    .. versionadded:: 1.6.0

    The p-value relies on the approximation given by equation 1.8 in [2]_.
    It is important to keep in mind that the p-value is only accurate if
    one tests a simple hypothesis, i.e. the parameters of the reference
    distribution are known. If the parameters are estimated from the data
    (composite hypothesis), the computed p-value is not reliable.

    References
    ----------
    .. [1] Cramér-von Mises criterion, Wikipedia,
           https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93von_Mises_criterion
    .. [2] Csörgő, S. and Faraway, J. (1996). The Exact and Asymptotic
           Distribution of Cramér-von Mises Statistics. Journal of the
           Royal Statistical Society, pp. 221-234.

    Examples
    --------
    Suppose we wish to test whether data generated by
    ``scipy.stats.norm.rvs`` were, in fact, drawn from the standard normal
    distribution. We choose a significance level of ``alpha=0.05``.

    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng(165417232101553420507139617764912913465)
    >>> x = stats.norm.rvs(size=500, random_state=rng)
    >>> res = stats.cramervonmises(x, 'norm')
    >>> res.statistic, res.pvalue
    (0.1072085112565724, 0.5508482238203407)

    The p-value exceeds our chosen significance level, so we do not
    reject the null hypothesis that the observed sample is drawn from the
    standard normal distribution.

    Now suppose we wish to check whether the same samples shifted by 2.1
    are consistent with being drawn from a normal distribution with a mean
    of 2.

    >>> y = x + 2.1
    >>> res = stats.cramervonmises(y, 'norm', args=(2,))
    >>> res.statistic, res.pvalue
    (0.8364446265294695, 0.00596286797008283)

    Here we have used the `args` keyword to specify the mean (``loc``)
    of the normal distribution to test the data against. This is equivalent
    to the following, in which we create a frozen normal distribution with
    mean 2, then pass its ``cdf`` method as an argument.

    >>> frozen_dist = stats.norm(loc=2)
    >>> res = stats.cramervonmises(y, frozen_dist.cdf)
    >>> res.statistic, res.pvalue
    (0.8364446265294695, 0.00596286797008283)

    In either case, we would reject the null hypothesis that the observed
    sample is drawn from a normal distribution with a mean of 2 (and
    default variance of 1) because the p-value is less than our chosen
    significance level.

    """
    if isinstance(cdf, str):
        cdf = getattr(distributions, cdf).cdf

    vals = np.sort(np.asarray(rvs))

    if vals.size <= 1:
        raise ValueError('The sample must contain at least two observations.')

    n = len(vals)
    cdfvals = cdf(vals, *args)
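
    # For reference, the lines below evaluate the standard computing
    # formula for the one-sample Cramér-von Mises statistic (cf. [2]_):
    #     W = 1/(12*n) + sum_i (u_i - F(x_(i)))**2,  u_i = (2*i - 1)/(2*n)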
    u = (2*np.arange(1, n+1) - 1)/(2*n)
    w = 1/(12*n) + np.sum((u - cdfvals)**2)

    # avoid small negative values that can occur due to the approximation
    p = max(0, 1. - _cdf_cvm(w, n))

    return CramerVonMisesResult(statistic=w, pvalue=p)


def _get_wilcoxon_distr(n):
    """
    Distribution of probability of the Wilcoxon ranksum statistic r_plus
    (sum of ranks of positive differences).
    Returns an array with the probabilities of all the possible ranks
    r = 0, ..., n*(n+1)/2
    """
    c = np.ones(1, dtype=np.float64)
    for k in range(1, n + 1):
        prev_c = c
        c = np.zeros(k * (k + 1) // 2 + 1, dtype=np.float64)
        m = len(prev_c)
        c[:m] = prev_c * 0.5
        c[-m:] += prev_c * 0.5
    return c


def _get_wilcoxon_distr2(n):
    """
    Distribution of probability of the Wilcoxon ranksum statistic r_plus
    (sum of ranks of positive differences).
    Returns an array with the probabilities of all the possible ranks
    r = 0, ..., n*(n+1)/2
    This is a slower reference function.

    References
    ----------
    .. [1] Harris T, Hardin JW. Exact Wilcoxon Signed-Rank and Wilcoxon
           Mann-Whitney Ranksum Tests. The Stata Journal. 2013;13(2):337-343.
    """
    ai = np.arange(1, n+1)[:, None]
    t = n*(n+1)/2
    q = 2*t
    j = np.arange(q)
    theta = 2*np.pi/q*j
    phi_sp = np.prod(np.cos(theta*ai), axis=0)
    phi_s = np.exp(1j*theta*t) * phi_sp
    p = np.real(ifft(phi_s))
    res = np.zeros(int(t)+1)
    res[:-1:] = p[::2]
    res[0] /= 2
    res[-1] = res[0]
    return res


def _tau_b(A):
    """Calculate Kendall's tau-b and p-value from contingency table."""
    # See [2] 2.2 and 4.2

    # contingency table must be truly 2D
    if A.shape[0] == 1 or A.shape[1] == 1:
        return np.nan, np.nan

    NA = A.sum()
    PA = _P(A)
    QA = _Q(A)
    Sri2 = (A.sum(axis=1)**2).sum()
    Scj2 = (A.sum(axis=0)**2).sum()
    denominator = (NA**2 - Sri2)*(NA**2 - Scj2)

    tau = (PA-QA)/(denominator)**0.5

    numerator = 4*(_a_ij_Aij_Dij2(A) - (PA - QA)**2 / NA)
    s02_tau_b = numerator/denominator
    if s02_tau_b == 0:  # Avoid divide by zero
        return tau, 0
    Z = tau/s02_tau_b**0.5
    p = 2*norm.sf(abs(Z))  # 2-sided p-value

    return tau, p


def _somers_d(A, alternative='two-sided'):
    """Calculate Somers' D and p-value from contingency table."""
    # See [3] page 1740
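    # For orientation, the point estimate computed below is
    #     D(Y|X) = (P - Q) / (N**2 - sum_i r_i**2)
    # where P/Q are the numbers of concordant/discordant pairs and r_i are
    # the row sums of the contingency table A.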
|
678 |
+
|
679 |
+
# contingency table must be truly 2D
|
680 |
+
if A.shape[0] <= 1 or A.shape[1] <= 1:
|
681 |
+
return np.nan, np.nan
|
682 |
+
|
683 |
+
NA = A.sum()
|
684 |
+
NA2 = NA**2
|
685 |
+
PA = _P(A)
|
686 |
+
QA = _Q(A)
|
687 |
+
Sri2 = (A.sum(axis=1)**2).sum()
|
688 |
+
|
689 |
+
d = (PA - QA)/(NA2 - Sri2)
|
690 |
+
|
691 |
+
S = _a_ij_Aij_Dij2(A) - (PA-QA)**2/NA
|
692 |
+
|
693 |
+
with np.errstate(divide='ignore'):
|
694 |
+
Z = (PA - QA)/(4*(S))**0.5
|
695 |
+
|
696 |
+
p = scipy.stats._stats_py._get_pvalue(Z, distributions.norm, alternative)
|
697 |
+
|
698 |
+
return d, p
|
699 |
+
|
700 |
+
|
701 |
+
@dataclass
|
702 |
+
class SomersDResult:
|
703 |
+
statistic: float
|
704 |
+
pvalue: float
|
705 |
+
table: np.ndarray
|
706 |
+
|
707 |
+
|
708 |
+
def somersd(x, y=None, alternative='two-sided'):
    r"""Calculates Somers' D, an asymmetric measure of ordinal association.

    Like Kendall's :math:`\tau`, Somers' :math:`D` is a measure of the
    correspondence between two rankings. Both statistics consider the
    difference between the number of concordant and discordant pairs in two
    rankings :math:`X` and :math:`Y`, and both are normalized such that values
    close to 1 indicate strong agreement and values close to -1 indicate
    strong disagreement. They differ in how they are normalized. To show the
    relationship, Somers' :math:`D` can be defined in terms of Kendall's
    :math:`\tau_a`:

    .. math::
        D(Y|X) = \frac{\tau_a(X, Y)}{\tau_a(X, X)}

    Suppose the first ranking :math:`X` has :math:`r` distinct ranks and the
    second ranking :math:`Y` has :math:`s` distinct ranks. These two lists of
    :math:`n` rankings can also be viewed as an :math:`r \times s` contingency
    table in which element :math:`i, j` is the number of rank pairs with rank
    :math:`i` in ranking :math:`X` and rank :math:`j` in ranking :math:`Y`.
    Accordingly, `somersd` also allows the input data to be supplied as a
    single, 2D contingency table instead of as two separate, 1D rankings.

    Note that the definition of Somers' :math:`D` is asymmetric: in general,
    :math:`D(Y|X) \neq D(X|Y)`. ``somersd(x, y)`` calculates Somers'
    :math:`D(Y|X)`: the "row" variable :math:`X` is treated as an independent
    variable, and the "column" variable :math:`Y` is dependent. For Somers'
    :math:`D(X|Y)`, swap the input lists or transpose the input table.

    Parameters
    ----------
    x : array_like
        1D array of rankings, treated as the (row) independent variable.
        Alternatively, a 2D contingency table.
    y : array_like, optional
        If `x` is a 1D array of rankings, `y` is a 1D array of rankings of the
        same length, treated as the (column) dependent variable.
        If `x` is 2D, `y` is ignored.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the rank correlation is nonzero
        * 'less': the rank correlation is negative (less than zero)
        * 'greater': the rank correlation is positive (greater than zero)

    Returns
    -------
    res : SomersDResult
        A `SomersDResult` object with the following fields:

        statistic : float
            The Somers' :math:`D` statistic.
        pvalue : float
            The p-value for a hypothesis test whose null
            hypothesis is an absence of association, :math:`D=0`.
            See notes for more information.
        table : 2D array
            The contingency table formed from rankings `x` and `y` (or the
            provided contingency table, if `x` is a 2D array)

    See Also
    --------
    kendalltau : Calculates Kendall's tau, another correlation measure.
    weightedtau : Computes a weighted version of Kendall's tau.
    spearmanr : Calculates a Spearman rank-order correlation coefficient.
    pearsonr : Calculates a Pearson correlation coefficient.

    Notes
    -----
    This function follows the contingency table approach of [2]_ and
    [3]_. *p*-values are computed based on an asymptotic approximation of
    the test statistic distribution under the null hypothesis :math:`D=0`.

    Theoretically, hypothesis tests based on Kendall's :math:`\tau` and
    Somers' :math:`D` should be identical.
    However, the *p*-values returned by `kendalltau` are based
    on the null hypothesis of *independence* between :math:`X` and :math:`Y`
    (i.e. the population from which pairs in :math:`X` and :math:`Y` are
    sampled contains equal numbers of all possible pairs), which is more
    specific than the null hypothesis :math:`D=0` used here. If the null
    hypothesis of independence is desired, it is acceptable to use the
    *p*-value returned by `kendalltau` with the statistic returned by
    `somersd` and vice versa. For more information, see [2]_.

    Contingency tables are formatted according to the convention used by
    SAS and R: the first ranking supplied (``x``) is the "row" variable, and
    the second ranking supplied (``y``) is the "column" variable. This is
    opposite the convention of Somers' original paper [1]_.

    References
    ----------
    .. [1] Robert H. Somers, "A New Asymmetric Measure of Association for
           Ordinal Variables", *American Sociological Review*, Vol. 27, No. 6,
           pp. 799--811, 1962.
    .. [2] Morton B. Brown and Jacqueline K. Benedetti, "Sampling Behavior of
           Tests for Correlation in Two-Way Contingency Tables", *Journal of
           the American Statistical Association* Vol. 72, No. 358, pp.
           309--315, 1977.
    .. [3] SAS Institute, Inc., "The FREQ Procedure (Book Excerpt)",
           *SAS/STAT 9.2 User's Guide, Second Edition*, SAS Publishing, 2009.
    .. [4] Laerd Statistics, "Somers' d using SPSS Statistics", *SPSS
           Statistics Tutorials and Statistical Guides*,
           https://statistics.laerd.com/spss-tutorials/somers-d-using-spss-statistics.php,
           Accessed July 31, 2020.

    Examples
    --------
    We calculate Somers' D for the example given in [4]_, in which a hotel
    chain owner seeks to determine the association between hotel room
    cleanliness and customer satisfaction. The independent variable, hotel
    room cleanliness, is ranked on an ordinal scale: "below average (1)",
    "average (2)", or "above average (3)". The dependent variable, customer
    satisfaction, is ranked on a second scale: "very dissatisfied (1)",
    "moderately dissatisfied (2)", "neither dissatisfied nor satisfied (3)",
    "moderately satisfied (4)", or "very satisfied (5)". 189 customers
    respond to the survey, and the results are cast into a contingency table
    with the hotel room cleanliness as the "row" variable and customer
    satisfaction as the "column" variable.

    +-----+-----+-----+-----+-----+-----+
    |     | (1) | (2) | (3) | (4) | (5) |
    +=====+=====+=====+=====+=====+=====+
    | (1) | 27  | 25  | 14  | 7   | 0   |
    +-----+-----+-----+-----+-----+-----+
    | (2) | 7   | 14  | 18  | 35  | 12  |
    +-----+-----+-----+-----+-----+-----+
    | (3) | 1   | 3   | 2   | 7   | 17  |
    +-----+-----+-----+-----+-----+-----+

    For example, 27 customers assigned their room a cleanliness ranking of
    "below average (1)" and a corresponding satisfaction of "very
    dissatisfied (1)". We perform the analysis as follows.

    >>> from scipy.stats import somersd
    >>> table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]]
    >>> res = somersd(table)
    >>> res.statistic
    0.6032766111513396
    >>> res.pvalue
    1.0007091191074533e-27

    The value of the Somers' D statistic is approximately 0.6, indicating
    a positive correlation between room cleanliness and customer satisfaction
    in the sample.
    The *p*-value is very small, indicating a very small probability of
    observing such an extreme value of the statistic under the null
    hypothesis that the statistic of the entire population (from which
    our sample of 189 customers is drawn) is zero. This supports the
    alternative hypothesis that the true value of Somers' D for the population
    is nonzero.

    """
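    # Accept either two 1D rankings (a contingency table is then built with
    # `scipy.stats.contingency.crosstab`) or a single 2D contingency table
    # passed as `x`.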
    x, y = np.array(x), np.array(y)
    if x.ndim == 1:
        if x.size != y.size:
            raise ValueError("Rankings must be of equal length.")
        table = scipy.stats.contingency.crosstab(x, y)[1]
    elif x.ndim == 2:
        if np.any(x < 0):
            raise ValueError("All elements of the contingency table must be "
                             "non-negative.")
        if np.any(x != x.astype(int)):
            raise ValueError("All elements of the contingency table must be "
                             "integer.")
        if x.nonzero()[0].size < 2:
            raise ValueError("At least two elements of the contingency table "
                             "must be nonzero.")
        table = x
    else:
        raise ValueError("x must be either a 1D or 2D array")
    # The table type is converted to a float to avoid an integer overflow
    d, p = _somers_d(table.astype(float), alternative)

    # add alias for consistency with other correlation functions
    res = SomersDResult(d, p, table)
    res.correlation = d
    return res


# This could be combined with `_all_partitions` in `_resampling.py`
def _all_partitions(nx, ny):
    """
    Partition a set of indices into two fixed-length sets in all possible ways

    Partition a set of indices 0 ... nx + ny - 1 into two sets of length nx
    and ny in all possible ways (ignoring order of elements).
    """
    z = np.arange(nx+ny)
    for c in combinations(z, nx):
        x = np.array(c)
        mask = np.ones(nx+ny, bool)
        mask[x] = False
        y = z[mask]
        yield x, y
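# For example (illustrative note, not part of the public API):
# list(_all_partitions(2, 1)) yields the C(3, 2) = 3 index splits
# ([0, 1], [2]), ([0, 2], [1]) and ([1, 2], [0]).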


def _compute_log_combinations(n):
    """Compute log(C(n, k)) for all k = 0, ..., n."""
    gammaln_arr = gammaln(np.arange(n + 1) + 1)
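    # Uses log C(n, k) = gammaln(n+1) - gammaln(k+1) - gammaln(n-k+1); the
    # reversed array supplies the gammaln(n-k+1) terms. Sanity check
    # (illustrative): np.exp(_compute_log_combinations(4)) is approximately
    # [1., 4., 6., 4., 1.].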
    return gammaln(n + 1) - gammaln_arr - gammaln_arr[::-1]


@dataclass
class BarnardExactResult:
    statistic: float
    pvalue: float


def barnard_exact(table, alternative="two-sided", pooled=True, n=32):
    r"""Perform a Barnard exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements should be non-negative integers.

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses. Default is 'two-sided'.
        Please see explanations in the Notes section below.

    pooled : bool, optional
        Whether to compute score statistic with pooled variance (as in
        Student's t-test, for example) or unpooled variance (as in Welch's
        t-test). Default is ``True``.

    n : int, optional
        Number of sampling points used in the construction of the sampling
        method. Note that this argument will automatically be converted to
        the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
        select sample points. Default is 32. Must be positive. In most cases,
        32 points is enough to reach good precision. More points come at
        a performance cost.

    Returns
    -------
    ber : BarnardExactResult
        A result object with the following attributes.

        statistic : float
            The Wald statistic with pooled or unpooled variance, depending
            on the user choice of `pooled`.

        pvalue : float
            P-value, the probability of obtaining a distribution at least as
            extreme as the one that was actually observed, assuming that the
            null hypothesis is true.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.
    fisher_exact : Fisher exact test on a 2x2 contingency table.
    boschloo_exact : Boschloo's exact test on a 2x2 contingency table,
        which is a uniformly more powerful alternative to Fisher's exact test.

    Notes
    -----
    Barnard's test is an exact test used in the analysis of contingency
    tables. It examines the association of two categorical variables, and
    is a more powerful alternative to Fisher's exact test
    for 2x2 contingency tables.

    Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
    where each column stores the binomial experiment, as in the example
    below. Let's also define :math:`p_1, p_2` the theoretical binomial
    probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
    Barnard's exact test, we can assert three different null hypotheses:

    - :math:`H_0 : p_1 \geq p_2` versus :math:`H_1 : p_1 < p_2`,
      with `alternative` = "less"

    - :math:`H_0 : p_1 \leq p_2` versus :math:`H_1 : p_1 > p_2`,
      with `alternative` = "greater"

    - :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
      with `alternative` = "two-sided" (default)

    In order to compute Barnard's exact test, we are using the Wald
    statistic [3]_ with pooled or unpooled variance.
    Under the default assumption that both variances are equal
    (``pooled = True``), the statistic is computed as:

    .. math::

        T(X) = \frac{
            \hat{p}_1 - \hat{p}_2
        }{
            \sqrt{
                \hat{p}(1 - \hat{p})
                (\frac{1}{c_1} +
                \frac{1}{c_2})
            }
        }

    with :math:`\hat{p}_1, \hat{p}_2` and :math:`\hat{p}` the estimator of
    :math:`p_1, p_2` and :math:`p`, the latter being the combined probability,
    given the assumption that :math:`p_1 = p_2`.

    If this assumption is invalid (``pooled = False``), the statistic is:

    .. math::

        T(X) = \frac{
            \hat{p}_1 - \hat{p}_2
        }{
            \sqrt{
                \frac{\hat{p}_1 (1 - \hat{p}_1)}{c_1} +
                \frac{\hat{p}_2 (1 - \hat{p}_2)}{c_2}
            }
        }

    The p-value is then computed as:

    .. math::

        \sum
            \binom{c_1}{x_{11}}
            \binom{c_2}{x_{12}}
            \pi^{x_{11} + x_{12}}
            (1 - \pi)^{t - x_{11} - x_{12}}

    where the sum is over all 2x2 contingency tables :math:`X` such that:

    * :math:`T(X) \leq T(X_0)` when `alternative` = "less",
    * :math:`T(X) \geq T(X_0)` when `alternative` = "greater", or
    * :math:`|T(X)| \geq |T(X_0)|` when `alternative` = "two-sided".

    Above, :math:`c_1, c_2` are the sums of columns 1 and 2,
    and :math:`t` the total (the sum of all four table elements).

    The returned p-value is the maximum p-value taken over the nuisance
    parameter :math:`\pi`, where :math:`0 \leq \pi \leq 1`.

    This function's complexity is :math:`O(n c_1 c_2)`, where `n` is the
    number of sample points.

    References
    ----------
    .. [1] Barnard, G. A. "Significance Tests for 2x2 Tables". *Biometrika*.
           34.1/2 (1947): 123-138. :doi:`dpgkg3`

    .. [2] Mehta, Cyrus R., and Pralay Senchaudhuri. "Conditional versus
           unconditional exact tests for comparing two binomials."
           *Cytel Software Corporation* 675 (2003): 1-5.

    .. [3] "Wald Test". *Wikipedia*. https://en.wikipedia.org/wiki/Wald_test

    Examples
    --------
    An example use of Barnard's test is presented in [2]_.

    Consider the following example of a vaccine efficacy study
    (Chan, 1998). In a randomized clinical trial of 30 subjects, 15 were
    inoculated with a recombinant DNA influenza vaccine and the other 15 were
    inoculated with a placebo. Twelve of the 15 subjects in the placebo
    group (80%) eventually became infected with influenza whereas for the
    vaccine group, only 7 of the 15 subjects (47%) became infected. The
    data are tabulated as a 2 x 2 table::

            Vaccine  Placebo
        Yes     7        12
        No      8        3

    When working with statistical hypothesis testing, we usually use a
    threshold probability or significance level upon which we decide
    to reject the null hypothesis :math:`H_0`. Suppose we choose the common
    significance level of 5%.

    Our alternative hypothesis is that the vaccine will lower the chance of
    becoming infected with the virus; that is, the probability :math:`p_1` of
    catching the virus with the vaccine will be *less than* the probability
    :math:`p_2` of catching the virus without the vaccine. Therefore, we call
    `barnard_exact` with the ``alternative="less"`` option:

    >>> import scipy.stats as stats
    >>> res = stats.barnard_exact([[7, 12], [8, 3]], alternative="less")
    >>> res.statistic
    -1.894...
    >>> res.pvalue
    0.03407...

    Under the null hypothesis that the vaccine will not lower the chance of
    becoming infected, the probability of obtaining test results at least as
    extreme as the observed data is approximately 3.4%. Since this p-value is
    less than our chosen significance level, we have evidence to reject
    :math:`H_0` in favor of the alternative.

    Suppose we had used Fisher's exact test instead:

    >>> _, pvalue = stats.fisher_exact([[7, 12], [8, 3]], alternative="less")
    >>> pvalue
    0.0640...

    With the same threshold significance of 5%, we would not have been able
    to reject the null hypothesis in favor of the alternative. As stated in
    [2]_, Barnard's test is uniformly more powerful than Fisher's exact test
    because Barnard's test does not condition on any margin. Fisher's test
    should only be used when both sets of marginals are fixed.

    """
    if n <= 0:
        raise ValueError(
            "Number of points `n` must be strictly positive, "
            f"found {n!r}"
        )

    table = np.asarray(table, dtype=np.int64)

    if not table.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")

    if np.any(table < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in table.sum(axis=0):
        # If both values in a column are zero, the p-value is 1 and
        # the score's statistic is NaN.
        return BarnardExactResult(np.nan, 1.0)

    total_col_1, total_col_2 = table.sum(axis=0)

    x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(-1, 1)
    x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(1, -1)

    # We need to calculate the Wald statistic for each combination of x1 and
    # x2.
    p1, p2 = x1 / total_col_1, x2 / total_col_2

    if pooled:
        p = (x1 + x2) / (total_col_1 + total_col_2)
        variances = p * (1 - p) * (1 / total_col_1 + 1 / total_col_2)
    else:
        variances = p1 * (1 - p1) / total_col_1 + p2 * (1 - p2) / total_col_2

    # To avoid a warning when dividing by 0
    with np.errstate(divide="ignore", invalid="ignore"):
        wald_statistic = np.divide((p1 - p2), np.sqrt(variances))

    wald_statistic[p1 == p2] = 0  # Removing NaN values

    wald_stat_obs = wald_statistic[table[0, 0], table[0, 1]]

    if alternative == "two-sided":
        index_arr = np.abs(wald_statistic) >= abs(wald_stat_obs)
    elif alternative == "less":
        index_arr = wald_statistic <= wald_stat_obs
    elif alternative == "greater":
        index_arr = wald_statistic >= wald_stat_obs
    else:
        msg = (
            "`alternative` should be one of {'two-sided', 'less', 'greater'},"
            f" found {alternative!r}"
        )
        raise ValueError(msg)

    x1_sum_x2 = x1 + x2

    x1_log_comb = _compute_log_combinations(total_col_1)
    x2_log_comb = _compute_log_combinations(total_col_2)
    x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]

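    # The reported p-value is the maximum over the nuisance parameter
    # pi in [0, 1]; minimizing the negative log p-value with `shgo` below is
    # equivalent to that maximization.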
    result = shgo(
        _get_binomial_log_p_value_with_nuisance_param,
        args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
        bounds=((0, 1),),
        n=n,
        sampling_method="sobol",
    )

    # result.fun is the negative log pvalue and therefore needs to be
    # changed before return
    p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
    return BarnardExactResult(wald_stat_obs, p_value)


@dataclass
class BoschlooExactResult:
    statistic: float
    pvalue: float


def boschloo_exact(table, alternative="two-sided", n=32):
    r"""Perform Boschloo's exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements should be non-negative integers.

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses. Default is 'two-sided'.
        Please see explanations in the Notes section below.

    n : int, optional
        Number of sampling points used in the construction of the sampling
        method. Note that this argument will automatically be converted to
        the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
        select sample points. Default is 32. Must be positive. In most cases,
        32 points is enough to reach good precision. More points come at
        a performance cost.

    Returns
    -------
    ber : BoschlooExactResult
        A result object with the following attributes.

        statistic : float
            The statistic used in Boschloo's test; that is, the p-value
            from Fisher's exact test.

        pvalue : float
            P-value, the probability of obtaining a distribution at least as
            extreme as the one that was actually observed, assuming that the
            null hypothesis is true.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.
    fisher_exact : Fisher exact test on a 2x2 contingency table.
    barnard_exact : Barnard's exact test, which is a more powerful alternative
        to Fisher's exact test for 2x2 contingency tables.

    Notes
    -----
    Boschloo's test is an exact test used in the analysis of contingency
    tables. It examines the association of two categorical variables, and
    is a uniformly more powerful alternative to Fisher's exact test
    for 2x2 contingency tables.

    Boschloo's exact test uses the p-value of Fisher's exact test as a
    statistic, and Boschloo's p-value is the probability under the null
    hypothesis of observing such an extreme value of this statistic.

    Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
    where each column stores the binomial experiment, as in the example
    below. Let's also define :math:`p_1, p_2` the theoretical binomial
    probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
    Boschloo's exact test, we can assert three different alternative
    hypotheses:

    - :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 < p_2`,
      with `alternative` = "less"

    - :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 > p_2`,
      with `alternative` = "greater"

    - :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
      with `alternative` = "two-sided" (default)

    There are multiple conventions for computing a two-sided p-value when the
    null distribution is asymmetric. Here, we apply the convention that the
    p-value of a two-sided test is twice the minimum of the p-values of the
    one-sided tests (clipped to 1.0). Note that `fisher_exact` follows a
    different convention, so for a given `table`, the statistic reported by
    `boschloo_exact` may differ from the p-value reported by `fisher_exact`
    when ``alternative='two-sided'``.

    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] R.D. Boschloo. "Raised conditional level of significance for the
           2 x 2-table when testing the equality of two probabilities",
           Statistica Neerlandica, 24(1), 1970

    .. [2] "Boschloo's test", Wikipedia,
           https://en.wikipedia.org/wiki/Boschloo%27s_test

    .. [3] Lise M. Saari et al. "Employee attitudes and job satisfaction",
           Human Resource Management, 43(4), 395-407, 2004,
           :doi:`10.1002/hrm.20032`.

    Examples
    --------
    In the following example, we consider the article "Employee
    attitudes and job satisfaction" [3]_
    which reports the results of a survey of 63 scientists and 117 college
    professors. Of the 63 scientists, 31 said they were very satisfied with
    their jobs, whereas 74 of the college professors were very satisfied
    with their work. Is this significant evidence that college
    professors are happier with their work than scientists?
    The following table summarizes the data mentioned above::

                         college professors   scientists
        Very Satisfied   74                   31
        Dissatisfied     43                   32

    When working with statistical hypothesis testing, we usually use a
    threshold probability or significance level upon which we decide
    to reject the null hypothesis :math:`H_0`. Suppose we choose the common
    significance level of 5%.

    Our alternative hypothesis is that college professors are truly more
    satisfied with their work than scientists. Therefore, we expect
    :math:`p_1` the proportion of very satisfied college professors to be
    greater than :math:`p_2`, the proportion of very satisfied scientists.
    We thus call `boschloo_exact` with the ``alternative="greater"`` option:

    >>> import scipy.stats as stats
    >>> res = stats.boschloo_exact([[74, 31], [43, 32]], alternative="greater")
    >>> res.statistic
    0.0483...
    >>> res.pvalue
    0.0355...

    Under the null hypothesis that college professors and scientists are
    equally satisfied with their work, the probability of obtaining test
    results at least as extreme as the observed data is approximately 3.55%.
    Since this p-value is less than our chosen significance level, we have
    evidence to reject :math:`H_0` in favor of the alternative hypothesis.

    """
    hypergeom = distributions.hypergeom

    if n <= 0:
        raise ValueError(
            "Number of points `n` must be strictly positive,"
            f" found {n!r}"
        )

    table = np.asarray(table, dtype=np.int64)

    if not table.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")

    if np.any(table < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in table.sum(axis=0):
        # If both values in a column are zero, the p-value is 1 and
        # the score's statistic is NaN.
        return BoschlooExactResult(np.nan, np.nan)

    total_col_1, total_col_2 = table.sum(axis=0)
    total = total_col_1 + total_col_2
    x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(1, -1)
    x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(-1, 1)
    x1_sum_x2 = x1 + x2

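    # Boschloo's statistic is the one-sided Fisher exact test p-value; the
    # hypergeometric CDF below evaluates it for every table that shares the
    # observed column totals.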
    if alternative == 'less':
        pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
    elif alternative == 'greater':
        # Same formula as the 'less' case, but with the second column.
        pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
    elif alternative == 'two-sided':
        boschloo_less = boschloo_exact(table, alternative="less", n=n)
        boschloo_greater = boschloo_exact(table, alternative="greater", n=n)

        res = (
            boschloo_less if boschloo_less.pvalue < boschloo_greater.pvalue
            else boschloo_greater
        )

        # Two-sided p-value is defined as twice the minimum of the one-sided
        # p-values
        pvalue = np.clip(2 * res.pvalue, a_min=0, a_max=1)
        return BoschlooExactResult(res.statistic, pvalue)
    else:
        msg = (
            "`alternative` should be one of {'two-sided', 'less', 'greater'},"
            f" found {alternative!r}"
        )
        raise ValueError(msg)

    fisher_stat = pvalues[table[0, 0], table[0, 1]]

    # fisher_stat * (1+1e-13) guards us from small numerical error. It is
    # equivalent to np.isclose with a relative tolerance of 1e-13 and an
    # absolute tolerance of 0. For more thorough explanations, see gh-14178.
    index_arr = pvalues <= fisher_stat * (1+1e-13)

    x1, x2, x1_sum_x2 = x1.T, x2.T, x1_sum_x2.T
    x1_log_comb = _compute_log_combinations(total_col_1)
    x2_log_comb = _compute_log_combinations(total_col_2)
    x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]

    result = shgo(
        _get_binomial_log_p_value_with_nuisance_param,
        args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
        bounds=((0, 1),),
        n=n,
        sampling_method="sobol",
    )

    # result.fun is the negative log pvalue and therefore needs to be
    # changed before return
    p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
    return BoschlooExactResult(fisher_stat, p_value)


def _get_binomial_log_p_value_with_nuisance_param(
    nuisance_param, x1_sum_x2, x1_sum_x2_log_comb, index_arr
):
    r"""
    Compute the negative log p-value as a function of a nuisance parameter
    over a 2x2 sample space.

    Parameters
    ----------
    nuisance_param : float
        Nuisance parameter used in the maximization of the p-value. Must be
        between 0 and 1.

    x1_sum_x2 : ndarray
        Sum of x1 and x2 inside barnard_exact.

    x1_sum_x2_log_comb : ndarray
        Sum of the log combinations of x1 and x2.

    index_arr : ndarray of boolean
        Mask selecting the tables at least as extreme as the observed one.

    Returns
    -------
    p_value : float
        The negative log p-value for the given nuisance parameter.

    Notes
    -----
    Both Barnard's test and Boschloo's test iterate over a nuisance parameter
    :math:`\pi \in [0, 1]` to find the maximum p-value. To search for this
    maximum, this function returns the negative log p-value with respect to
    the nuisance parameter passed in `nuisance_param`. This negative log
    p-value is then used in `shgo` to find the minimum negative p-value,
    which is our maximum p-value.

    Also, to compute the different combinations used in the p-values'
    computation formula, this function uses `gammaln`, which is more
    tolerant of large values than `scipy.special.comb`. `gammaln` gives
    a log combination. The small loss of precision buys a large performance
    improvement.
    """
    t1, t2 = x1_sum_x2.shape
    n = t1 + t2 - 2
    with np.errstate(divide="ignore", invalid="ignore"):
        log_nuisance = np.log(
            nuisance_param,
            out=np.zeros_like(nuisance_param),
            where=nuisance_param >= 0,
        )
        log_1_minus_nuisance = np.log(
            1 - nuisance_param,
            out=np.zeros_like(nuisance_param),
            where=1 - nuisance_param >= 0,
        )

    nuisance_power_x1_x2 = log_nuisance * x1_sum_x2
    nuisance_power_x1_x2[(x1_sum_x2 == 0)[:, :]] = 0

    nuisance_power_n_minus_x1_x2 = log_1_minus_nuisance * (n - x1_sum_x2)
    nuisance_power_n_minus_x1_x2[(x1_sum_x2 == n)[:, :]] = 0

    tmp_log_values_arr = (
        x1_sum_x2_log_comb
        + nuisance_power_x1_x2
        + nuisance_power_n_minus_x1_x2
    )

    tmp_values_from_index = tmp_log_values_arr[index_arr]

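    # The block below is the standard log-sum-exp trick:
    # log(sum(exp(v))) = max(v) + log(sum(exp(v - max(v)))),
    # which sums the table probabilities without underflow.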
    # To avoid dividing by zero in the log function and getting inf values,
    # values are centered according to the max
    max_value = tmp_values_from_index.max()

    # To get better precision in the result, the log of the p-value is taken
    # here. Indeed, the p-value lies in the [0, 1] interval; taking the log
    # maps it to a much bigger interval ([-inf, 0]), and thus helps us to
    # achieve better precision.
    with np.errstate(divide="ignore", invalid="ignore"):
        log_probs = np.exp(tmp_values_from_index - max_value).sum()
        log_pvalue = max_value + np.log(
            log_probs,
            out=np.full_like(log_probs, -np.inf),
            where=log_probs > 0,
        )

    # Since shgo finds minima, the minus log p-value is returned
    return -log_pvalue


def _pval_cvm_2samp_exact(s, m, n):
    """
    Compute the exact p-value of the Cramer-von Mises two-sample test
    for a given value s of the test statistic.
    m and n are the sizes of the samples.

    [1] Y. Xiao, A. Gordon, and A. Yakovlev, "A C++ Program for
        the Cramér-Von Mises Two-Sample Test", J. Stat. Soft.,
        vol. 17, no. 8, pp. 1-15, Dec. 2006.
    [2] T. W. Anderson "On the Distribution of the Two-Sample Cramer-von Mises
        Criterion," The Annals of Mathematical Statistics, Ann. Math. Statist.
        33(3), 1148-1159, (September, 1962)
    """

    # [1, p. 3]
    lcm = np.lcm(m, n)
    # [1, p. 4], below eq. 3
    a = lcm // m
    b = lcm // n
    # Combine Eq. 9 in [2] with Eq. 2 in [1] and solve for $\zeta$
    # Hint: `s` is $U$ in [2], and $T_2$ in [1] is $T$ in [2]
    mn = m * n
    zeta = lcm ** 2 * (m + n) * (6 * s - mn * (4 * mn - 1)) // (6 * mn ** 2)

    # bound maximum value that may appear in `gs` (remember both rows!)
    zeta_bound = lcm**2 * (m + n)  # bound elements in row 1
    combinations = comb(m + n, m)  # sum of row 2
    max_gs = max(zeta_bound, combinations)
    dtype = np.min_scalar_type(max_gs)

    # the frequency table of $g_{u, v}^+$ defined in [1, p. 6]
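    # Each entry of `gs` is a 2-row array: row 0 holds the distinct partial
    # statistic values, row 1 the number of paths attaining each value.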
    gs = ([np.array([[0], [1]], dtype=dtype)]
          + [np.empty((2, 0), dtype=dtype) for _ in range(m)])
    for u in range(n + 1):
        next_gs = []
        tmp = np.empty((2, 0), dtype=dtype)
        for v, g in enumerate(gs):
            # Calculate g recursively with eq. 11 in [1]. Even though it
            # doesn't look like it, this also does 12/13 (all of Algorithm 1).
            vi, i0, i1 = np.intersect1d(tmp[0], g[0], return_indices=True)
            tmp = np.concatenate([
                np.stack([vi, tmp[1, i0] + g[1, i1]]),
                np.delete(tmp, i0, 1),
                np.delete(g, i1, 1)
            ], 1)
            res = (a * v - b * u) ** 2
            tmp[0] += res.astype(dtype)
            next_gs.append(tmp)
        gs = next_gs
    value, freq = gs[m]
    return np.float64(np.sum(freq[value >= zeta]) / combinations)


@_axis_nan_policy_factory(CramerVonMisesResult, n_samples=2, too_small=1,
                          result_to_tuple=_cvm_result_to_tuple)
def cramervonmises_2samp(x, y, method='auto'):
    """Perform the two-sample Cramér-von Mises test for goodness of fit.

    This is the two-sample version of the Cramér-von Mises test ([1]_):
    for two independent samples :math:`X_1, ..., X_n` and
    :math:`Y_1, ..., Y_m`, the null hypothesis is that the samples
    come from the same (unspecified) continuous distribution.

    Parameters
    ----------
    x : array_like
        A 1-D array of observed values of the random variables :math:`X_i`.
    y : array_like
        A 1-D array of observed values of the random variables :math:`Y_i`.
    method : {'auto', 'asymptotic', 'exact'}, optional
        The method used to compute the p-value, see Notes for details.
        The default is 'auto'.

    Returns
    -------
    res : object with attributes
        statistic : float
            Cramér-von Mises statistic.
        pvalue : float
            The p-value.

    See Also
    --------
    cramervonmises, anderson_ksamp, epps_singleton_2samp, ks_2samp

    Notes
    -----
    .. versionadded:: 1.7.0

    The statistic is computed according to equation 9 in [2]_. The
    calculation of the p-value depends on the keyword `method`:

    - ``asymptotic``: The p-value is approximated by using the limiting
      distribution of the test statistic.
    - ``exact``: The exact p-value is computed by enumerating all
      possible combinations of the test statistic, see [2]_.

    If ``method='auto'``, the exact approach is used
    if both samples contain no more than 20 observations,
    otherwise the asymptotic distribution is used.

    If the underlying distribution is not continuous, the p-value is likely to
    be conservative (Section 6.2 in [3]_). When ranking the data to compute
    the test statistic, midranks are used if there are ties.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Cramer-von_Mises_criterion
    .. [2] Anderson, T.W. (1962). On the distribution of the two-sample
           Cramer-von-Mises criterion. The Annals of Mathematical
           Statistics, pp. 1148-1159.
    .. [3] Conover, W.J., Practical Nonparametric Statistics, 1971.

    Examples
    --------

    Suppose we wish to test whether two samples generated by
    ``scipy.stats.norm.rvs`` have the same distribution. We choose a
    significance level of alpha=0.05.

    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> x = stats.norm.rvs(size=100, random_state=rng)
    >>> y = stats.norm.rvs(size=70, random_state=rng)
    >>> res = stats.cramervonmises_2samp(x, y)
    >>> res.statistic, res.pvalue
    (0.29376470588235293, 0.1412873014573014)

    The p-value exceeds our chosen significance level, so we do not
    reject the null hypothesis that the observed samples are drawn from the
    same distribution.

    For small sample sizes, one can compute the exact p-values:

    >>> x = stats.norm.rvs(size=7, random_state=rng)
    >>> y = stats.t.rvs(df=2, size=6, random_state=rng)
    >>> res = stats.cramervonmises_2samp(x, y, method='exact')
    >>> res.statistic, res.pvalue
    (0.197802197802198, 0.31643356643356646)

    The p-value based on the asymptotic distribution is a good approximation
    even though the sample size is small.

    >>> res = stats.cramervonmises_2samp(x, y, method='asymptotic')
    >>> res.statistic, res.pvalue
    (0.197802197802198, 0.2966041181527128)

    Independent of the method, one would not reject the null hypothesis at the
    chosen significance level in this example.

    """
    xa = np.sort(np.asarray(x))
    ya = np.sort(np.asarray(y))

    if xa.size <= 1 or ya.size <= 1:
        raise ValueError('x and y must contain at least two observations.')
    if method not in ['auto', 'exact', 'asymptotic']:
        raise ValueError('method must be either auto, exact or asymptotic.')

    nx = len(xa)
    ny = len(ya)

    if method == 'auto':
        if max(nx, ny) > 20:
            method = 'asymptotic'
        else:
            method = 'exact'

    # get ranks of x and y in the pooled sample
    z = np.concatenate([xa, ya])
    # in case of ties, use midrank (see [1])
    r = scipy.stats.rankdata(z, method='average')
    rx = r[:nx]
    ry = r[nx:]

    # compute U (eq. 10 in [2])
    u = nx * np.sum((rx - np.arange(1, nx+1))**2)
    u += ny * np.sum((ry - np.arange(1, ny+1))**2)

    # compute T (eq. 9 in [2])
    k, N = nx*ny, nx + ny
    t = u / (k*N) - (4*k - 1)/(6*N)

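    # Under the null hypothesis, E[T] = (1 + 1/N)/6 (eq. 11 in [2]); the
    # asymptotic branch below standardizes T accordingly.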
    if method == 'exact':
        p = _pval_cvm_2samp_exact(u, nx, ny)
    else:
        # compute expected value and variance of T (eq. 11 and 14 in [2])
        et = (1 + 1/N)/6
        vt = (N+1) * (4*k*N - 3*(nx**2 + ny**2) - 2*k)
        vt = vt / (45 * N**2 * 4 * k)

        # compute the normalized statistic (eq. 15 in [2])
        tn = 1/6 + (t - et) / np.sqrt(45 * vt)

        # approximate distribution of tn with limiting distribution
        # of the one-sample test statistic
        # if tn < 0.003, _cdf_cvm_inf(tn) < 1.28e-18, so return 1.0 directly
        if tn < 0.003:
            p = 1.0
        else:
            p = max(0, 1. - _cdf_cvm_inf(tn))

    return CramerVonMisesResult(statistic=t, pvalue=p)


class TukeyHSDResult:
    """Result of `scipy.stats.tukey_hsd`.

    Attributes
    ----------
    statistic : float ndarray
        The computed statistic of the test for each comparison. The element
        at index ``(i, j)`` is the statistic for the comparison between groups
        ``i`` and ``j``.
    pvalue : float ndarray
        The associated p-value from the studentized range distribution. The
        element at index ``(i, j)`` is the p-value for the comparison
        between groups ``i`` and ``j``.

    Notes
    -----
    The string representation of this object displays the most recently
    calculated confidence interval, and if none have been previously
    calculated, it will evaluate ``confidence_interval()``.

    References
    ----------
    .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
           Method."
           https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
           28 November 2020.
    """

    # Parameter names match the order of the values passed by `tukey_hsd`:
    # the number of treatments first, then the total number of observations.
    def __init__(self, statistic, pvalue, _ntreatments, _nobs, _stand_err):
        self.statistic = statistic
        self.pvalue = pvalue
        self._ntreatments = _ntreatments
        self._nobs = _nobs
        self._stand_err = _stand_err
        self._ci = None
        self._ci_cl = None

    def __str__(self):
        # Note: `__str__` prints the confidence intervals from the most
        # recent call to `confidence_interval`. If it has not been called,
        # it will be called with the default CL of .95.
        if self._ci is None:
            self.confidence_interval(confidence_level=.95)
        s = ("Tukey's HSD Pairwise Group Comparisons"
             f" ({self._ci_cl*100:.1f}% Confidence Interval)\n")
        s += "Comparison  Statistic  p-value  Lower CI  Upper CI\n"
        for i in range(self.pvalue.shape[0]):
            for j in range(self.pvalue.shape[0]):
                if i != j:
                    s += (f" ({i} - {j}) {self.statistic[i, j]:>10.3f}"
                          f"{self.pvalue[i, j]:>10.3f}"
                          f"{self._ci.low[i, j]:>10.3f}"
                          f"{self._ci.high[i, j]:>10.3f}\n")
        return s

    def confidence_interval(self, confidence_level=.95):
        """Compute the confidence interval for the specified confidence level.

        Parameters
        ----------
        confidence_level : float, optional
            Confidence level for the computed confidence interval
            of the estimated mean differences. Default is .95.

        Returns
        -------
        ci : ``ConfidenceInterval`` object
            The object has attributes ``low`` and ``high`` that hold the
            lower and upper bounds of the confidence intervals for each
            comparison. The high and low values are accessible for each
            comparison at index ``(i, j)`` between groups ``i`` and ``j``.

        References
        ----------
        .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1.
               Tukey's Method."
               https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
               28 November 2020.

        Examples
        --------
        >>> from scipy.stats import tukey_hsd
        >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
        >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
        >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
        >>> result = tukey_hsd(group0, group1, group2)
        >>> ci = result.confidence_interval()
        >>> ci.low
        array([[-3.649159, -8.249159, -3.909159],
               [ 0.950841, -3.649159,  0.690841],
               [-3.389159, -7.989159, -3.649159]])
        >>> ci.high
        array([[ 3.649159, -0.950841,  3.389159],
               [ 8.249159,  3.649159,  7.989159],
               [ 3.909159, -0.690841,  3.649159]])
        """
        # check to see if the supplied confidence level matches that of the
        # previously computed CI.
        if (self._ci is not None and self._ci_cl is not None and
                confidence_level == self._ci_cl):
            return self._ci

        if not 0 < confidence_level < 1:
            raise ValueError("Confidence level must be between 0 and 1.")
        # determine the critical value of the studentized range using the
        # appropriate confidence level, number of treatments, and degrees
        # of freedom as determined by the number of data less the number of
        # treatments. ("Confidence limits for Tukey's method")[1]. Note that
        # in the cases of unequal sample sizes there will be a criterion for
        # each group comparison.
        params = (confidence_level, self._ntreatments,
                  self._nobs - self._ntreatments)
        srd = distributions.studentized_range.ppf(*params)
        # also called maximum critical value, the Tukey criterion is the
        # studentized range critical value * the square root of mean square
        # error over the sample size.
        tukey_criterion = srd * self._stand_err
        # the confidence levels are determined by the
        # `mean_differences` +- `tukey_criterion`
        upper_conf = self.statistic + tukey_criterion
        lower_conf = self.statistic - tukey_criterion
        self._ci = ConfidenceInterval(low=lower_conf, high=upper_conf)
        self._ci_cl = confidence_level
        return self._ci


def _tukey_hsd_iv(args):
    if len(args) < 2:
        raise ValueError("There must be more than 1 treatment.")
    args = [np.asarray(arg) for arg in args]
    for arg in args:
        if arg.ndim != 1:
            raise ValueError("Input samples must be one-dimensional.")
        if arg.size <= 1:
            raise ValueError("Input sample size must be greater than one.")
        if np.isinf(arg).any():
            raise ValueError("Input samples must be finite.")
    return args


def tukey_hsd(*args):
    """Perform Tukey's HSD test for equality of means over multiple treatments.

    Tukey's honestly significant difference (HSD) test performs pairwise
    comparison of means for a set of samples. Whereas ANOVA (e.g. `f_oneway`)
    assesses whether the true means underlying each sample are identical,
    Tukey's HSD is a post hoc test used to compare the mean of each sample
    to the mean of each other sample.

    The null hypothesis is that the distributions underlying the samples all
    have the same mean. The test statistic, which is computed for every
    possible pairing of samples, is simply the difference between the sample
    means. For each pair, the p-value is the probability under the null
    hypothesis (and other assumptions; see notes) of observing such an extreme
    value of the statistic, considering that many pairwise comparisons are
    being performed. Confidence intervals for the difference between each pair
    of means are also available.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group. There must be at least
        two arguments.

    Returns
    -------
    result : `~scipy.stats._result_classes.TukeyHSDResult` instance
        The return value is an object with the following attributes:

        statistic : float ndarray
            The computed statistic of the test for each comparison. The
            element at index ``(i, j)`` is the statistic for the comparison
            between groups ``i`` and ``j``.
        pvalue : float ndarray
            The computed p-value of the test for each comparison. The element
            at index ``(i, j)`` is the p-value for the comparison between
            groups ``i`` and ``j``.

        The object has the following methods:

        confidence_interval(confidence_level=0.95):
            Compute the confidence interval for the specified confidence
            level.

    See Also
    --------
    dunnett : performs comparison of means against a control group.

    Notes
    -----
    The use of this test relies on several assumptions.

    1. The observations are independent within and among groups.
    2. The observations within each group are normally distributed.
    3. The distributions from which the samples are drawn have the same finite
       variance.

    The original formulation of the test was for samples of equal size [6]_.
    In case of unequal sample sizes, the test uses the Tukey-Kramer method
    [4]_.

    References
    ----------
    .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
           Method."
           https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
           28 November 2020.
    .. [2] Abdi, Herve & Williams, Lynne. (2021). "Tukey's Honestly Significant
           Difference (HSD) Test."
           https://personal.utdallas.edu/~herve/abdi-HSD2010-pretty.pdf
    .. [3] "One-Way ANOVA Using SAS PROC ANOVA & PROC GLM." SAS
           Tutorials, 2007, www.stattutorials.com/SAS/TUTORIAL-PROC-GLM.htm.
    .. [4] Kramer, Clyde Young. "Extension of Multiple Range Tests to Group
           Means with Unequal Numbers of Replications." Biometrics, vol. 12,
           no. 3, 1956, pp. 307-310. JSTOR, www.jstor.org/stable/3001469.
           Accessed 25 May 2021.
    .. [5] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.3.3.
           The ANOVA table and tests of hypotheses about means"
           https://www.itl.nist.gov/div898/handbook/prc/section4/prc433.htm,
           2 June 2021.
    .. [6] Tukey, John W. "Comparing Individual Means in the Analysis of
           Variance." Biometrics, vol. 5, no. 2, 1949, pp. 99-114. JSTOR,
           www.jstor.org/stable/3001913. Accessed 14 June 2021.

    Examples
    --------
    Here are some data comparing the time to relief of three brands of
    headache medicine, reported in minutes. Data adapted from [3]_.

    >>> import numpy as np
    >>> from scipy.stats import tukey_hsd
    >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
    >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
    >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]

    We would like to see if the means between any of the groups are
    significantly different. First, visually examine a box and whisker plot.

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.boxplot([group0, group1, group2])
    >>> ax.set_xticklabels(["group0", "group1", "group2"]) # doctest: +SKIP
    >>> ax.set_ylabel("mean") # doctest: +SKIP
    >>> plt.show()

    From the box and whisker plot, we can see that the interquartile ranges
    of the groups overlap, so we apply the ``tukey_hsd`` test to determine
    whether the differences between means are significant. We set a
    significance level of .05 to reject the null hypothesis.

    >>> res = tukey_hsd(group0, group1, group2)
    >>> print(res)
    Tukey's HSD Pairwise Group Comparisons (95.0% Confidence Interval)
    Comparison  Statistic  p-value  Lower CI  Upper CI
     (0 - 1)     -4.600     0.014    -8.249    -0.951
     (0 - 2)     -0.260     0.980    -3.909     3.389
     (1 - 0)      4.600     0.014     0.951     8.249
     (1 - 2)      4.340     0.020     0.691     7.989
     (2 - 0)      0.260     0.980    -3.389     3.909
     (2 - 1)     -4.340     0.020    -7.989    -0.691

    The null hypothesis is that each group has the same mean. The p-values
    for the comparisons between ``group0`` and ``group1`` as well as
    ``group1`` and ``group2`` do not exceed .05, so we reject the null
    hypothesis that they have the same means. The p-value of the comparison
    between ``group0`` and ``group2`` exceeds .05, so we do not reject the
    null hypothesis that their means are equal.

    We can also compute the confidence interval associated with our chosen
    confidence level.

    >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
    >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
    >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
    >>> result = tukey_hsd(group0, group1, group2)
    >>> conf = result.confidence_interval(confidence_level=.99)
    >>> for ((i, j), l) in np.ndenumerate(conf.low):
    ...     # filter out self comparisons
    ...     if i != j:
    ...         h = conf.high[i,j]
    ...         print(f"({i} - {j}) {l:>6.3f} {h:>6.3f}")
    (0 - 1) -9.480  0.280
    (0 - 2) -5.140  4.620
    (1 - 0) -0.280  9.480
    (1 - 2) -0.540  9.220
    (2 - 0) -4.620  5.140
    (2 - 1) -9.220  0.540
    """
    args = _tukey_hsd_iv(args)
    ntreatments = len(args)
    means = np.asarray([np.mean(arg) for arg in args])
    nsamples_treatments = np.asarray([a.size for a in args])
    nobs = np.sum(nsamples_treatments)

    # determine mean square error [5]. Note that this is sometimes called
    # mean square error within.
    mse = (np.sum([np.var(arg, ddof=1) for arg in args] *
                  (nsamples_treatments - 1)) / (nobs - ntreatments))

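    # i.e. MSE = sum_i (n_i - 1) * s_i**2 / (N - k): the sample variances
    # pooled over all k treatments with N total observations.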
    # The calculation of the standard error differs when treatments differ in
    # size. See ("Unequal sample sizes")[1].
    if np.unique(nsamples_treatments).size == 1:
        # all input groups are the same length, so only one value needs to be
        # calculated [1].
        normalize = 2 / nsamples_treatments[0]
    else:
        # to compare groups of differing sizes, we must compute a variance
        # value for each individual comparison. Use broadcasting to get the
        # resulting matrix. [3], verified against [4] (page 308).
        normalize = 1 / nsamples_treatments + 1 / nsamples_treatments[None].T

    # the standard error is used in the computation of the tukey criterion and
    # finding the p-values.
    stand_err = np.sqrt(normalize * mse / 2)

    # the mean difference is the test statistic.
    mean_differences = means[None].T - means

    # Calculate the t-statistic to use within the survival function of the
    # studentized range to get the p-value.
    t_stat = np.abs(mean_differences) / stand_err

    params = t_stat, ntreatments, nobs - ntreatments
    pvalues = distributions.studentized_range.sf(*params)

    return TukeyHSDResult(mean_differences, pvalues, ntreatments,
                          nobs, stand_err)
env-llmeval/lib/python3.10/site-packages/scipy/stats/_kde.py
ADDED
@@ -0,0 +1,728 @@
#-------------------------------------------------------------------------------
#
#  Define classes for (uni/multi)-variate kernel density estimation.
#
#  Currently, only Gaussian kernels are implemented.
#
#  Written by: Robert Kern
#
#  Date: 2004-08-09
#
#  Modified: 2005-02-10 by Robert Kern.
#              Contributed to SciPy
#            2005-10-07 by Robert Kern.
#              Some fixes to match the new scipy_core
#
#  Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------

# Standard library imports.
import warnings

# SciPy imports.
from scipy import linalg, special
from scipy._lib._util import check_random_state

from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, exp, pi,
                   sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
                   ones, cov)
import numpy as np

# Local imports.
from . import _mvn
from ._stats import gaussian_kernel_estimate, gaussian_kernel_estimate_log

# deprecated import to be removed in SciPy 1.13.0
from scipy.special import logsumexp  # noqa: F401


__all__ = ['gaussian_kde']


class gaussian_kde:
    """Representation of a kernel-density estimate using Gaussian kernels.

    Kernel density estimation is a way to estimate the probability density
    function (PDF) of a random variable in a non-parametric way.
    `gaussian_kde` works for both uni-variate and multi-variate data. It
    includes automatic bandwidth determination. The estimation works best for
    a unimodal distribution; bimodal or multi-modal distributions tend to be
    oversmoothed.

    Parameters
    ----------
    dataset : array_like
        Datapoints to estimate from. In case of univariate data this is a 1-D
        array, otherwise a 2-D array with shape (# of dims, # of data).
    bw_method : str, scalar or callable, optional
        The method used to calculate the estimator bandwidth. This can be
        'scott', 'silverman', a scalar constant or a callable. If a scalar,
        this will be used directly as `kde.factor`. If a callable, it should
        take a `gaussian_kde` instance as only parameter and return a scalar.
        If None (default), 'scott' is used. See Notes for more details.
    weights : array_like, optional
        weights of datapoints. This must be the same shape as dataset.
        If None (default), the samples are assumed to be equally weighted.

    Attributes
    ----------
    dataset : ndarray
        The dataset with which `gaussian_kde` was initialized.
    d : int
        Number of dimensions.
    n : int
        Number of datapoints.
    neff : int
        Effective number of datapoints.

        .. versionadded:: 1.2.0
    factor : float
        The bandwidth factor, obtained from `kde.covariance_factor`. The square
        of `kde.factor` multiplies the covariance matrix of the data in the kde
        estimation.
    covariance : ndarray
        The covariance matrix of `dataset`, scaled by the calculated bandwidth
        (`kde.factor`).
    inv_cov : ndarray
        The inverse of `covariance`.

    Methods
    -------
    evaluate
    __call__
    integrate_gaussian
    integrate_box_1d
    integrate_box
    integrate_kde
    pdf
    logpdf
    resample
    set_bandwidth
    covariance_factor

    Notes
    -----
    Bandwidth selection strongly influences the estimate obtained from the KDE
    (much more so than the actual shape of the kernel). Bandwidth selection
    can be done by a "rule of thumb", by cross-validation, by "plug-in
    methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
    uses a rule of thumb, the default is Scott's Rule.

    Scott's Rule [1]_, implemented as `scotts_factor`, is::

        n**(-1./(d+4)),

    with ``n`` the number of data points and ``d`` the number of dimensions.
    In the case of unequally weighted points, `scotts_factor` becomes::

        neff**(-1./(d+4)),

    with ``neff`` the effective number of datapoints.
    Silverman's Rule [2]_, implemented as `silverman_factor`, is::

        (n * (d + 2) / 4.)**(-1. / (d + 4)).

    or in the case of unequally weighted points::

        (neff * (d + 2) / 4.)**(-1. / (d + 4)).

    Good general descriptions of kernel density estimation can be found in [1]_
    and [2]_, the mathematics for this multi-dimensional implementation can be
    found in [1]_.

    With a set of weighted samples, the effective number of datapoints ``neff``
    is defined by::

        neff = sum(weights)^2 / sum(weights^2)

    as detailed in [5]_.

    `gaussian_kde` does not currently support data that lies in a
    lower-dimensional subspace of the space in which it is expressed. For such
    data, consider performing principal component analysis / dimensionality
    reduction and using `gaussian_kde` with the transformed data.

    References
    ----------
    .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
           Visualization", John Wiley & Sons, New York, Chichester, 1992.
    .. [2] B.W. Silverman, "Density Estimation for Statistics and Data
           Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
           Chapman and Hall, London, 1986.
    .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
           Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
    .. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
           conditional density estimation", Computational Statistics & Data
           Analysis, Vol. 36, pp. 279-298, 2001.
    .. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
           Series A (General), 132, 272

    Examples
    --------
    Generate some random two-dimensional data:

    >>> import numpy as np
    >>> from scipy import stats
    >>> def measure(n):
    ...     "Measurement model, return two coupled measurements."
    ...     m1 = np.random.normal(size=n)
    ...     m2 = np.random.normal(scale=0.5, size=n)
    ...     return m1+m2, m1-m2

    >>> m1, m2 = measure(2000)
    >>> xmin = m1.min()
    >>> xmax = m1.max()
    >>> ymin = m2.min()
    >>> ymax = m2.max()

    Perform a kernel density estimate on the data:

    >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    >>> positions = np.vstack([X.ravel(), Y.ravel()])
    >>> values = np.vstack([m1, m2])
    >>> kernel = stats.gaussian_kde(values)
    >>> Z = np.reshape(kernel(positions).T, X.shape)

    Plot the results:

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
    ...           extent=[xmin, xmax, ymin, ymax])
    >>> ax.plot(m1, m2, 'k.', markersize=2)
    >>> ax.set_xlim([xmin, xmax])
    >>> ax.set_ylim([ymin, ymax])
    >>> plt.show()

    """
    def __init__(self, dataset, bw_method=None, weights=None):
        self.dataset = atleast_2d(asarray(dataset))
        if not self.dataset.size > 1:
            raise ValueError("`dataset` input should have multiple elements.")

        self.d, self.n = self.dataset.shape

        if weights is not None:
            self._weights = atleast_1d(weights).astype(float)
            self._weights /= sum(self._weights)
            if self.weights.ndim != 1:
                raise ValueError("`weights` input should be one-dimensional.")
            if len(self._weights) != self.n:
                raise ValueError("`weights` input should be of length n")
            self._neff = 1/sum(self._weights**2)

        # This can be converted to a warning once gh-10205 is resolved
        if self.d > self.n:
            msg = ("Number of dimensions is greater than number of samples. "
                   "This results in a singular data covariance matrix, which "
                   "cannot be treated using the algorithms implemented in "
                   "`gaussian_kde`. Note that `gaussian_kde` interprets each "
                   "*column* of `dataset` to be a point; consider transposing "
                   "the input to `dataset`.")
            raise ValueError(msg)

        try:
            self.set_bandwidth(bw_method=bw_method)
        except linalg.LinAlgError as e:
            msg = ("The data appears to lie in a lower-dimensional subspace "
                   "of the space in which it is expressed. This has resulted "
                   "in a singular data covariance matrix, which cannot be "
                   "treated using the algorithms implemented in "
                   "`gaussian_kde`. Consider performing principal component "
                   "analysis / dimensionality reduction and using "
                   "`gaussian_kde` with the transformed data.")
            raise linalg.LinAlgError(msg) from e

    def evaluate(self, points):
        """Evaluate the estimated pdf on a set of points.

        Parameters
        ----------
        points : (# of dimensions, # of points)-array
            Alternatively, a (# of dimensions,) vector can be passed in and
            treated as a single point.

        Returns
        -------
        values : (# of points,)-array
            The values at each point.

        Raises
        ------
        ValueError : if the dimensionality of the input points is different
            than the dimensionality of the KDE.

        """
        points = atleast_2d(asarray(points))

        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = (f"points have dimension {d}, "
                       f"dataset has dimension {self.d}")
                raise ValueError(msg)

        output_dtype, spec = _get_output_dtype(self.covariance, points)
        result = gaussian_kernel_estimate[spec](
            self.dataset.T, self.weights[:, None],
            points.T, self.cho_cov, output_dtype)

        return result[:, 0]

    __call__ = evaluate

    def integrate_gaussian(self, mean, cov):
        """
        Multiply estimated density by a multivariate Gaussian and integrate
        over the whole space.

        Parameters
        ----------
        mean : array_like
            A 1-D array, specifying the mean of the Gaussian.
        cov : array_like
            A 2-D array, specifying the covariance matrix of the Gaussian.

        Returns
        -------
        result : scalar
            The value of the integral.

        Raises
        ------
        ValueError
            If the mean or covariance of the input Gaussian differs from
            the KDE's dimensionality.

        """
        mean = atleast_1d(squeeze(mean))
        cov = atleast_2d(cov)

        if mean.shape != (self.d,):
            raise ValueError("mean does not have dimension %s" % self.d)
        if cov.shape != (self.d, self.d):
            raise ValueError("covariance does not have dimension %s" % self.d)

        # make mean a column vector
        mean = mean[:, newaxis]

        sum_cov = self.covariance + cov

        # This will raise LinAlgError if the new cov matrix is not s.p.d
        # cho_factor returns (ndarray, bool) where bool is a flag for whether
        # or not ndarray is upper or lower triangular
        sum_cov_chol = linalg.cho_factor(sum_cov)

        diff = self.dataset - mean
        tdiff = linalg.cho_solve(sum_cov_chol, diff)

        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        energies = sum(diff * tdiff, axis=0) / 2.0
        result = sum(exp(-energies)*self.weights, axis=0) / norm_const

        return result

    def integrate_box_1d(self, low, high):
        """
        Computes the integral of a 1D pdf between two bounds.

        Parameters
        ----------
        low : scalar
            Lower bound of integration.
        high : scalar
            Upper bound of integration.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDE is over more than one dimension.

        """
        if self.d != 1:
            raise ValueError("integrate_box_1d() only handles 1D pdfs")

        stdev = ravel(sqrt(self.covariance))[0]

        normalized_low = ravel((low - self.dataset) / stdev)
        normalized_high = ravel((high - self.dataset) / stdev)

        value = np.sum(self.weights*(
                       special.ndtr(normalized_high) -
                       special.ndtr(normalized_low)))
        return value

    def integrate_box(self, low_bounds, high_bounds, maxpts=None):
        """Computes the integral of a pdf over a rectangular interval.

        Parameters
        ----------
        low_bounds : array_like
            A 1-D array containing the lower bounds of integration.
        high_bounds : array_like
            A 1-D array containing the upper bounds of integration.
        maxpts : int, optional
            The maximum number of points to use for integration.

        Returns
        -------
        value : scalar
            The result of the integral.

        """
        if maxpts is not None:
            extra_kwds = {'maxpts': maxpts}
        else:
            extra_kwds = {}

        value, inform = _mvn.mvnun_weighted(low_bounds, high_bounds,
                                            self.dataset, self.weights,
                                            self.covariance, **extra_kwds)
        if inform:
            msg = ('An integral in _mvn.mvnun requires more points than %s' %
                   (self.d * 1000))
            warnings.warn(msg, stacklevel=2)

        return value

    def integrate_kde(self, other):
        """
        Computes the integral of the product of this kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.

        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")

        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other

        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = linalg.cho_solve(sum_cov_chol, diff)

            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]

        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        result /= norm_const

        return result

    def resample(self, size=None, seed=None):
        """Randomly sample a dataset from the estimated pdf.

        Parameters
        ----------
        size : int, optional
            The number of samples to draw. If not provided, then the size is
            the same as the effective number of samples in the underlying
            dataset.
        seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance then
            that instance is used.

        Returns
        -------
        resample : (self.d, `size`) ndarray
            The sampled dataset.

        """  # numpy/numpydoc#87  # noqa: E501
        if size is None:
            size = int(self.neff)

        random_state = check_random_state(seed)
        norm = transpose(random_state.multivariate_normal(
            zeros((self.d,), float), self.covariance, size=size
        ))
        indices = random_state.choice(self.n, size=size, p=self.weights)
        means = self.dataset[:, indices]

        return means + norm

    def scotts_factor(self):
        """Compute Scott's factor.

        Returns
        -------
        s : float
            Scott's factor.
        """
        return power(self.neff, -1./(self.d+4))

    def silverman_factor(self):
        """Compute the Silverman factor.

        Returns
        -------
        s : float
            The Silverman factor.
        """
        return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))

    # Default method to calculate bandwidth, can be overwritten by subclass
    covariance_factor = scotts_factor
    covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
        multiplies the data covariance matrix to obtain the kernel covariance
        matrix. The default is `scotts_factor`. A subclass can overwrite this
        method to provide a different method, or set it through a call to
        `kde.set_bandwidth`."""

    def set_bandwidth(self, bw_method=None):
        """Compute the estimator bandwidth with given method.

        The new bandwidth calculated after a call to `set_bandwidth` is used
        for subsequent evaluations of the estimated density.

        Parameters
        ----------
        bw_method : str, scalar or callable, optional
            The method used to calculate the estimator bandwidth. This can be
            'scott', 'silverman', a scalar constant or a callable. If a
            scalar, this will be used directly as `kde.factor`. If a callable,
            it should take a `gaussian_kde` instance as only parameter and
            return a scalar. If None (default), nothing happens; the current
            `kde.covariance_factor` method is kept.

        Notes
        -----
        .. versionadded:: 0.11

        Examples
        --------
        >>> import numpy as np
        >>> import scipy.stats as stats
        >>> x1 = np.array([-7, -5, 1, 4, 5.])
        >>> kde = stats.gaussian_kde(x1)
        >>> xs = np.linspace(-10, 10, num=50)
        >>> y1 = kde(xs)
        >>> kde.set_bandwidth(bw_method='silverman')
        >>> y2 = kde(xs)
        >>> kde.set_bandwidth(bw_method=kde.factor / 3.)
        >>> y3 = kde(xs)

        >>> import matplotlib.pyplot as plt
        >>> fig, ax = plt.subplots()
        >>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
        ...         label='Data points (rescaled)')
        >>> ax.plot(xs, y1, label='Scott (default)')
        >>> ax.plot(xs, y2, label='Silverman')
        >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
        >>> ax.legend()
        >>> plt.show()

        """
        if bw_method is None:
            pass
        elif bw_method == 'scott':
            self.covariance_factor = self.scotts_factor
        elif bw_method == 'silverman':
            self.covariance_factor = self.silverman_factor
        elif np.isscalar(bw_method) and not isinstance(bw_method, str):
            self._bw_method = 'use constant'
            self.covariance_factor = lambda: bw_method
        elif callable(bw_method):
            self._bw_method = bw_method
            self.covariance_factor = lambda: self._bw_method(self)
        else:
            msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
                  "or a callable."
            raise ValueError(msg)

        self._compute_covariance()

    def _compute_covariance(self):
        """Computes the covariance matrix for each Gaussian kernel using
        covariance_factor().
        """
        self.factor = self.covariance_factor()
        # Cache covariance and Cholesky decomp of covariance
        if not hasattr(self, '_data_cho_cov'):
            self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                                   bias=False,
                                                   aweights=self.weights))
            self._data_cho_cov = linalg.cholesky(self._data_covariance,
                                                 lower=True)

        self.covariance = self._data_covariance * self.factor**2
        self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64)
        self.log_det = 2*np.log(np.diag(self.cho_cov
                                        * np.sqrt(2*pi))).sum()

    @property
    def inv_cov(self):
        # Re-compute from scratch each time because I'm not sure how this is
        # used in the wild. (Perhaps users change the `dataset`, since it's
        # not a private attribute?) `_compute_covariance` used to recalculate
        # all these, so we'll recalculate everything now that this is
        # a property.
        self.factor = self.covariance_factor()
        self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                               bias=False,
                                               aweights=self.weights))
        return linalg.inv(self._data_covariance) / self.factor**2

    def pdf(self, x):
        """
        Evaluate the estimated pdf on a provided set of points.

        Notes
        -----
        This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
        docstring for more details.

        """
        return self.evaluate(x)

    def logpdf(self, x):
        """
        Evaluate the log of the estimated pdf on a provided set of points.
        """
        points = atleast_2d(x)

        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = (f"points have dimension {d}, "
                       f"dataset has dimension {self.d}")
                raise ValueError(msg)

        output_dtype, spec = _get_output_dtype(self.covariance, points)
        result = gaussian_kernel_estimate_log[spec](
            self.dataset.T, self.weights[:, None],
            points.T, self.cho_cov, output_dtype)

        return result[:, 0]

    def marginal(self, dimensions):
        """Return a marginal KDE distribution

        Parameters
        ----------
        dimensions : int or 1-d array_like
            The dimensions of the multivariate distribution corresponding
            with the marginal variables, that is, the indices of the dimensions
            that are being retained. The other dimensions are marginalized out.

        Returns
        -------
        marginal_kde : gaussian_kde
            An object representing the marginal distribution.

        Notes
        -----
        .. versionadded:: 1.10.0

        """

        dims = np.atleast_1d(dimensions)

        if not np.issubdtype(dims.dtype, np.integer):
            msg = ("Elements of `dimensions` must be integers - the indices "
                   "of the marginal variables being retained.")
            raise ValueError(msg)

        n = len(self.dataset)  # number of dimensions
        original_dims = dims.copy()

        dims[dims < 0] = n + dims[dims < 0]

        if len(np.unique(dims)) != len(dims):
            msg = ("All elements of `dimensions` must be unique.")
            raise ValueError(msg)

        i_invalid = (dims < 0) | (dims >= n)
        if np.any(i_invalid):
            msg = (f"Dimensions {original_dims[i_invalid]} are invalid "
                   f"for a distribution in {n} dimensions.")
            raise ValueError(msg)

        dataset = self.dataset[dims]
        weights = self.weights

        return gaussian_kde(dataset, bw_method=self.covariance_factor(),
                            weights=weights)

    @property
    def weights(self):
        try:
            return self._weights
        except AttributeError:
            self._weights = ones(self.n)/self.n
            return self._weights

    @property
    def neff(self):
        try:
            return self._neff
        except AttributeError:
            self._neff = 1/sum(self.weights**2)
            return self._neff


def _get_output_dtype(covariance, points):
    """
    Calculates the output dtype and the "spec" (=C type name).

    This was necessary in order to deal with the fused types in the Cython
    routine `gaussian_kernel_estimate`. See gh-10824 for details.
    """
    output_dtype = np.common_type(covariance, points)
    itemsize = np.dtype(output_dtype).itemsize
    if itemsize == 4:
        spec = 'float'
    elif itemsize == 8:
        spec = 'double'
    elif itemsize in (12, 16):
        spec = 'long double'
    else:
        raise ValueError(
            f"{output_dtype} has unexpected item size: {itemsize}"
        )

    return output_dtype, spec
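As a quick sanity check of the weighted-KDE API defined above (a sketch, not part of the diff), the snippet below builds a one-dimensional `gaussian_kde` with unequal weights and confirms that `factor` matches Scott's rule computed from `neff`, and that `logpdf` agrees with the log of `evaluate`.

import numpy as np
from scipy import stats

rng = np.random.default_rng(1234)
x = rng.normal(size=100)
w = rng.uniform(0.5, 1.5, size=100)   # unequal, positive weights

kde = stats.gaussian_kde(x, weights=w)

# Scott's factor for weighted data is neff**(-1/(d+4)), with d == 1 here.
print(np.isclose(kde.factor, kde.neff ** (-1 / 5)))           # True

# The log-density path should agree with the log of the density path.
pts = np.linspace(-3, 3, 5)
print(np.allclose(np.log(kde(pts)), kde.logpdf(pts)))         # True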
env-llmeval/lib/python3.10/site-packages/scipy/stats/_morestats.py
ADDED
The diff for this file is too large to render.
See raw diff
env-llmeval/lib/python3.10/site-packages/scipy/stats/_mstats_basic.py
ADDED
The diff for this file is too large to render.
See raw diff
env-llmeval/lib/python3.10/site-packages/scipy/stats/_mstats_extras.py
ADDED
@@ -0,0 +1,521 @@
"""
Additional statistics functions with support for masked arrays.

"""

# Original author (2007): Pierre GF Gerard-Marchant


__all__ = ['compare_medians_ms',
           'hdquantiles', 'hdmedian', 'hdquantiles_sd',
           'idealfourths',
           'median_cihs','mjci','mquantiles_cimj',
           'rsh',
           'trimmed_mean_ci',]


import numpy as np
from numpy import float64, ndarray

import numpy.ma as ma
from numpy.ma import MaskedArray

from . import _mstats_basic as mstats

from scipy.stats.distributions import norm, beta, t, binom


def hdquantiles(data, prob=list([.25,.5,.75]), axis=None, var=False,):
    """
    Computes quantile estimates with the Harrell-Davis method.

    The quantile estimates are calculated as a weighted linear combination
    of order statistics.

    Parameters
    ----------
    data : array_like
        Data array.
    prob : sequence, optional
        Sequence of probabilities at which to compute the quantiles.
    axis : int or None, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.
    var : bool, optional
        Whether to return the variance of the estimate.

    Returns
    -------
    hdquantiles : MaskedArray
        A (p,) array of quantiles (if `var` is False), or a (2,p) array of
        quantiles and variances (if `var` is True), where ``p`` is the
        number of quantiles.

    See Also
    --------
    hdquantiles_sd

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats.mstats import hdquantiles
    >>>
    >>> # Sample data
    >>> data = np.array([1.2, 2.5, 3.7, 4.0, 5.1, 6.3, 7.0, 8.2, 9.4])
    >>>
    >>> # Probabilities at which to compute quantiles
    >>> probabilities = [0.25, 0.5, 0.75]
    >>>
    >>> # Compute Harrell-Davis quantile estimates
    >>> quantile_estimates = hdquantiles(data, prob=probabilities)
    >>>
    >>> # Display the quantile estimates
    >>> for i, quantile in enumerate(probabilities):
    ...     print(f"{int(quantile * 100)}th percentile: {quantile_estimates[i]}")
    25th percentile: 3.1505820231763066 # may vary
    50th percentile: 5.194344084883956
    75th percentile: 7.430626414674935

    """
    def _hd_1D(data,prob,var):
        "Computes the HD quantiles for a 1D array. Returns nan for invalid data."
        xsorted = np.squeeze(np.sort(data.compressed().view(ndarray)))
        # Don't use length here, in case we have a numpy scalar
        n = xsorted.size

        hd = np.empty((2,len(prob)), float64)
        if n < 2:
            hd.flat = np.nan
            if var:
                return hd
            return hd[0]

        v = np.arange(n+1) / float(n)
        betacdf = beta.cdf
        for (i,p) in enumerate(prob):
            _w = betacdf(v, (n+1)*p, (n+1)*(1-p))
            w = _w[1:] - _w[:-1]
            hd_mean = np.dot(w, xsorted)
            hd[0,i] = hd_mean
            #
            hd[1,i] = np.dot(w, (xsorted-hd_mean)**2)
        #
        hd[0, prob == 0] = xsorted[0]
        hd[0, prob == 1] = xsorted[-1]
        if var:
            hd[1, prob == 0] = hd[1, prob == 1] = np.nan
            return hd
        return hd[0]
    # Initialization & checks
    data = ma.array(data, copy=False, dtype=float64)
    p = np.atleast_1d(np.asarray(prob))
    # Computes quantiles along axis (or globally)
    if (axis is None) or (data.ndim == 1):
        result = _hd_1D(data, p, var)
    else:
        if data.ndim > 2:
            raise ValueError("Array 'data' must be at most two dimensional, "
                             "but got data.ndim = %d" % data.ndim)
        result = ma.apply_along_axis(_hd_1D, axis, data, p, var)

    return ma.fix_invalid(result, copy=False)


def hdmedian(data, axis=-1, var=False):
    """
    Returns the Harrell-Davis estimate of the median along the given axis.

    Parameters
    ----------
    data : ndarray
        Data array.
    axis : int, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.
    var : bool, optional
        Whether to return the variance of the estimate.

    Returns
    -------
    hdmedian : MaskedArray
        The median values. If ``var=True``, the variance is returned inside
        the masked array. E.g. for a 1-D array the shape changes from (1,) to
        (2,).

    """
    result = hdquantiles(data,[0.5], axis=axis, var=var)
    return result.squeeze()


def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None):
    """
    The standard error of the Harrell-Davis quantile estimates by jackknife.

    Parameters
    ----------
    data : array_like
        Data array.
    prob : sequence, optional
        Sequence of quantiles to compute.
    axis : int, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    Returns
    -------
    hdquantiles_sd : MaskedArray
        Standard error of the Harrell-Davis quantile estimates.

    See Also
    --------
    hdquantiles

    """
    def _hdsd_1D(data, prob):
        "Computes the std error for 1D arrays."
        xsorted = np.sort(data.compressed())
        n = len(xsorted)

        hdsd = np.empty(len(prob), float64)
        if n < 2:
            hdsd.flat = np.nan

        vv = np.arange(n) / float(n-1)
        betacdf = beta.cdf

        for (i,p) in enumerate(prob):
            _w = betacdf(vv, n*p, n*(1-p))
            w = _w[1:] - _w[:-1]
            # cumulative sum of weights and data points if
            # ith point is left out for jackknife
            mx_ = np.zeros_like(xsorted)
            mx_[1:] = np.cumsum(w * xsorted[:-1])
            # similar but from the right
            mx_[:-1] += np.cumsum(w[::-1] * xsorted[:0:-1])[::-1]
            hdsd[i] = np.sqrt(mx_.var() * (n - 1))
        return hdsd

    # Initialization & checks
    data = ma.array(data, copy=False, dtype=float64)
    p = np.atleast_1d(np.asarray(prob))
    # Computes quantiles along axis (or globally)
    if (axis is None):
        result = _hdsd_1D(data, p)
    else:
        if data.ndim > 2:
            raise ValueError("Array 'data' must be at most two dimensional, "
                             "but got data.ndim = %d" % data.ndim)
        result = ma.apply_along_axis(_hdsd_1D, axis, data, p)

    return ma.fix_invalid(result, copy=False).ravel()


def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True),
                    alpha=0.05, axis=None):
    """
    Selected confidence interval of the trimmed mean along the given axis.

    Parameters
    ----------
    data : array_like
        Input data.
    limits : {None, tuple}, optional
        None or a two item tuple.
        Tuple of the percentages to cut on each side of the array, with respect
        to the number of unmasked data, as floats between 0. and 1. If ``n``
        is the number of unmasked data before trimming, then
        (``n * limits[0]``)th smallest data and (``n * limits[1]``)th
        largest data are masked. The total number of unmasked data after
        trimming is ``n * (1. - sum(limits))``.
        The value of one limit can be set to None to indicate an open interval.

        Defaults to (0.2, 0.2).
    inclusive : (2,) tuple of boolean, optional
        If relative==False, tuple indicating whether values exactly equal to
        the absolute limits are allowed.
        If relative==True, tuple indicating whether the number of data being
        masked on each side should be rounded (True) or truncated (False).

        Defaults to (True, True).
    alpha : float, optional
        Confidence level of the intervals.

        Defaults to 0.05.
    axis : int, optional
        Axis along which to cut. If None, uses a flattened version of `data`.

        Defaults to None.

    Returns
    -------
    trimmed_mean_ci : (2,) ndarray
        The lower and upper confidence intervals of the trimmed data.

    """
    data = ma.array(data, copy=False)
    trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis)
    tmean = trimmed.mean(axis)
    tstde = mstats.trimmed_stde(data,limits=limits,inclusive=inclusive,axis=axis)
    df = trimmed.count(axis) - 1
    tppf = t.ppf(1-alpha/2.,df)
    return np.array((tmean - tppf*tstde, tmean+tppf*tstde))


def mjci(data, prob=[0.25,0.5,0.75], axis=None):
    """
    Returns the Maritz-Jarrett estimators of the standard error of selected
    experimental quantiles of the data.

    Parameters
    ----------
    data : ndarray
        Data array.
    prob : sequence, optional
        Sequence of quantiles to compute.
    axis : int or None, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    """
    def _mjci_1D(data, p):
        data = np.sort(data.compressed())
        n = data.size
        prob = (np.array(p) * n + 0.5).astype(int)
        betacdf = beta.cdf

        mj = np.empty(len(prob), float64)
        x = np.arange(1,n+1, dtype=float64) / n
        y = x - 1./n
        for (i,m) in enumerate(prob):
            W = betacdf(x,m-1,n-m) - betacdf(y,m-1,n-m)
            C1 = np.dot(W,data)
            C2 = np.dot(W,data**2)
            mj[i] = np.sqrt(C2 - C1**2)
        return mj

    data = ma.array(data, copy=False)
    if data.ndim > 2:
        raise ValueError("Array 'data' must be at most two dimensional, "
                         "but got data.ndim = %d" % data.ndim)

    p = np.atleast_1d(np.asarray(prob))
    # Computes quantiles along axis (or globally)
    if (axis is None):
        return _mjci_1D(data, p)
    else:
        return ma.apply_along_axis(_mjci_1D, axis, data, p)


def mquantiles_cimj(data, prob=[0.25,0.50,0.75], alpha=0.05, axis=None):
    """
    Computes the alpha confidence interval for the selected quantiles of the
    data, with Maritz-Jarrett estimators.

    Parameters
    ----------
    data : ndarray
        Data array.
    prob : sequence, optional
        Sequence of quantiles to compute.
    alpha : float, optional
        Confidence level of the intervals.
    axis : int or None, optional
        Axis along which to compute the quantiles.
        If None, use a flattened array.

    Returns
    -------
    ci_lower : ndarray
        The lower boundaries of the confidence interval. Of the same length as
        `prob`.
    ci_upper : ndarray
        The upper boundaries of the confidence interval. Of the same length as
        `prob`.

    """
    alpha = min(alpha, 1 - alpha)
    z = norm.ppf(1 - alpha/2.)
    xq = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis)
    smj = mjci(data, prob, axis=axis)
    return (xq - z * smj, xq + z * smj)


def median_cihs(data, alpha=0.05, axis=None):
    """
    Computes the alpha-level confidence interval for the median of the data.

    Uses the Hettmansperger-Sheather method.

    Parameters
    ----------
    data : array_like
        Input data. Masked values are discarded. The input should be 1D only,
        or `axis` should be set to None.
    alpha : float, optional
        Confidence level of the intervals.
    axis : int or None, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    Returns
    -------
    median_cihs
        Alpha level confidence interval.

    """
    def _cihs_1D(data, alpha):
        data = np.sort(data.compressed())
        n = len(data)
        alpha = min(alpha, 1-alpha)
        k = int(binom._ppf(alpha/2., n, 0.5))
        gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
        if gk < 1-alpha:
            k -= 1
            gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
        gkk = binom.cdf(n-k-1,n,0.5) - binom.cdf(k,n,0.5)
        I = (gk - 1 + alpha)/(gk - gkk)
        lambd = (n-k) * I / float(k + (n-2*k)*I)
        lims = (lambd*data[k] + (1-lambd)*data[k-1],
                lambd*data[n-k-1] + (1-lambd)*data[n-k])
        return lims
    data = ma.array(data, copy=False)
    # Computes quantiles along axis (or globally)
    if (axis is None):
        result = _cihs_1D(data, alpha)
    else:
        if data.ndim > 2:
            raise ValueError("Array 'data' must be at most two dimensional, "
                             "but got data.ndim = %d" % data.ndim)
        result = ma.apply_along_axis(_cihs_1D, axis, data, alpha)

    return result


def compare_medians_ms(group_1, group_2, axis=None):
    """
    Compares the medians from two independent groups along the given axis.

    The comparison is performed using the McKean-Schrader estimate of the
    standard error of the medians.

    Parameters
    ----------
    group_1 : array_like
        First dataset. Has to be of size >=7.
    group_2 : array_like
        Second dataset. Has to be of size >=7.
    axis : int, optional
        Axis along which the medians are estimated. If None, the arrays are
        flattened. If `axis` is not None, then `group_1` and `group_2`
        should have the same shape.

    Returns
    -------
    compare_medians_ms : {float, ndarray}
        If `axis` is None, then returns a float, otherwise returns a 1-D
        ndarray of floats with a length equal to the length of `group_1`
        along `axis`.

    Examples
    --------

    >>> from scipy import stats
    >>> a = [1, 2, 3, 4, 5, 6, 7]
    >>> b = [8, 9, 10, 11, 12, 13, 14]
    >>> stats.mstats.compare_medians_ms(a, b, axis=None)
    1.0693225866553746e-05

    The function is vectorized to compute along a given axis.

    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> x = rng.random(size=(3, 7))
    >>> y = rng.random(size=(3, 8))
    >>> stats.mstats.compare_medians_ms(x, y, axis=1)
    array([0.36908985, 0.36092538, 0.2765313 ])

    References
    ----------
    .. [1] McKean, Joseph W., and Ronald M. Schrader. "A comparison of methods
       for studentizing the sample median." Communications in
       Statistics-Simulation and Computation 13.6 (1984): 751-773.

    """
    (med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis))
    (std_1, std_2) = (mstats.stde_median(group_1, axis=axis),
                      mstats.stde_median(group_2, axis=axis))
    W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2)
    return 1 - norm.cdf(W)


def idealfourths(data, axis=None):
    """
    Returns an estimate of the lower and upper quartiles.

    Uses the ideal fourths algorithm.

    Parameters
    ----------
    data : array_like
        Input array.
    axis : int, optional
        Axis along which the quartiles are estimated. If None, the arrays are
        flattened.

    Returns
    -------
    idealfourths : {list of floats, masked array}
        Returns the two internal values that divide `data` into four parts
        using the ideal fourths algorithm either along the flattened array
        (if `axis` is None) or along `axis` of `data`.

    """
    def _idf(data):
        x = data.compressed()
        n = len(x)
        if n < 3:
            return [np.nan,np.nan]
        (j,h) = divmod(n/4. + 5/12.,1)
        j = int(j)
        qlo = (1-h)*x[j-1] + h*x[j]
        k = n - j
        qup = (1-h)*x[k] + h*x[k-1]
        return [qlo, qup]
    data = ma.sort(data, axis=axis).view(MaskedArray)
    if (axis is None):
        return _idf(data)
    else:
        return ma.apply_along_axis(_idf, axis, data)


def rsh(data, points=None):
    """
    Evaluates Rosenblatt's shifted histogram estimators for each data point.

    Rosenblatt's estimator is a centered finite-difference approximation to the
    derivative of the empirical cumulative distribution function.

    Parameters
    ----------
    data : sequence
        Input data, should be 1-D. Masked values are ignored.
    points : sequence or None, optional
        Sequence of points where to evaluate Rosenblatt shifted histogram.
        If None, use the data.

    """
    data = ma.array(data, copy=False)
    if points is None:
        points = data
    else:
        points = np.atleast_1d(np.asarray(points))

    if data.ndim != 1:
        raise AttributeError("The input array should be 1D only!")

    n = data.count()
    r = idealfourths(data, axis=None)
    h = 1.2 * (r[-1]-r[0]) / n**(1./5)
    nhi = (data[:,None] <= points[None,:] + h).sum(0)
    nlo = (data[:,None] < points[None,:] - h).sum(0)
    return (nhi-nlo) / (2.*n*h)
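The estimators above fit together: `hdquantiles` gives the Harrell-Davis point estimates, `hdquantiles_sd` their jackknife standard errors, and `mquantiles_cimj` a Maritz-Jarrett confidence interval. A minimal sketch (not part of the diff), using the public `scipy.stats.mstats` namespace through which these functions are exported:

import numpy as np
from scipy.stats import mstats

data = np.array([1.2, 2.5, 3.7, 4.0, 5.1, 6.3, 7.0, 8.2, 9.4])
probs = [0.25, 0.5, 0.75]

q = mstats.hdquantiles(data, prob=probs)       # Harrell-Davis estimates
se = mstats.hdquantiles_sd(data, prob=probs)   # jackknife standard errors
print(q)
print(se)

# Maritz-Jarrett confidence interval around the median.
lo, hi = mstats.mquantiles_cimj(data, prob=[0.5], alpha=0.05)
print(float(lo[0]), float(hi[0]))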
env-llmeval/lib/python3.10/site-packages/scipy/stats/_page_trend_test.py
ADDED
@@ -0,0 +1,479 @@
from itertools import permutations
import numpy as np
import math
from ._continuous_distns import norm
import scipy.stats
from dataclasses import dataclass


@dataclass
class PageTrendTestResult:
    statistic: float
    pvalue: float
    method: str


def page_trend_test(data, ranked=False, predicted_ranks=None, method='auto'):
    r"""
    Perform Page's Test, a measure of trend in observations between treatments.

    Page's Test (also known as Page's :math:`L` test) is useful when:

    * there are :math:`n \geq 3` treatments,
    * :math:`m \geq 2` subjects are observed for each treatment, and
    * the observations are hypothesized to have a particular order.

    Specifically, the test considers the null hypothesis that

    .. math::

        m_1 = m_2 = m_3 = \cdots = m_n,

    where :math:`m_j` is the mean of the observed quantity under treatment
    :math:`j`, against the alternative hypothesis that

    .. math::

        m_1 \leq m_2 \leq m_3 \leq \cdots \leq m_n,

    where at least one inequality is strict.

    As noted by [4]_, Page's :math:`L` test has greater statistical power than
    the Friedman test against the alternative that there is a difference in
    trend, as Friedman's test only considers a difference in the means of the
    observations without considering their order. Whereas Spearman :math:`\rho`
    considers the correlation between the ranked observations of two variables
    (e.g. the airspeed velocity of a swallow vs. the weight of the coconut it
    carries), Page's :math:`L` is concerned with a trend in an observation
    (e.g. the airspeed velocity of a swallow) across several distinct
    treatments (e.g. carrying each of five coconuts of different weight) even
    as the observation is repeated with multiple subjects (e.g. one European
    swallow and one African swallow).

    Parameters
    ----------
    data : array-like
        A :math:`m \times n` array; the element in row :math:`i` and
        column :math:`j` is the observation corresponding with subject
        :math:`i` and treatment :math:`j`. By default, the columns are
        assumed to be arranged in order of increasing predicted mean.

    ranked : boolean, optional
        By default, `data` is assumed to be observations rather than ranks;
        it will be ranked with `scipy.stats.rankdata` along ``axis=1``. If
        `data` is provided in the form of ranks, pass argument ``True``.

    predicted_ranks : array-like, optional
        The predicted ranks of the column means. If not specified,
        the columns are assumed to be arranged in order of increasing
        predicted mean, so the default `predicted_ranks` are
        :math:`[1, 2, \dots, n-1, n]`.

    method : {'auto', 'asymptotic', 'exact'}, optional
        Selects the method used to calculate the *p*-value. The following
        options are available.

        * 'auto': selects between 'exact' and 'asymptotic' to
          achieve reasonably accurate results in reasonable time (default)
        * 'asymptotic': compares the standardized test statistic against
          the normal distribution
        * 'exact': computes the exact *p*-value by comparing the observed
          :math:`L` statistic against those realized by all possible
          permutations of ranks (under the null hypothesis that each
          permutation is equally likely)

    Returns
    -------
    res : PageTrendTestResult
        An object containing attributes:

        statistic : float
            Page's :math:`L` test statistic.
        pvalue : float
            The associated *p*-value
        method : {'asymptotic', 'exact'}
            The method used to compute the *p*-value

    See Also
    --------
    rankdata, friedmanchisquare, spearmanr

    Notes
    -----
    As noted in [1]_, "the :math:`n` 'treatments' could just as well represent
    :math:`n` objects or events or performances or persons or trials ranked."
    Similarly, the :math:`m` 'subjects' could equally stand for :math:`m`
    "groupings by ability or some other control variable, or judges doing
    the ranking, or random replications of some other sort."

    The procedure for calculating the :math:`L` statistic, adapted from
    [1]_, is:

    1. "Predetermine with careful logic the appropriate hypotheses
       concerning the predicted ordering of the experimental results.
       If no reasonable basis for ordering any treatments is known, the
       :math:`L` test is not appropriate."
    2. "As in other experiments, determine at what level of confidence
       you will reject the null hypothesis that there is no agreement of
       experimental results with the monotonic hypothesis."
    3. "Cast the experimental material into a two-way table of :math:`n`
       columns (treatments, objects ranked, conditions) and :math:`m`
       rows (subjects, replication groups, levels of control variables)."
    4. "When experimental observations are recorded, rank them across each
       row", e.g. ``ranks = scipy.stats.rankdata(data, axis=1)``.
    5. "Add the ranks in each column", e.g.
       ``colsums = np.sum(ranks, axis=0)``.
    6. "Multiply each sum of ranks by the predicted rank for that same
       column", e.g. ``products = predicted_ranks * colsums``.
    7. "Sum all such products", e.g. ``L = products.sum()``.

    [1]_ continues by suggesting use of the standardized statistic

    .. math::

        \chi_L^2 = \frac{\left[12L-3mn(n+1)^2\right]^2}{mn^2(n^2-1)(n+1)}

    "which is distributed approximately as chi-square with 1 degree of
    freedom. The ordinary use of :math:`\chi^2` tables would be
    equivalent to a two-sided test of agreement. If a one-sided test
    is desired, *as will almost always be the case*, the probability
    discovered in the chi-square table should be *halved*."

    However, this standardized statistic does not distinguish between the
    observed values being well correlated with the predicted ranks and being
    _anti_-correlated with the predicted ranks. Instead, we follow [2]_
    and calculate the standardized statistic

    .. math::

        \Lambda = \frac{L - E_0}{\sqrt{V_0}},

    where :math:`E_0 = \frac{1}{4} mn(n+1)^2` and
    :math:`V_0 = \frac{1}{144} mn^2(n+1)(n^2-1)`, "which is asymptotically
    normal under the null hypothesis".

    The *p*-value for ``method='exact'`` is generated by comparing the observed
    value of :math:`L` against the :math:`L` values generated for all
    :math:`(n!)^m` possible permutations of ranks. The calculation is performed
    using the recursive method of [5]_.

    The *p*-values are not adjusted for the possibility of ties. When
|
161 |
+
ties are present, the reported ``'exact'`` *p*-values may be somewhat
|
162 |
+
larger (i.e. more conservative) than the true *p*-value [2]_. The
|
163 |
+
``'asymptotic'``` *p*-values, however, tend to be smaller (i.e. less
|
164 |
+
conservative) than the ``'exact'`` *p*-values.
|
165 |
+
|
166 |
+
References
|
167 |
+
----------
|
168 |
+
.. [1] Ellis Batten Page, "Ordered hypotheses for multiple treatments:
|
169 |
+
a significant test for linear ranks", *Journal of the American
|
170 |
+
Statistical Association* 58(301), p. 216--230, 1963.
|
171 |
+
|
172 |
+
.. [2] Markus Neuhauser, *Nonparametric Statistical Test: A computational
|
173 |
+
approach*, CRC Press, p. 150--152, 2012.
|
174 |
+
|
175 |
+
.. [3] Statext LLC, "Page's L Trend Test - Easy Statistics", *Statext -
|
176 |
+
Statistics Study*, https://www.statext.com/practice/PageTrendTest03.php,
|
177 |
+
Accessed July 12, 2020.
|
178 |
+
|
179 |
+
.. [4] "Page's Trend Test", *Wikipedia*, WikimediaFoundation,
|
180 |
+
https://en.wikipedia.org/wiki/Page%27s_trend_test,
|
181 |
+
Accessed July 12, 2020.
|
182 |
+
|
183 |
+
.. [5] Robert E. Odeh, "The exact distribution of Page's L-statistic in
|
184 |
+
the two-way layout", *Communications in Statistics - Simulation and
|
185 |
+
Computation*, 6(1), p. 49--61, 1977.
|
186 |
+
|
187 |
+
Examples
|
188 |
+
--------
|
189 |
+
We use the example from [3]_: 10 students are asked to rate three
|
190 |
+
teaching methods - tutorial, lecture, and seminar - on a scale of 1-5,
|
191 |
+
with 1 being the lowest and 5 being the highest. We have decided that
|
192 |
+
a confidence level of 99% is required to reject the null hypothesis in
|
193 |
+
favor of our alternative: that the seminar will have the highest ratings
|
194 |
+
and the tutorial will have the lowest. Initially, the data have been
|
195 |
+
tabulated with each row representing an individual student's ratings of
|
196 |
+
the three methods in the following order: tutorial, lecture, seminar.
|
197 |
+
|
198 |
+
>>> table = [[3, 4, 3],
|
199 |
+
... [2, 2, 4],
|
200 |
+
... [3, 3, 5],
|
201 |
+
... [1, 3, 2],
|
202 |
+
... [2, 3, 2],
|
203 |
+
... [2, 4, 5],
|
204 |
+
... [1, 2, 4],
|
205 |
+
... [3, 4, 4],
|
206 |
+
... [2, 4, 5],
|
207 |
+
... [1, 3, 4]]
|
208 |
+
|
209 |
+
Because the tutorial is hypothesized to have the lowest ratings, the
|
210 |
+
column corresponding with tutorial rankings should be first; the seminar
|
211 |
+
is hypothesized to have the highest ratings, so its column should be last.
|
212 |
+
Since the columns are already arranged in this order of increasing
|
213 |
+
predicted mean, we can pass the table directly into `page_trend_test`.
|
214 |
+
|
215 |
+
>>> from scipy.stats import page_trend_test
|
216 |
+
>>> res = page_trend_test(table)
|
217 |
+
>>> res
|
218 |
+
PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
|
219 |
+
method='exact')
|
220 |
+
|
221 |
+
This *p*-value indicates that there is a 0.1819% chance that
|
222 |
+
the :math:`L` statistic would reach such an extreme value under the null
|
223 |
+
hypothesis. Because 0.1819% is less than 1%, we have evidence to reject
|
224 |
+
the null hypothesis in favor of our alternative at a 99% confidence level.
|
225 |
+
|
226 |
+
The value of the :math:`L` statistic is 133.5. To check this manually,
|
227 |
+
we rank the data such that high scores correspond with high ranks, settling
|
228 |
+
ties with an average rank:
|
229 |
+
|
230 |
+
>>> from scipy.stats import rankdata
|
231 |
+
>>> ranks = rankdata(table, axis=1)
|
232 |
+
>>> ranks
|
233 |
+
array([[1.5, 3. , 1.5],
|
234 |
+
[1.5, 1.5, 3. ],
|
235 |
+
[1.5, 1.5, 3. ],
|
236 |
+
[1. , 3. , 2. ],
|
237 |
+
[1.5, 3. , 1.5],
|
238 |
+
[1. , 2. , 3. ],
|
239 |
+
[1. , 2. , 3. ],
|
240 |
+
[1. , 2.5, 2.5],
|
241 |
+
[1. , 2. , 3. ],
|
242 |
+
[1. , 2. , 3. ]])
|
243 |
+
|
244 |
+
We add the ranks within each column, multiply the sums by the
|
245 |
+
predicted ranks, and sum the products.
|
246 |
+
|
247 |
+
>>> import numpy as np
|
248 |
+
>>> m, n = ranks.shape
|
249 |
+
>>> predicted_ranks = np.arange(1, n+1)
|
250 |
+
>>> L = (predicted_ranks * np.sum(ranks, axis=0)).sum()
|
251 |
+
>>> res.statistic == L
|
252 |
+
True
|
253 |
+
|
254 |
+
As presented in [3]_, the asymptotic approximation of the *p*-value is the
|
255 |
+
survival function of the normal distribution evaluated at the standardized
|
256 |
+
test statistic:
|
257 |
+
|
258 |
+
>>> from scipy.stats import norm
|
259 |
+
>>> E0 = (m*n*(n+1)**2)/4
|
260 |
+
>>> V0 = (m*n**2*(n+1)*(n**2-1))/144
|
261 |
+
>>> Lambda = (L-E0)/np.sqrt(V0)
|
262 |
+
>>> p = norm.sf(Lambda)
|
263 |
+
>>> p
|
264 |
+
0.0012693433690751756
|
265 |
+
|
266 |
+
This does not precisely match the *p*-value reported by `page_trend_test`
|
267 |
+
above. The asymptotic distribution is not very accurate, nor conservative,
|
268 |
+
for :math:`m \leq 12` and :math:`n \leq 8`, so `page_trend_test` chose to
|
269 |
+
use ``method='exact'`` based on the dimensions of the table and the
|
270 |
+
recommendations in Page's original paper [1]_. To override
|
271 |
+
`page_trend_test`'s choice, provide the `method` argument.
|
272 |
+
|
273 |
+
>>> res = page_trend_test(table, method="asymptotic")
|
274 |
+
>>> res
|
275 |
+
PageTrendTestResult(statistic=133.5, pvalue=0.0012693433690751756,
|
276 |
+
method='asymptotic')
|
277 |
+
|
278 |
+
If the data are already ranked, we can pass in the ``ranks`` instead of
|
279 |
+
the ``table`` to save computation time.
|
280 |
+
|
281 |
+
>>> res = page_trend_test(ranks, # ranks of data
|
282 |
+
... ranked=True, # data is already ranked
|
283 |
+
... )
|
284 |
+
>>> res
|
285 |
+
PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
|
286 |
+
method='exact')
|
287 |
+
|
288 |
+
Suppose the raw data had been tabulated in an order different from the
|
289 |
+
order of predicted means, say lecture, seminar, tutorial.
|
290 |
+
|
291 |
+
>>> table = np.asarray(table)[:, [1, 2, 0]]
|
292 |
+
|
293 |
+
Since the arrangement of this table is not consistent with the assumed
|
294 |
+
ordering, we can either rearrange the table or provide the
|
295 |
+
`predicted_ranks`. Remembering that the lecture is predicted
|
296 |
+
to have the middle rank, the seminar the highest, and tutorial the lowest,
|
297 |
+
we pass:
|
298 |
+
|
299 |
+
>>> res = page_trend_test(table, # data as originally tabulated
|
300 |
+
... predicted_ranks=[2, 3, 1], # our predicted order
|
301 |
+
... )
|
302 |
+
>>> res
|
303 |
+
PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
|
304 |
+
method='exact')
|
305 |
+
|
306 |
+
"""
|
307 |
+
|
308 |
+
# Possible values of the method parameter and the corresponding function
|
309 |
+
# used to evaluate the p value
|
310 |
+
methods = {"asymptotic": _l_p_asymptotic,
|
311 |
+
"exact": _l_p_exact,
|
312 |
+
"auto": None}
|
313 |
+
if method not in methods:
|
314 |
+
raise ValueError(f"`method` must be in {set(methods)}")
|
315 |
+
|
316 |
+
ranks = np.asarray(data)
|
317 |
+
if ranks.ndim != 2: # TODO: relax this to accept 3d arrays?
|
318 |
+
raise ValueError("`data` must be a 2d array.")
|
319 |
+
|
320 |
+
m, n = ranks.shape
|
321 |
+
if m < 2 or n < 3:
|
322 |
+
raise ValueError("Page's L is only appropriate for data with two "
|
323 |
+
"or more rows and three or more columns.")
|
324 |
+
|
325 |
+
if np.any(np.isnan(data)):
|
326 |
+
raise ValueError("`data` contains NaNs, which cannot be ranked "
|
327 |
+
"meaningfully")
|
328 |
+
|
329 |
+
# ensure NumPy array and rank the data if it's not already ranked
|
330 |
+
if ranked:
|
331 |
+
# Only a basic check on whether data is ranked. Checking that the data
|
332 |
+
# is properly ranked could take as much time as ranking it.
|
333 |
+
if not (ranks.min() >= 1 and ranks.max() <= ranks.shape[1]):
|
334 |
+
raise ValueError("`data` is not properly ranked. Rank the data or "
|
335 |
+
"pass `ranked=False`.")
|
336 |
+
else:
|
337 |
+
ranks = scipy.stats.rankdata(data, axis=-1)
|
338 |
+
|
339 |
+
# generate predicted ranks if not provided, ensure valid NumPy array
|
340 |
+
if predicted_ranks is None:
|
341 |
+
predicted_ranks = np.arange(1, n+1)
|
342 |
+
else:
|
343 |
+
predicted_ranks = np.asarray(predicted_ranks)
|
344 |
+
if (predicted_ranks.ndim < 1 or
|
345 |
+
(set(predicted_ranks) != set(range(1, n+1)) or
|
346 |
+
len(predicted_ranks) != n)):
|
347 |
+
raise ValueError(f"`predicted_ranks` must include each integer "
|
348 |
+
f"from 1 to {n} (the number of columns in "
|
349 |
+
f"`data`) exactly once.")
|
350 |
+
|
351 |
+
if not isinstance(ranked, bool):
|
352 |
+
raise TypeError("`ranked` must be boolean.")
|
353 |
+
|
354 |
+
# Calculate the L statistic
|
355 |
+
L = _l_vectorized(ranks, predicted_ranks)
|
356 |
+
|
357 |
+
# Calculate the p-value
|
358 |
+
if method == "auto":
|
359 |
+
method = _choose_method(ranks)
|
360 |
+
p_fun = methods[method] # get the function corresponding with the method
|
361 |
+
p = p_fun(L, m, n)
|
362 |
+
|
363 |
+
page_result = PageTrendTestResult(statistic=L, pvalue=p, method=method)
|
364 |
+
return page_result
|
365 |
+
|
366 |
+
|
367 |
+
def _choose_method(ranks):
|
368 |
+
'''Choose method for computing p-value automatically'''
|
369 |
+
m, n = ranks.shape
|
370 |
+
if n > 8 or (m > 12 and n > 3) or m > 20: # as in [1], [4]
|
371 |
+
method = "asymptotic"
|
372 |
+
else:
|
373 |
+
method = "exact"
|
374 |
+
return method
|
375 |
+
|
376 |
+
|
377 |
+
def _l_vectorized(ranks, predicted_ranks):
|
378 |
+
'''Calculate's Page's L statistic for each page of a 3d array'''
|
379 |
+
colsums = ranks.sum(axis=-2, keepdims=True)
|
380 |
+
products = predicted_ranks * colsums
|
381 |
+
Ls = products.sum(axis=-1)
|
382 |
+
Ls = Ls[0] if Ls.size == 1 else Ls.ravel()
|
383 |
+
return Ls
|
384 |
+
|
385 |
+
|
386 |
+
def _l_p_asymptotic(L, m, n):
|
387 |
+
'''Calculate the p-value of Page's L from the asymptotic distribution'''
|
388 |
+
# Using [1] as a reference, the asymptotic p-value would be calculated as:
|
389 |
+
# chi_L = (12*L - 3*m*n*(n+1)**2)**2/(m*n**2*(n**2-1)*(n+1))
|
390 |
+
# p = chi2.sf(chi_L, df=1, loc=0, scale=1)/2
|
391 |
+
# but this is insensitive to the direction of the hypothesized ranking
|
392 |
+
|
393 |
+
# See [2] page 151
|
394 |
+
E0 = (m*n*(n+1)**2)/4
|
395 |
+
V0 = (m*n**2*(n+1)*(n**2-1))/144
|
396 |
+
Lambda = (L-E0)/np.sqrt(V0)
|
397 |
+
# This is a one-sided "greater" test - calculate the probability that the
|
398 |
+
# L statistic under H0 would be greater than the observed L statistic
|
399 |
+
p = norm.sf(Lambda)
|
400 |
+
return p
|
401 |
+
|
402 |
+
|
403 |
+
def _l_p_exact(L, m, n):
|
404 |
+
'''Calculate the p-value of Page's L exactly'''
|
405 |
+
# [1] uses m, n; [5] uses n, k.
|
406 |
+
# Switch convention here because exact calculation code references [5].
|
407 |
+
L, n, k = int(L), int(m), int(n)
|
408 |
+
_pagel_state.set_k(k)
|
409 |
+
return _pagel_state.sf(L, n)
|
410 |
+
|
411 |
+
|
412 |
+
class _PageL:
|
413 |
+
'''Maintains state between `page_trend_test` executions'''
|
414 |
+
|
415 |
+
def __init__(self):
|
416 |
+
'''Lightweight initialization'''
|
417 |
+
self.all_pmfs = {}
|
418 |
+
|
419 |
+
def set_k(self, k):
|
420 |
+
'''Calculate lower and upper limits of L for single row'''
|
421 |
+
self.k = k
|
422 |
+
# See [5] top of page 52
|
423 |
+
self.a, self.b = (k*(k+1)*(k+2))//6, (k*(k+1)*(2*k+1))//6
|
424 |
+
|
425 |
+
def sf(self, l, n):
|
426 |
+
'''Survival function of Page's L statistic'''
|
427 |
+
ps = [self.pmf(l, n) for l in range(l, n*self.b + 1)]
|
428 |
+
return np.sum(ps)
|
429 |
+
|
430 |
+
def p_l_k_1(self):
|
431 |
+
'''Relative frequency of each L value over all possible single rows'''
|
432 |
+
|
433 |
+
# See [5] Equation (6)
|
434 |
+
ranks = range(1, self.k+1)
|
435 |
+
# generate all possible rows of length k
|
436 |
+
rank_perms = np.array(list(permutations(ranks)))
|
437 |
+
# compute Page's L for all possible rows
|
438 |
+
Ls = (ranks*rank_perms).sum(axis=1)
|
439 |
+
# count occurrences of each L value
|
440 |
+
counts = np.histogram(Ls, np.arange(self.a-0.5, self.b+1.5))[0]
|
441 |
+
# factorial(k) is number of possible permutations
|
442 |
+
return counts/math.factorial(self.k)
|
443 |
+
|
444 |
+
def pmf(self, l, n):
|
445 |
+
'''Recursive function to evaluate p(l, k, n); see [5] Equation 1'''
|
446 |
+
|
447 |
+
if n not in self.all_pmfs:
|
448 |
+
self.all_pmfs[n] = {}
|
449 |
+
if self.k not in self.all_pmfs[n]:
|
450 |
+
self.all_pmfs[n][self.k] = {}
|
451 |
+
|
452 |
+
# Cache results to avoid repeating calculation. Initially this was
|
453 |
+
# written with lru_cache, but this seems faster? Also, we could add
|
454 |
+
# an option to save this for future lookup.
|
455 |
+
if l in self.all_pmfs[n][self.k]:
|
456 |
+
return self.all_pmfs[n][self.k][l]
|
457 |
+
|
458 |
+
if n == 1:
|
459 |
+
ps = self.p_l_k_1() # [5] Equation 6
|
460 |
+
ls = range(self.a, self.b+1)
|
461 |
+
# not fast, but we'll only be here once
|
462 |
+
self.all_pmfs[n][self.k] = {l: p for l, p in zip(ls, ps)}
|
463 |
+
return self.all_pmfs[n][self.k][l]
|
464 |
+
|
465 |
+
p = 0
|
466 |
+
low = max(l-(n-1)*self.b, self.a) # [5] Equation 2
|
467 |
+
high = min(l-(n-1)*self.a, self.b)
|
468 |
+
|
469 |
+
# [5] Equation 1
|
470 |
+
for t in range(low, high+1):
|
471 |
+
p1 = self.pmf(l-t, n-1)
|
472 |
+
p2 = self.pmf(t, 1)
|
473 |
+
p += p1*p2
|
474 |
+
self.all_pmfs[n][self.k][l] = p
|
475 |
+
return p
|
476 |
+
|
477 |
+
|
478 |
+
# Maintain state for faster repeat calls to page_trend_test w/ method='exact'
|
479 |
+
_pagel_state = _PageL()
|
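
A quick sanity check of the recursive exact distribution above is to enumerate
all :math:`(n!)^m` equally likely rank tables for a small table and compare the
brute-force tail probability with the reported exact p-value. This is a minimal
sketch using only public NumPy/SciPy APIs; the table size and seed are
arbitrary, not part of the file above.

    from itertools import permutations, product
    import numpy as np
    from scipy.stats import page_trend_test

    m, n = 3, 3                      # subjects x treatments; (n!)**m = 216 tables
    predicted = np.arange(1, n + 1)  # default predicted ranks 1..n

    rng = np.random.default_rng(0)
    res = page_trend_test(rng.random((m, n)), method='exact')

    # enumerate every equally likely table of within-row rankings and its L
    rows = list(permutations(range(1, n + 1)))
    Ls = np.array([(predicted * np.sum(t, axis=0)).sum()
                   for t in product(rows, repeat=m)])

    # P(L >= observed) over all rank tables should match the exact p-value
    print(res.pvalue, np.mean(Ls >= res.statistic))
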
env-llmeval/lib/python3.10/site-packages/scipy/stats/_qmc.py
ADDED
The diff for this file is too large to render.
See raw diff
env-llmeval/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi
ADDED
@@ -0,0 +1,54 @@
import numpy as np
from scipy._lib._util import DecimalNumber, IntNumber


def _cy_wrapper_centered_discrepancy(
    sample: np.ndarray,
    iterative: bool,
    workers: IntNumber,
) -> float: ...


def _cy_wrapper_wrap_around_discrepancy(
    sample: np.ndarray,
    iterative: bool,
    workers: IntNumber,
) -> float: ...


def _cy_wrapper_mixture_discrepancy(
    sample: np.ndarray,
    iterative: bool,
    workers: IntNumber,
) -> float: ...


def _cy_wrapper_l2_star_discrepancy(
    sample: np.ndarray,
    iterative: bool,
    workers: IntNumber,
) -> float: ...


def _cy_wrapper_update_discrepancy(
    x_new_view: np.ndarray,
    sample_view: np.ndarray,
    initial_disc: DecimalNumber,
) -> float: ...


def _cy_van_der_corput(
    n: IntNumber,
    base: IntNumber,
    start_index: IntNumber,
    workers: IntNumber,
) -> np.ndarray: ...


def _cy_van_der_corput_scrambled(
    n: IntNumber,
    base: IntNumber,
    start_index: IntNumber,
    permutations: np.ndarray,
    workers: IntNumber,
) -> np.ndarray: ...
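
These private stubs type the Cython discrepancy and van der Corput kernels;
user code reaches them through the public `scipy.stats.qmc` module. A minimal
usage sketch follows; the mapping of the method strings to the four kernels
('CD' centered, 'WD' wrap-around, 'MD' mixture, 'L2-star' L2 star) is an
inference from the stub names, not stated in the diff above.

    import numpy as np
    from scipy.stats import qmc

    sample = qmc.Sobol(d=2, scramble=False).random(8)  # 8 points in [0, 1)^2
    for method in ("CD", "WD", "MD", "L2-star"):
        # `workers` is forwarded down to the Cython kernels typed above
        print(method, qmc.discrepancy(sample, method=method, workers=1))
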
env-llmeval/lib/python3.10/site-packages/scipy/stats/_relative_risk.py
ADDED
@@ -0,0 +1,263 @@
import operator
from dataclasses import dataclass
import numpy as np
from scipy.special import ndtri
from ._common import ConfidenceInterval


def _validate_int(n, bound, name):
    msg = f'{name} must be an integer not less than {bound}, but got {n!r}'
    try:
        n = operator.index(n)
    except TypeError:
        raise TypeError(msg) from None
    if n < bound:
        raise ValueError(msg)
    return n


@dataclass
class RelativeRiskResult:
    """
    Result of `scipy.stats.contingency.relative_risk`.

    Attributes
    ----------
    relative_risk : float
        This is::

            (exposed_cases/exposed_total) / (control_cases/control_total)

    exposed_cases : int
        The number of "cases" (i.e. occurrence of disease or other event
        of interest) among the sample of "exposed" individuals.
    exposed_total : int
        The total number of "exposed" individuals in the sample.
    control_cases : int
        The number of "cases" among the sample of "control" or non-exposed
        individuals.
    control_total : int
        The total number of "control" individuals in the sample.

    Methods
    -------
    confidence_interval :
        Compute the confidence interval for the relative risk estimate.
    """

    relative_risk: float
    exposed_cases: int
    exposed_total: int
    control_cases: int
    control_total: int

    def confidence_interval(self, confidence_level=0.95):
        """
        Compute the confidence interval for the relative risk.

        The confidence interval is computed using the Katz method
        (i.e. "Method C" of [1]_; see also [2]_, section 3.1.2).

        Parameters
        ----------
        confidence_level : float, optional
            The confidence level to use for the confidence interval.
            Default is 0.95.

        Returns
        -------
        ci : ConfidenceInterval instance
            The return value is an object with attributes ``low`` and
            ``high`` that hold the confidence interval.

        References
        ----------
        .. [1] D. Katz, J. Baptista, S. P. Azen and M. C. Pike, "Obtaining
           confidence intervals for the risk ratio in cohort studies",
           Biometrics, 34, 469-474 (1978).
        .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
           CRC Press LLC, Boca Raton, FL, USA (1996).

        Examples
        --------
        >>> from scipy.stats.contingency import relative_risk
        >>> result = relative_risk(exposed_cases=10, exposed_total=75,
        ...                        control_cases=12, control_total=225)
        >>> result.relative_risk
        2.5
        >>> result.confidence_interval()
        ConfidenceInterval(low=1.1261564003469628, high=5.549850800541033)
        """
        if not 0 <= confidence_level <= 1:
            raise ValueError('confidence_level must be in the interval '
                             '[0, 1].')

        # Handle edge cases where either exposed_cases or control_cases
        # is zero. We follow the convention of the R function riskratio
        # from the epitools library.
        if self.exposed_cases == 0 and self.control_cases == 0:
            # relative risk is nan.
            return ConfidenceInterval(low=np.nan, high=np.nan)
        elif self.exposed_cases == 0:
            # relative risk is 0.
            return ConfidenceInterval(low=0.0, high=np.nan)
        elif self.control_cases == 0:
            # relative risk is inf
            return ConfidenceInterval(low=np.nan, high=np.inf)

        alpha = 1 - confidence_level
        z = ndtri(1 - alpha/2)
        rr = self.relative_risk

        # Estimate of the variance of log(rr) is
        # var(log(rr)) = 1/exposed_cases - 1/exposed_total +
        #                1/control_cases - 1/control_total
        # and the standard error is the square root of that.
        se = np.sqrt(1/self.exposed_cases - 1/self.exposed_total +
                     1/self.control_cases - 1/self.control_total)
        delta = z*se
        katz_lo = rr*np.exp(-delta)
        katz_hi = rr*np.exp(delta)
        return ConfidenceInterval(low=katz_lo, high=katz_hi)


def relative_risk(exposed_cases, exposed_total, control_cases, control_total):
    """
    Compute the relative risk (also known as the risk ratio).

    This function computes the relative risk associated with a 2x2
    contingency table ([1]_, section 2.2.3; [2]_, section 3.1.2). Instead
    of accepting a table as an argument, the individual numbers that are
    used to compute the relative risk are given as separate parameters.
    This is to avoid the ambiguity of which row or column of the contingency
    table corresponds to the "exposed" cases and which corresponds to the
    "control" cases. Unlike, say, the odds ratio, the relative risk is not
    invariant under an interchange of the rows or columns.

    Parameters
    ----------
    exposed_cases : nonnegative int
        The number of "cases" (i.e. occurrence of disease or other event
        of interest) among the sample of "exposed" individuals.
    exposed_total : positive int
        The total number of "exposed" individuals in the sample.
    control_cases : nonnegative int
        The number of "cases" among the sample of "control" or non-exposed
        individuals.
    control_total : positive int
        The total number of "control" individuals in the sample.

    Returns
    -------
    result : instance of `~scipy.stats._result_classes.RelativeRiskResult`
        The object has the float attribute ``relative_risk``, which is::

            rr = (exposed_cases/exposed_total) / (control_cases/control_total)

        The object also has the method ``confidence_interval`` to compute
        the confidence interval of the relative risk for a given confidence
        level.

    See Also
    --------
    odds_ratio

    Notes
    -----
    The R package epitools has the function `riskratio`, which accepts
    a table with the following layout::

                        disease=0   disease=1
        exposed=0 (ref)    n00         n01
        exposed=1          n10         n11

    With a 2x2 table in the above format, the estimate of the CI is
    computed by `riskratio` when the argument method="wald" is given,
    or with the function `riskratio.wald`.

    For example, in a test of the incidence of lung cancer among a
    sample of smokers and nonsmokers, the "exposed" category would
    correspond to "is a smoker" and the "disease" category would
    correspond to "has or had lung cancer".

    To pass the same data to ``relative_risk``, use::

        relative_risk(n11, n10 + n11, n01, n00 + n01)

    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] Alan Agresti, An Introduction to Categorical Data Analysis
       (second edition), Wiley, Hoboken, NJ, USA (2007).
    .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
       CRC Press LLC, Boca Raton, FL, USA (1996).

    Examples
    --------
    >>> from scipy.stats.contingency import relative_risk

    This example is from Example 3.1 of [2]_. The results of a heart
    disease study are summarized in the following table::

             High CAT   Low CAT    Total
             --------   -------    -----
    CHD         27        44        71
    No CHD      95       443       538

    Total      122       487       609

    CHD is coronary heart disease, and CAT refers to the level of
    circulating catecholamine. CAT is the "exposure" variable, and
    high CAT is the "exposed" category. So the data from the table
    to be passed to ``relative_risk`` is::

        exposed_cases = 27
        exposed_total = 122
        control_cases = 44
        control_total = 487

    >>> result = relative_risk(27, 122, 44, 487)
    >>> result.relative_risk
    2.4495156482861398

    Find the confidence interval for the relative risk.

    >>> result.confidence_interval(confidence_level=0.95)
    ConfidenceInterval(low=1.5836990926700116, high=3.7886786315466354)

    The interval does not contain 1, so the data supports the statement
    that high CAT is associated with greater risk of CHD.
    """
    # Relative risk is a trivial calculation. The nontrivial part is in the
    # `confidence_interval` method of the RelativeRiskResult class.

    exposed_cases = _validate_int(exposed_cases, 0, "exposed_cases")
    exposed_total = _validate_int(exposed_total, 1, "exposed_total")
    control_cases = _validate_int(control_cases, 0, "control_cases")
    control_total = _validate_int(control_total, 1, "control_total")

    if exposed_cases > exposed_total:
        raise ValueError('exposed_cases must not exceed exposed_total.')
    if control_cases > control_total:
        raise ValueError('control_cases must not exceed control_total.')

    if exposed_cases == 0 and control_cases == 0:
        # relative risk is 0/0.
        rr = np.nan
    elif exposed_cases == 0:
        # relative risk is 0/nonzero
        rr = 0.0
    elif control_cases == 0:
        # relative risk is nonzero/0.
        rr = np.inf
    else:
        p1 = exposed_cases / exposed_total
        p2 = control_cases / control_total
        rr = p1 / p2
    return RelativeRiskResult(relative_risk=rr,
                              exposed_cases=exposed_cases,
                              exposed_total=exposed_total,
                              control_cases=control_cases,
                              control_total=control_total)
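
As a cross-check of the Katz interval implemented in `confidence_interval`
above, here is a minimal sketch reproducing the 95% interval of the docstring
example directly from the formula in the code's comments (standard error of
log(rr), then exponentiate around the point estimate):

    import numpy as np
    from scipy.special import ndtri
    from scipy.stats.contingency import relative_risk

    res = relative_risk(27, 122, 44, 487)
    z = ndtri(1 - 0.05/2)                      # two-sided 95% critical value
    se = np.sqrt(1/27 - 1/122 + 1/44 - 1/487)  # standard error of log(rr)
    print(res.relative_risk * np.exp(-z*se), res.relative_risk * np.exp(z*se))
    print(res.confidence_interval(0.95))       # should match the line above
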
env-llmeval/lib/python3.10/site-packages/scipy/stats/_resampling.py
ADDED
@@ -0,0 +1,1870 @@
from __future__ import annotations

import warnings
import numpy as np
from itertools import combinations, permutations, product
from collections.abc import Sequence
import inspect

from scipy._lib._util import check_random_state, _rename_parameter
from scipy.special import ndtr, ndtri, comb, factorial
from scipy._lib._util import rng_integers
from dataclasses import dataclass
from ._common import ConfidenceInterval
from ._axis_nan_policy import _broadcast_concatenate, _broadcast_arrays
from ._warnings_errors import DegenerateDataWarning

__all__ = ['bootstrap', 'monte_carlo_test', 'permutation_test']


def _vectorize_statistic(statistic):
    """Vectorize an n-sample statistic"""
    # This is a little cleaner than np.nditer at the expense of some data
    # copying: concatenate samples together, then use np.apply_along_axis
    def stat_nd(*data, axis=0):
        lengths = [sample.shape[axis] for sample in data]
        split_indices = np.cumsum(lengths)[:-1]
        z = _broadcast_concatenate(data, axis)

        # move working axis to position 0 so that new dimensions in the output
        # of `statistic` are _prepended_. ("This axis is removed, and replaced
        # with new dimensions...")
        z = np.moveaxis(z, axis, 0)

        def stat_1d(z):
            data = np.split(z, split_indices)
            return statistic(*data)

        return np.apply_along_axis(stat_1d, 0, z)[()]
    return stat_nd


def _jackknife_resample(sample, batch=None):
    """Jackknife resample the sample. Only one-sample stats for now."""
    n = sample.shape[-1]
    batch_nominal = batch or n

    for k in range(0, n, batch_nominal):
        # col_start:col_end are the observations to remove
        batch_actual = min(batch_nominal, n-k)

        # jackknife - each row leaves out one observation
        j = np.ones((batch_actual, n), dtype=bool)
        np.fill_diagonal(j[:, k:k+batch_actual], False)
        i = np.arange(n)
        i = np.broadcast_to(i, (batch_actual, n))
        i = i[j].reshape((batch_actual, n-1))

        resamples = sample[..., i]
        yield resamples


def _bootstrap_resample(sample, n_resamples=None, random_state=None):
    """Bootstrap resample the sample."""
    n = sample.shape[-1]

    # bootstrap - each row is a random resample of original observations
    i = rng_integers(random_state, 0, n, (n_resamples, n))

    resamples = sample[..., i]
    return resamples


def _percentile_of_score(a, score, axis):
    """Vectorized, simplified `scipy.stats.percentileofscore`.
    Uses logic of the 'mean' value of percentileofscore's kind parameter.

    Unlike `stats.percentileofscore`, the percentile returned is a fraction
    in [0, 1].
    """
    B = a.shape[axis]
    return ((a < score).sum(axis=axis) + (a <= score).sum(axis=axis)) / (2 * B)


def _percentile_along_axis(theta_hat_b, alpha):
    """`np.percentile` with different percentile for each slice."""
    # the difference between _percentile_along_axis and np.percentile is that
    # np.percentile gets _all_ the qs for each axis slice, whereas
    # _percentile_along_axis gets the q corresponding with each axis slice
    shape = theta_hat_b.shape[:-1]
    alpha = np.broadcast_to(alpha, shape)
    percentiles = np.zeros_like(alpha, dtype=np.float64)
    for indices, alpha_i in np.ndenumerate(alpha):
        if np.isnan(alpha_i):
            # e.g. when bootstrap distribution has only one unique element
            msg = (
                "The BCa confidence interval cannot be calculated."
                " This problem is known to occur when the distribution"
                " is degenerate or the statistic is np.min."
            )
            warnings.warn(DegenerateDataWarning(msg), stacklevel=3)
            percentiles[indices] = np.nan
        else:
            theta_hat_b_i = theta_hat_b[indices]
            percentiles[indices] = np.percentile(theta_hat_b_i, alpha_i)
    return percentiles[()]  # return scalar instead of 0d array


def _bca_interval(data, statistic, axis, alpha, theta_hat_b, batch):
    """Bias-corrected and accelerated interval."""
    # closely follows [1] 14.3 and 15.4 (Eq. 15.36)

    # calculate z0_hat
    theta_hat = np.asarray(statistic(*data, axis=axis))[..., None]
    percentile = _percentile_of_score(theta_hat_b, theta_hat, axis=-1)
    z0_hat = ndtri(percentile)

    # calculate a_hat
    theta_hat_ji = []  # j is for sample of data, i is for jackknife resample
    for j, sample in enumerate(data):
        # _jackknife_resample will add an axis prior to the last axis that
        # corresponds with the different jackknife resamples. Do the same for
        # each sample of the data to ensure broadcastability. We need to
        # create a copy of the list containing the samples anyway, so do this
        # in the loop to simplify the code. This is not the bottleneck...
        samples = [np.expand_dims(sample, -2) for sample in data]
        theta_hat_i = []
        for jackknife_sample in _jackknife_resample(sample, batch):
            samples[j] = jackknife_sample
            broadcasted = _broadcast_arrays(samples, axis=-1)
            theta_hat_i.append(statistic(*broadcasted, axis=-1))
        theta_hat_ji.append(theta_hat_i)

    theta_hat_ji = [np.concatenate(theta_hat_i, axis=-1)
                    for theta_hat_i in theta_hat_ji]

    n_j = [theta_hat_i.shape[-1] for theta_hat_i in theta_hat_ji]

    theta_hat_j_dot = [theta_hat_i.mean(axis=-1, keepdims=True)
                       for theta_hat_i in theta_hat_ji]

    U_ji = [(n - 1) * (theta_hat_dot - theta_hat_i)
            for theta_hat_dot, theta_hat_i, n
            in zip(theta_hat_j_dot, theta_hat_ji, n_j)]

    nums = [(U_i**3).sum(axis=-1)/n**3 for U_i, n in zip(U_ji, n_j)]
    dens = [(U_i**2).sum(axis=-1)/n**2 for U_i, n in zip(U_ji, n_j)]
    a_hat = 1/6 * sum(nums) / sum(dens)**(3/2)

    # calculate alpha_1, alpha_2
    z_alpha = ndtri(alpha)
    z_1alpha = -z_alpha
    num1 = z0_hat + z_alpha
    alpha_1 = ndtr(z0_hat + num1/(1 - a_hat*num1))
    num2 = z0_hat + z_1alpha
    alpha_2 = ndtr(z0_hat + num2/(1 - a_hat*num2))
    return alpha_1, alpha_2, a_hat  # return a_hat for testing


def _bootstrap_iv(data, statistic, vectorized, paired, axis, confidence_level,
                  alternative, n_resamples, batch, method, bootstrap_result,
                  random_state):
    """Input validation and standardization for `bootstrap`."""

    if vectorized not in {True, False, None}:
        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")

    if vectorized is None:
        vectorized = 'axis' in inspect.signature(statistic).parameters

    if not vectorized:
        statistic = _vectorize_statistic(statistic)

    axis_int = int(axis)
    if axis != axis_int:
        raise ValueError("`axis` must be an integer.")

    n_samples = 0
    try:
        n_samples = len(data)
    except TypeError:
        raise ValueError("`data` must be a sequence of samples.")

    if n_samples == 0:
        raise ValueError("`data` must contain at least one sample.")

    data_iv = []
    for sample in data:
        sample = np.atleast_1d(sample)
        if sample.shape[axis_int] <= 1:
            raise ValueError("each sample in `data` must contain two or more "
                             "observations along `axis`.")
        sample = np.moveaxis(sample, axis_int, -1)
        data_iv.append(sample)

    if paired not in {True, False}:
        raise ValueError("`paired` must be `True` or `False`.")

    if paired:
        n = data_iv[0].shape[-1]
        for sample in data_iv[1:]:
            if sample.shape[-1] != n:
                message = ("When `paired is True`, all samples must have the "
                           "same length along `axis`")
                raise ValueError(message)

        # to generate the bootstrap distribution for paired-sample statistics,
        # resample the indices of the observations
        def statistic(i, axis=-1, data=data_iv, unpaired_statistic=statistic):
            data = [sample[..., i] for sample in data]
            return unpaired_statistic(*data, axis=axis)

        data_iv = [np.arange(n)]

    confidence_level_float = float(confidence_level)

    alternative = alternative.lower()
    alternatives = {'two-sided', 'less', 'greater'}
    if alternative not in alternatives:
        raise ValueError(f"`alternative` must be one of {alternatives}")

    n_resamples_int = int(n_resamples)
    if n_resamples != n_resamples_int or n_resamples_int < 0:
        raise ValueError("`n_resamples` must be a non-negative integer.")

    if batch is None:
        batch_iv = batch
    else:
        batch_iv = int(batch)
        if batch != batch_iv or batch_iv <= 0:
            raise ValueError("`batch` must be a positive integer or None.")

    methods = {'percentile', 'basic', 'bca'}
    method = method.lower()
    if method not in methods:
        raise ValueError(f"`method` must be in {methods}")

    message = "`bootstrap_result` must have attribute `bootstrap_distribution'"
    if (bootstrap_result is not None
            and not hasattr(bootstrap_result, "bootstrap_distribution")):
        raise ValueError(message)

    message = ("Either `bootstrap_result.bootstrap_distribution.size` or "
               "`n_resamples` must be positive.")
    if ((not bootstrap_result or
         not bootstrap_result.bootstrap_distribution.size)
            and n_resamples_int == 0):
        raise ValueError(message)

    random_state = check_random_state(random_state)

    return (data_iv, statistic, vectorized, paired, axis_int,
            confidence_level_float, alternative, n_resamples_int, batch_iv,
            method, bootstrap_result, random_state)


@dataclass
class BootstrapResult:
    """Result object returned by `scipy.stats.bootstrap`.

    Attributes
    ----------
    confidence_interval : ConfidenceInterval
        The bootstrap confidence interval as an instance of
        `collections.namedtuple` with attributes `low` and `high`.
    bootstrap_distribution : ndarray
        The bootstrap distribution, that is, the value of `statistic` for
        each resample. The last dimension corresponds with the resamples
        (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``).
    standard_error : float or ndarray
        The bootstrap standard error, that is, the sample standard
        deviation of the bootstrap distribution.

    """
    confidence_interval: ConfidenceInterval
    bootstrap_distribution: np.ndarray
    standard_error: float | np.ndarray


def bootstrap(data, statistic, *, n_resamples=9999, batch=None,
              vectorized=None, paired=False, axis=0, confidence_level=0.95,
              alternative='two-sided', method='BCa', bootstrap_result=None,
              random_state=None):
    r"""
    Compute a two-sided bootstrap confidence interval of a statistic.

    When `method` is ``'percentile'`` and `alternative` is ``'two-sided'``,
    a bootstrap confidence interval is computed according to the following
    procedure.

    1. Resample the data: for each sample in `data` and for each of
       `n_resamples`, take a random sample of the original sample
       (with replacement) of the same size as the original sample.

    2. Compute the bootstrap distribution of the statistic: for each set of
       resamples, compute the test statistic.

    3. Determine the confidence interval: find the interval of the bootstrap
       distribution that is

       - symmetric about the median and
       - contains `confidence_level` of the resampled statistic values.

    While the ``'percentile'`` method is the most intuitive, it is rarely
    used in practice. Two more common methods are available, ``'basic'``
    ('reverse percentile') and ``'BCa'`` ('bias-corrected and accelerated');
    they differ in how step 3 is performed.

    If the samples in `data` are taken at random from their respective
    distributions :math:`n` times, the confidence interval returned by
    `bootstrap` will contain the true value of the statistic for those
    distributions approximately `confidence_level`:math:`\, \times \, n` times.

    Parameters
    ----------
    data : sequence of array-like
        Each element of data is a sample from an underlying distribution.
    statistic : callable
        Statistic for which the confidence interval is to be calculated.
        `statistic` must be a callable that accepts ``len(data)`` samples
        as separate arguments and returns the resulting statistic.
        If `vectorized` is set ``True``,
        `statistic` must also accept a keyword argument `axis` and be
        vectorized to compute the statistic along the provided `axis`.
    n_resamples : int, default: ``9999``
        The number of resamples performed to form the bootstrap distribution
        of the statistic.
    batch : int, optional
        The number of resamples to process in each vectorized call to
        `statistic`. Memory usage is O( `batch` * ``n`` ), where ``n`` is the
        sample size. Default is ``None``, in which case ``batch = n_resamples``
        (or ``batch = max(n_resamples, n)`` for ``method='BCa'``).
    vectorized : bool, optional
        If `vectorized` is set ``False``, `statistic` will not be passed
        keyword argument `axis` and is expected to calculate the statistic
        only for 1D samples. If ``True``, `statistic` will be passed keyword
        argument `axis` and is expected to calculate the statistic along `axis`
        when passed an ND sample array. If ``None`` (default), `vectorized`
        will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of
        a vectorized statistic typically reduces computation time.
    paired : bool, default: ``False``
        Whether the statistic treats corresponding elements of the samples
        in `data` as paired.
    axis : int, default: ``0``
        The axis of the samples in `data` along which the `statistic` is
        calculated.
    confidence_level : float, default: ``0.95``
        The confidence level of the confidence interval.
    alternative : {'two-sided', 'less', 'greater'}, default: ``'two-sided'``
        Choose ``'two-sided'`` (default) for a two-sided confidence interval,
        ``'less'`` for a one-sided confidence interval with the lower bound
        at ``-np.inf``, and ``'greater'`` for a one-sided confidence interval
        with the upper bound at ``np.inf``. The other bound of the one-sided
        confidence intervals is the same as that of a two-sided confidence
        interval with `confidence_level` twice as far from 1.0; e.g. the upper
        bound of a 95% ``'less'`` confidence interval is the same as the upper
        bound of a 90% ``'two-sided'`` confidence interval.
    method : {'percentile', 'basic', 'bca'}, default: ``'BCa'``
        Whether to return the 'percentile' bootstrap confidence interval
        (``'percentile'``), the 'basic' (AKA 'reverse') bootstrap confidence
        interval (``'basic'``), or the bias-corrected and accelerated bootstrap
        confidence interval (``'BCa'``).
    bootstrap_result : BootstrapResult, optional
        Provide the result object returned by a previous call to `bootstrap`
        to include the previous bootstrap distribution in the new bootstrap
        distribution. This can be used, for example, to change
        `confidence_level`, change `method`, or see the effect of performing
        additional resampling without repeating computations.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate resamples.

        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.

    Returns
    -------
    res : BootstrapResult
        An object with attributes:

        confidence_interval : ConfidenceInterval
            The bootstrap confidence interval as an instance of
            `collections.namedtuple` with attributes `low` and `high`.
        bootstrap_distribution : ndarray
            The bootstrap distribution, that is, the value of `statistic` for
            each resample. The last dimension corresponds with the resamples
            (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``).
        standard_error : float or ndarray
            The bootstrap standard error, that is, the sample standard
            deviation of the bootstrap distribution.

    Warns
    -----
    `~scipy.stats.DegenerateDataWarning`
        Generated when ``method='BCa'`` and the bootstrap distribution is
        degenerate (e.g. all elements are identical).

    Notes
    -----
    Elements of the confidence interval may be NaN for ``method='BCa'`` if
    the bootstrap distribution is degenerate (e.g. all elements are identical).
    In this case, consider using another `method` or inspecting `data` for
    indications that other analysis may be more appropriate (e.g. all
    observations are identical).

    References
    ----------
    .. [1] B. Efron and R. J. Tibshirani, An Introduction to the Bootstrap,
       Chapman & Hall/CRC, Boca Raton, FL, USA (1993)
    .. [2] Nathaniel E. Helwig, "Bootstrap Confidence Intervals",
       http://users.stat.umn.edu/~helwig/notes/bootci-Notes.pdf
    .. [3] Bootstrapping (statistics), Wikipedia,
       https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29

    Examples
    --------
    Suppose we have sampled data from an unknown distribution.

    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> from scipy.stats import norm
    >>> dist = norm(loc=2, scale=4)  # our "unknown" distribution
    >>> data = dist.rvs(size=100, random_state=rng)

    We are interested in the standard deviation of the distribution.

    >>> std_true = dist.std()      # the true value of the statistic
    >>> print(std_true)
    4.0
    >>> std_sample = np.std(data)  # the sample statistic
    >>> print(std_sample)
    3.9460644295563863

    The bootstrap is used to approximate the variability we would expect if we
    were to repeatedly sample from the unknown distribution and calculate the
    statistic of the sample each time. It does this by repeatedly resampling
    values *from the original sample* with replacement and calculating the
    statistic of each resample. This results in a "bootstrap distribution" of
    the statistic.

    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import bootstrap
    >>> data = (data,)  # samples must be in a sequence
    >>> res = bootstrap(data, np.std, confidence_level=0.9,
    ...                 random_state=rng)
    >>> fig, ax = plt.subplots()
    >>> ax.hist(res.bootstrap_distribution, bins=25)
    >>> ax.set_title('Bootstrap Distribution')
    >>> ax.set_xlabel('statistic value')
    >>> ax.set_ylabel('frequency')
    >>> plt.show()

    The standard error quantifies this variability. It is calculated as the
    standard deviation of the bootstrap distribution.

    >>> res.standard_error
    0.24427002125829136
    >>> res.standard_error == np.std(res.bootstrap_distribution, ddof=1)
    True

    The bootstrap distribution of the statistic is often approximately normal
    with scale equal to the standard error.

    >>> x = np.linspace(3, 5)
    >>> pdf = norm.pdf(x, loc=std_sample, scale=res.standard_error)
    >>> fig, ax = plt.subplots()
    >>> ax.hist(res.bootstrap_distribution, bins=25, density=True)
    >>> ax.plot(x, pdf)
    >>> ax.set_title('Normal Approximation of the Bootstrap Distribution')
    >>> ax.set_xlabel('statistic value')
    >>> ax.set_ylabel('pdf')
    >>> plt.show()

    This suggests that we could construct a 90% confidence interval on the
    statistic based on quantiles of this normal distribution.

    >>> norm.interval(0.9, loc=std_sample, scale=res.standard_error)
|
482 |
+
(3.5442759991341726, 4.3478528599786)
|
483 |
+
|
484 |
+
Due to central limit theorem, this normal approximation is accurate for a
|
485 |
+
variety of statistics and distributions underlying the samples; however,
|
486 |
+
the approximation is not reliable in all cases. Because `bootstrap` is
|
487 |
+
designed to work with arbitrary underlying distributions and statistics,
|
488 |
+
it uses more advanced techniques to generate an accurate confidence
|
489 |
+
interval.
|
490 |
+
|
491 |
+
>>> print(res.confidence_interval)
|
492 |
+
ConfidenceInterval(low=3.57655333533867, high=4.382043696342881)
|
493 |
+
|
494 |
+
If we sample from the original distribution 1000 times and form a bootstrap
|
495 |
+
confidence interval for each sample, the confidence interval
|
496 |
+
contains the true value of the statistic approximately 90% of the time.
|
497 |
+
|
498 |
+
>>> n_trials = 1000
|
499 |
+
>>> ci_contains_true_std = 0
|
500 |
+
>>> for i in range(n_trials):
|
501 |
+
... data = (dist.rvs(size=100, random_state=rng),)
|
502 |
+
... ci = bootstrap(data, np.std, confidence_level=0.9, n_resamples=1000,
|
503 |
+
... random_state=rng).confidence_interval
|
504 |
+
... if ci[0] < std_true < ci[1]:
|
505 |
+
... ci_contains_true_std += 1
|
506 |
+
>>> print(ci_contains_true_std)
|
507 |
+
875
|
508 |
+
|
509 |
+
Rather than writing a loop, we can also determine the confidence intervals
|
510 |
+
for all 1000 samples at once.
|
511 |
+
|
512 |
+
>>> data = (dist.rvs(size=(n_trials, 100), random_state=rng),)
|
513 |
+
>>> res = bootstrap(data, np.std, axis=-1, confidence_level=0.9,
|
514 |
+
... n_resamples=1000, random_state=rng)
|
515 |
+
>>> ci_l, ci_u = res.confidence_interval
|
516 |
+
|
517 |
+
Here, `ci_l` and `ci_u` contain the confidence interval for each of the
|
518 |
+
``n_trials = 1000`` samples.
|
519 |
+
|
520 |
+
>>> print(ci_l[995:])
|
521 |
+
[3.77729695 3.75090233 3.45829131 3.34078217 3.48072829]
|
522 |
+
>>> print(ci_u[995:])
|
523 |
+
[4.88316666 4.86924034 4.32032996 4.2822427 4.59360598]
|
524 |
+
|
525 |
+
And again, approximately 90% contain the true value, ``std_true = 4``.
|
526 |
+
|
527 |
+
>>> print(np.sum((ci_l < std_true) & (std_true < ci_u)))
|
528 |
+
900
|
529 |
+
|
530 |
+
`bootstrap` can also be used to estimate confidence intervals of
|
531 |
+
multi-sample statistics, including those calculated by hypothesis
|
532 |
+
tests. `scipy.stats.mood` perform's Mood's test for equal scale parameters,
|
533 |
+
and it returns two outputs: a statistic, and a p-value. To get a
|
534 |
+
confidence interval for the test statistic, we first wrap
|
535 |
+
`scipy.stats.mood` in a function that accepts two sample arguments,
|
536 |
+
accepts an `axis` keyword argument, and returns only the statistic.
|
537 |
+
|
538 |
+
>>> from scipy.stats import mood
|
539 |
+
>>> def my_statistic(sample1, sample2, axis):
|
540 |
+
... statistic, _ = mood(sample1, sample2, axis=-1)
|
541 |
+
... return statistic
|
542 |
+
|
543 |
+
Here, we use the 'percentile' method with the default 95% confidence level.
|
544 |
+
|
545 |
+
>>> sample1 = norm.rvs(scale=1, size=100, random_state=rng)
|
546 |
+
>>> sample2 = norm.rvs(scale=2, size=100, random_state=rng)
|
547 |
+
>>> data = (sample1, sample2)
|
548 |
+
>>> res = bootstrap(data, my_statistic, method='basic', random_state=rng)
|
549 |
+
>>> print(mood(sample1, sample2)[0]) # element 0 is the statistic
|
550 |
+
-5.521109549096542
|
551 |
+
>>> print(res.confidence_interval)
|
552 |
+
ConfidenceInterval(low=-7.255994487314675, high=-4.016202624747605)
|
553 |
+
|
554 |
+
The bootstrap estimate of the standard error is also available.
|
555 |
+
|
556 |
+
>>> print(res.standard_error)
|
557 |
+
0.8344963846318795
|
558 |
+
|
559 |
+
Paired-sample statistics work, too. For example, consider the Pearson
|
560 |
+
correlation coefficient.
|
561 |
+
|
562 |
+
>>> from scipy.stats import pearsonr
|
563 |
+
>>> n = 100
|
564 |
+
>>> x = np.linspace(0, 10, n)
|
565 |
+
>>> y = x + rng.uniform(size=n)
|
566 |
+
>>> print(pearsonr(x, y)[0]) # element 0 is the statistic
|
567 |
+
0.9962357936065914
|
568 |
+
|
569 |
+
We wrap `pearsonr` so that it returns only the statistic.
|
570 |
+
|
571 |
+
>>> def my_statistic(x, y):
|
572 |
+
... return pearsonr(x, y)[0]
|
573 |
+
|
574 |
+
We call `bootstrap` using ``paired=True``.
|
575 |
+
Also, since ``my_statistic`` isn't vectorized to calculate the statistic
|
576 |
+
along a given axis, we pass in ``vectorized=False``.
|
577 |
+
|
578 |
+
>>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True,
|
579 |
+
... random_state=rng)
|
580 |
+
>>> print(res.confidence_interval)
|
581 |
+
ConfidenceInterval(low=0.9950085825848624, high=0.9971212407917498)
|
582 |
+
|
583 |
+
The result object can be passed back into `bootstrap` to perform additional
|
584 |
+
resampling:
|
585 |
+
|
586 |
+
>>> len(res.bootstrap_distribution)
|
587 |
+
9999
|
588 |
+
>>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True,
|
589 |
+
... n_resamples=1001, random_state=rng,
|
590 |
+
... bootstrap_result=res)
|
591 |
+
>>> len(res.bootstrap_distribution)
|
592 |
+
11000
|
593 |
+
|
594 |
+
or to change the confidence interval options:
|
595 |
+
|
596 |
+
>>> res2 = bootstrap((x, y), my_statistic, vectorized=False, paired=True,
|
597 |
+
... n_resamples=0, random_state=rng, bootstrap_result=res,
|
598 |
+
... method='percentile', confidence_level=0.9)
|
599 |
+
>>> np.testing.assert_equal(res2.bootstrap_distribution,
|
600 |
+
... res.bootstrap_distribution)
|
601 |
+
>>> res.confidence_interval
|
602 |
+
ConfidenceInterval(low=0.9950035351407804, high=0.9971170323404578)
|
603 |
+
|
604 |
+
without repeating computation of the original bootstrap distribution.
|
605 |
+
|
606 |
+
"""
|
607 |
+
# Input validation
|
608 |
+
args = _bootstrap_iv(data, statistic, vectorized, paired, axis,
|
609 |
+
confidence_level, alternative, n_resamples, batch,
|
610 |
+
method, bootstrap_result, random_state)
|
611 |
+
(data, statistic, vectorized, paired, axis, confidence_level,
|
612 |
+
alternative, n_resamples, batch, method, bootstrap_result,
|
613 |
+
random_state) = args
|
614 |
+
|
615 |
+
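    # Seed the list of batched statistic values with the bootstrap
    # distribution from a previous call (if provided) so that new resamples
    # extend, rather than replace, the earlier work.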
    theta_hat_b = ([] if bootstrap_result is None
                   else [bootstrap_result.bootstrap_distribution])

    batch_nominal = batch or n_resamples or 1

    for k in range(0, n_resamples, batch_nominal):
        batch_actual = min(batch_nominal, n_resamples-k)
        # Generate resamples
        resampled_data = []
        for sample in data:
            resample = _bootstrap_resample(sample, n_resamples=batch_actual,
                                           random_state=random_state)
            resampled_data.append(resample)

        # Compute bootstrap distribution of statistic
        theta_hat_b.append(statistic(*resampled_data, axis=-1))
    theta_hat_b = np.concatenate(theta_hat_b, axis=-1)

    # Calculate percentile interval
    alpha = ((1 - confidence_level)/2 if alternative == 'two-sided'
             else (1 - confidence_level))
    if method == 'bca':
        interval = _bca_interval(data, statistic, axis=-1, alpha=alpha,
                                 theta_hat_b=theta_hat_b, batch=batch)[:2]
        percentile_fun = _percentile_along_axis
    else:
        interval = alpha, 1-alpha

        def percentile_fun(a, q):
            return np.percentile(a=a, q=q, axis=-1)

    # Calculate confidence interval of statistic
    ci_l = percentile_fun(theta_hat_b, interval[0]*100)
    ci_u = percentile_fun(theta_hat_b, interval[1]*100)
    if method == 'basic':  # see [3]
        theta_hat = statistic(*data, axis=-1)
        ci_l, ci_u = 2*theta_hat - ci_u, 2*theta_hat - ci_l
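        # (The 'basic', or reverse-percentile, interval above reflects the
        # percentile bounds about the observed statistic: the percentile
        # errors are applied in the opposite direction from theta_hat.)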

    if alternative == 'less':
        ci_l = np.full_like(ci_l, -np.inf)
    elif alternative == 'greater':
        ci_u = np.full_like(ci_u, np.inf)

    return BootstrapResult(confidence_interval=ConfidenceInterval(ci_l, ci_u),
                           bootstrap_distribution=theta_hat_b,
                           standard_error=np.std(theta_hat_b, ddof=1, axis=-1))


def _monte_carlo_test_iv(data, rvs, statistic, vectorized, n_resamples,
                         batch, alternative, axis):
    """Input validation for `monte_carlo_test`."""

    axis_int = int(axis)
    if axis != axis_int:
        raise ValueError("`axis` must be an integer.")

    if vectorized not in {True, False, None}:
        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")

    if not isinstance(rvs, Sequence):
        rvs = (rvs,)
        data = (data,)
    for rvs_i in rvs:
        if not callable(rvs_i):
            raise TypeError("`rvs` must be callable or sequence of callables.")

    if not len(rvs) == len(data):
        message = "If `rvs` is a sequence, `len(rvs)` must equal `len(data)`."
        raise ValueError(message)

    if not callable(statistic):
        raise TypeError("`statistic` must be callable.")

    if vectorized is None:
        vectorized = 'axis' in inspect.signature(statistic).parameters

    if not vectorized:
        statistic_vectorized = _vectorize_statistic(statistic)
    else:
        statistic_vectorized = statistic

    data = _broadcast_arrays(data, axis)
    data_iv = []
    for sample in data:
        sample = np.atleast_1d(sample)
        sample = np.moveaxis(sample, axis_int, -1)
        data_iv.append(sample)

    n_resamples_int = int(n_resamples)
    if n_resamples != n_resamples_int or n_resamples_int <= 0:
        raise ValueError("`n_resamples` must be a positive integer.")

    if batch is None:
        batch_iv = batch
    else:
        batch_iv = int(batch)
        if batch != batch_iv or batch_iv <= 0:
            raise ValueError("`batch` must be a positive integer or None.")

    alternatives = {'two-sided', 'greater', 'less'}
    alternative = alternative.lower()
    if alternative not in alternatives:
        raise ValueError(f"`alternative` must be in {alternatives}")

    return (data_iv, rvs, statistic_vectorized, vectorized, n_resamples_int,
            batch_iv, alternative, axis_int)


@dataclass
class MonteCarloTestResult:
    """Result object returned by `scipy.stats.monte_carlo_test`.

    Attributes
    ----------
    statistic : float or ndarray
        The observed test statistic of the sample.
    pvalue : float or ndarray
        The p-value for the given alternative.
    null_distribution : ndarray
        The values of the test statistic generated under the null
        hypothesis.
    """
    statistic: float | np.ndarray
    pvalue: float | np.ndarray
    null_distribution: np.ndarray


@_rename_parameter('sample', 'data')
def monte_carlo_test(data, rvs, statistic, *, vectorized=None,
                     n_resamples=9999, batch=None, alternative="two-sided",
                     axis=0):
    r"""Perform a Monte Carlo hypothesis test.

    `data` contains a sample or a sequence of one or more samples. `rvs`
    specifies the distribution(s) of the sample(s) in `data` under the null
    hypothesis. The value of `statistic` for the given `data` is compared
    against a Monte Carlo null distribution: the value of the statistic for
    each of `n_resamples` sets of samples generated using `rvs`. This gives
    the p-value, the probability of observing such an extreme value of the
    test statistic under the null hypothesis.

    Parameters
    ----------
    data : array-like or sequence of array-like
        An array or sequence of arrays of observations.
    rvs : callable or tuple of callables
        A callable or sequence of callables that generates random variates
        under the null hypothesis. Each element of `rvs` must be a callable
        that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and
        returns an N-d array sample of that shape. If `rvs` is a sequence, the
        number of callables in `rvs` must match the number of samples in
        `data`, i.e. ``len(rvs) == len(data)``. If `rvs` is a single callable,
        `data` is treated as a single sample.
    statistic : callable
        Statistic for which the p-value of the hypothesis test is to be
        calculated. `statistic` must be a callable that accepts a sample
        (e.g. ``statistic(sample)``) or ``len(rvs)`` separate samples (e.g.
        ``statistic(sample1, sample2)`` if `rvs` contains two callables and
        `data` contains two samples) and returns the resulting statistic.
        If `vectorized` is set ``True``, `statistic` must also accept a keyword
        argument `axis` and be vectorized to compute the statistic along the
        provided `axis` of the samples in `data`.
    vectorized : bool, optional
        If `vectorized` is set ``False``, `statistic` will not be passed
        keyword argument `axis` and is expected to calculate the statistic
        only for 1D samples. If ``True``, `statistic` will be passed keyword
        argument `axis` and is expected to calculate the statistic along `axis`
        when passed ND sample arrays. If ``None`` (default), `vectorized`
        will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of
        a vectorized statistic typically reduces computation time.
    n_resamples : int, default: 9999
        Number of samples drawn from each of the callables of `rvs`.
        Equivalently, the number of statistic values under the null
        hypothesis used as the Monte Carlo null distribution.
    batch : int, optional
        The number of Monte Carlo samples to process in each call to
        `statistic`. Memory usage is O( `batch` * ``sample.size[axis]`` ).
        Default is ``None``, in which case `batch` equals `n_resamples`.
    alternative : {'two-sided', 'less', 'greater'}
        The alternative hypothesis for which the p-value is calculated.
        For each alternative, the p-value is defined as follows.

        - ``'greater'`` : the percentage of the null distribution that is
          greater than or equal to the observed value of the test statistic.
        - ``'less'`` : the percentage of the null distribution that is
          less than or equal to the observed value of the test statistic.
        - ``'two-sided'`` : twice the smaller of the p-values above.

    axis : int, default: 0
        The axis of `data` (or each sample within `data`) over which to
        calculate the statistic.

    Returns
    -------
    res : MonteCarloTestResult
        An object with attributes:

        statistic : float or ndarray
            The test statistic of the observed `data`.
        pvalue : float or ndarray
            The p-value for the given alternative.
        null_distribution : ndarray
            The values of the test statistic generated under the null
            hypothesis.

    .. warning::
        The p-value is calculated by counting the elements of the null
        distribution that are as extreme or more extreme than the observed
        value of the statistic. Due to the use of finite precision arithmetic,
        some statistic functions return numerically distinct values when the
        theoretical values would be exactly equal. In some cases, this could
        lead to a large error in the calculated p-value. `monte_carlo_test`
        guards against this by considering elements in the null distribution
        that are "close" (within a relative tolerance of 100 times the
        floating point epsilon of inexact dtypes) to the observed
        value of the test statistic as equal to the observed value of the
        test statistic. However, the user is advised to inspect the null
        distribution to assess whether this method of comparison is
        appropriate, and if not, calculate the p-value manually.

    References
    ----------

    .. [1] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
       Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."
       Statistical Applications in Genetics and Molecular Biology 9.1 (2010).

    Examples
    --------

    Suppose we wish to test whether a small sample has been drawn from a normal
    distribution. We decide that we will use the skew of the sample as a
    test statistic, and we will consider a p-value of 0.05 to be statistically
    significant.

    >>> import numpy as np
    >>> from scipy import stats
    >>> def statistic(x, axis):
    ...     return stats.skew(x, axis)

    After collecting our data, we calculate the observed value of the test
    statistic.

    >>> rng = np.random.default_rng()
    >>> x = stats.skewnorm.rvs(a=1, size=50, random_state=rng)
    >>> statistic(x, axis=0)
    0.12457412450240658

    To determine the probability of observing such an extreme value of the
    skewness by chance if the sample were drawn from the normal distribution,
    we can perform a Monte Carlo hypothesis test. The test will draw many
    samples at random from the normal distribution, calculate the skewness
    of each sample, and compare our original skewness against this
    distribution to determine an approximate p-value.

    >>> from scipy.stats import monte_carlo_test
    >>> # because our statistic is vectorized, we pass `vectorized=True`
    >>> rvs = lambda size: stats.norm.rvs(size=size, random_state=rng)
    >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True)
    >>> print(res.statistic)
    0.12457412450240658
    >>> print(res.pvalue)
    0.7012

    The probability of obtaining a test statistic less than or equal to the
    observed value under the null hypothesis is ~70%. This is greater than
    our chosen threshold of 5%, so we cannot consider this to be significant
    evidence against the null hypothesis.

    Note that this p-value essentially matches that of
    `scipy.stats.skewtest`, which relies on an asymptotic distribution of a
    test statistic based on the sample skewness.

    >>> stats.skewtest(x).pvalue
    0.6892046027110614

    This asymptotic approximation is not valid for small sample sizes, but
    `monte_carlo_test` can be used with samples of any size.

    >>> x = stats.skewnorm.rvs(a=1, size=7, random_state=rng)
    >>> # stats.skewtest(x) would produce an error due to small sample
    >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True)

    The Monte Carlo distribution of the test statistic is provided for
    further investigation.

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.hist(res.null_distribution, bins=50)
    >>> ax.set_title("Monte Carlo distribution of test statistic")
    >>> ax.set_xlabel("Value of Statistic")
    >>> ax.set_ylabel("Frequency")
    >>> plt.show()
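
    Statistics of more than one sample are supported by passing one `rvs`
    callable per sample, as described above. A minimal sketch of that
    interface follows; the names ``diff_of_means``, ``rvs_pair``, and ``y``
    are illustrative, not part of the library.

    >>> def diff_of_means(x, y, axis):
    ...     return np.mean(x, axis=axis) - np.mean(y, axis=axis)
    >>> rvs_pair = (lambda size: stats.norm.rvs(size=size, random_state=rng),
    ...             lambda size: stats.norm.rvs(size=size, random_state=rng))
    >>> y = stats.norm.rvs(size=7, random_state=rng)
    >>> res = monte_carlo_test((x, y), rvs_pair, diff_of_means)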

    """
    args = _monte_carlo_test_iv(data, rvs, statistic, vectorized,
                                n_resamples, batch, alternative, axis)
    (data, rvs, statistic, vectorized,
     n_resamples, batch, alternative, axis) = args

    # Some statistics return plain floats; ensure they're at least a NumPy float
    observed = np.asarray(statistic(*data, axis=-1))[()]

    n_observations = [sample.shape[-1] for sample in data]
    batch_nominal = batch or n_resamples
    null_distribution = []
    for k in range(0, n_resamples, batch_nominal):
        batch_actual = min(batch_nominal, n_resamples - k)
        resamples = [rvs_i(size=(batch_actual, n_observations_i))
                     for rvs_i, n_observations_i in zip(rvs, n_observations)]
        null_distribution.append(statistic(*resamples, axis=-1))
    null_distribution = np.concatenate(null_distribution)
    null_distribution = null_distribution.reshape([-1] + [1]*observed.ndim)
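    # The reshape above lets the null distribution broadcast against
    # `observed`: axis 0 indexes the Monte Carlo resamples, and the
    # remaining singleton axes align with the axes of `observed`.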

    # relative tolerance for detecting numerically distinct but
    # theoretically equal values in the null distribution
    eps = (0 if not np.issubdtype(observed.dtype, np.inexact)
           else np.finfo(observed.dtype).eps*100)
    gamma = np.abs(eps * observed)

    def less(null_distribution, observed):
        cmps = null_distribution <= observed + gamma
        pvalues = (cmps.sum(axis=0) + 1) / (n_resamples + 1)  # see [1]
        return pvalues

    def greater(null_distribution, observed):
        cmps = null_distribution >= observed - gamma
        pvalues = (cmps.sum(axis=0) + 1) / (n_resamples + 1)  # see [1]
        return pvalues

    def two_sided(null_distribution, observed):
        pvalues_less = less(null_distribution, observed)
        pvalues_greater = greater(null_distribution, observed)
        pvalues = np.minimum(pvalues_less, pvalues_greater) * 2
        return pvalues
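    # The two-sided p-value doubles the smaller one-sided p-value; any
    # resulting values greater than 1 are clipped below.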

    compare = {"less": less,
               "greater": greater,
               "two-sided": two_sided}

    pvalues = compare[alternative](null_distribution, observed)
    pvalues = np.clip(pvalues, 0, 1)

    return MonteCarloTestResult(observed, pvalues, null_distribution)


@dataclass
class PermutationTestResult:
    """Result object returned by `scipy.stats.permutation_test`.

    Attributes
    ----------
    statistic : float or ndarray
        The observed test statistic of the data.
    pvalue : float or ndarray
        The p-value for the given alternative.
    null_distribution : ndarray
        The values of the test statistic generated under the null
        hypothesis.
    """
    statistic: float | np.ndarray
    pvalue: float | np.ndarray
    null_distribution: np.ndarray


def _all_partitions_concatenated(ns):
    """
    Generate all partitions of indices of groups of given sizes, concatenated

    `ns` is an iterable of ints.
    """
    def all_partitions(z, n):
        for c in combinations(z, n):
            x0 = set(c)
            x1 = z - x0
            yield [x0, x1]

    def all_partitions_n(z, ns):
        if len(ns) == 0:
            yield [z]
            return
        for c in all_partitions(z, ns[0]):
            for d in all_partitions_n(c[1], ns[1:]):
                yield c[0:1] + d

    z = set(range(np.sum(ns)))
    for partitioning in all_partitions_n(z, ns[:]):
        x = np.concatenate([list(partition)
                            for partition in partitioning]).astype(int)
        yield x


def _batch_generator(iterable, batch):
    """A generator that yields batches of elements from an iterable"""
    iterator = iter(iterable)
    if batch <= 0:
        raise ValueError("`batch` must be positive.")
    z = [item for i, item in zip(range(batch), iterator)]
    while z:  # we don't want StopIteration without yielding an empty list
        yield z
        z = [item for i, item in zip(range(batch), iterator)]


def _pairings_permutations_gen(n_permutations, n_samples, n_obs_sample, batch,
                               random_state):
    # Returns a generator that yields arrays of size
    # `(batch, n_samples, n_obs_sample)`.
    # Each row is an independent permutation of indices 0 to `n_obs_sample`.
    batch = min(batch, n_permutations)

    if hasattr(random_state, 'permuted'):
        def batched_perm_generator():
            indices = np.arange(n_obs_sample)
            indices = np.tile(indices, (batch, n_samples, 1))
            for k in range(0, n_permutations, batch):
                batch_actual = min(batch, n_permutations-k)
                # Don't permute in place, otherwise results depend on `batch`
                permuted_indices = random_state.permuted(indices, axis=-1)
                yield permuted_indices[:batch_actual]
    else:  # RandomState and early Generators don't have `permuted`
        def batched_perm_generator():
            for k in range(0, n_permutations, batch):
                batch_actual = min(batch, n_permutations-k)
                size = (batch_actual, n_samples, n_obs_sample)
                x = random_state.random(size=size)
                yield np.argsort(x, axis=-1)[:batch_actual]

    return batched_perm_generator()


def _calculate_null_both(data, statistic, n_permutations, batch,
                         random_state=None):
    """
    Calculate null distribution for independent sample tests.
    """
    n_samples = len(data)

    # compute number of permutations
    # (distinct partitions of data into samples of these sizes)
    n_obs_i = [sample.shape[-1] for sample in data]  # observations per sample
    n_obs_ic = np.cumsum(n_obs_i)
    n_obs = n_obs_ic[-1]  # total number of observations
    n_max = np.prod([comb(n_obs_ic[i], n_obs_ic[i-1])
                     for i in range(n_samples-1, 0, -1)])
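    # e.g. for two samples with sizes (3, 2): n_max = comb(5, 3) = 10
    # distinct partitions of the five pooled observations.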

    # perm_generator is an iterator that produces permutations of indices
    # from 0 to n_obs. We'll concatenate the samples, use these indices to
    # permute the data, then split the samples apart again.
    if n_permutations >= n_max:
        exact_test = True
        n_permutations = n_max
        perm_generator = _all_partitions_concatenated(n_obs_i)
    else:
        exact_test = False
        # Neither RandomState.permutation nor Generator.permutation
        # can permute axis-slices independently. If this feature is
        # added in the future, batches of the desired size should be
        # generated in a single call.
        perm_generator = (random_state.permutation(n_obs)
                          for i in range(n_permutations))

    batch = batch or int(n_permutations)
    null_distribution = []

    # First, concatenate all the samples. In batches, permute samples with
    # indices produced by the `perm_generator`, split them into new samples of
    # the original sizes, compute the statistic for each batch, and add these
    # statistic values to the null distribution.
    data = np.concatenate(data, axis=-1)
    for indices in _batch_generator(perm_generator, batch=batch):
        indices = np.array(indices)

        # `indices` is 2D: each row is a permutation of the indices.
        # We use it to index `data` along its last axis, which corresponds
        # with observations.
        # After indexing, the second to last axis of `data_batch` corresponds
        # with permutations, and the last axis corresponds with observations.
        data_batch = data[..., indices]

        # Move the permutation axis to the front: we'll concatenate a list
        # of batched statistic values along this zeroth axis to form the
        # null distribution.
        data_batch = np.moveaxis(data_batch, -2, 0)
        data_batch = np.split(data_batch, n_obs_ic[:-1], axis=-1)
        null_distribution.append(statistic(*data_batch, axis=-1))
    null_distribution = np.concatenate(null_distribution, axis=0)

    return null_distribution, n_permutations, exact_test


def _calculate_null_pairings(data, statistic, n_permutations, batch,
                             random_state=None):
    """
    Calculate null distribution for association tests.
    """
    n_samples = len(data)

    # compute number of permutations (factorial(n) permutations of each sample)
    n_obs_sample = data[0].shape[-1]  # observations per sample; same for each
    n_max = factorial(n_obs_sample)**n_samples

    # `perm_generator` is an iterator that produces a list of permutations of
    # indices from 0 to n_obs_sample, one for each sample.
    if n_permutations >= n_max:
        exact_test = True
        n_permutations = n_max
        batch = batch or int(n_permutations)
        # cartesian product of the sets of all permutations of indices
        perm_generator = product(*(permutations(range(n_obs_sample))
                                   for i in range(n_samples)))
        batched_perm_generator = _batch_generator(perm_generator, batch=batch)
    else:
        exact_test = False
        batch = batch or int(n_permutations)
        # Separate random permutations of indices for each sample.
        # Again, it would be nice if RandomState/Generator.permutation
        # could permute each axis-slice separately.
        args = n_permutations, n_samples, n_obs_sample, batch, random_state
        batched_perm_generator = _pairings_permutations_gen(*args)

    null_distribution = []

    for indices in batched_perm_generator:
        indices = np.array(indices)

        # `indices` is 3D: the zeroth axis is for permutations, the next is
        # for samples, and the last is for observations. Swap the first two
        # to make the zeroth axis correspond with samples, as it does for
        # `data`.
        indices = np.swapaxes(indices, 0, 1)

        # When we're done, `data_batch` will be a list of length `n_samples`.
        # Each element will be a batch of random permutations of one sample.
        # The zeroth axis of each batch will correspond with permutations,
        # and the last will correspond with observations. (This makes it
        # easy to pass into `statistic`.)
        data_batch = [None]*n_samples
        for i in range(n_samples):
            data_batch[i] = data[i][..., indices[i]]
            data_batch[i] = np.moveaxis(data_batch[i], -2, 0)

        null_distribution.append(statistic(*data_batch, axis=-1))
    null_distribution = np.concatenate(null_distribution, axis=0)

    return null_distribution, n_permutations, exact_test


def _calculate_null_samples(data, statistic, n_permutations, batch,
                            random_state=None):
    """
    Calculate null distribution for paired-sample tests.
    """
    n_samples = len(data)

    # By convention, the meaning of the "samples" permutations type for
    # data with only one sample is to flip the sign of the observations.
    # Achieve this by adding a second sample - the negative of the original.
    if n_samples == 1:
        data = [data[0], -data[0]]
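        # e.g. a single sample [x1, x2] becomes the pair of "samples"
        # [x1, x2] and [-x1, -x2]; exchanging paired elements between the
        # two is then equivalent to flipping the signs of observations.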

    # The "samples" permutation strategy is the same as the "pairings"
    # strategy except the roles of samples and observations are flipped.
    # So swap these axes, then we'll use the function for the "pairings"
    # strategy to do all the work!
    data = np.swapaxes(data, 0, -1)

    # (Of course, the user's statistic doesn't know what we've done here,
    # so we need to pass it what it's expecting.)
    def statistic_wrapped(*data, axis):
        data = np.swapaxes(data, 0, -1)
        if n_samples == 1:
            data = data[0:1]
        return statistic(*data, axis=axis)

    return _calculate_null_pairings(data, statistic_wrapped, n_permutations,
                                    batch, random_state)


def _permutation_test_iv(data, statistic, permutation_type, vectorized,
                         n_resamples, batch, alternative, axis, random_state):
    """Input validation for `permutation_test`."""

    axis_int = int(axis)
    if axis != axis_int:
        raise ValueError("`axis` must be an integer.")

    permutation_types = {'samples', 'pairings', 'independent'}
    permutation_type = permutation_type.lower()
    if permutation_type not in permutation_types:
        raise ValueError(f"`permutation_type` must be in {permutation_types}.")

    if vectorized not in {True, False, None}:
        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")

    if vectorized is None:
        vectorized = 'axis' in inspect.signature(statistic).parameters

    if not vectorized:
        statistic = _vectorize_statistic(statistic)

    message = "`data` must be a tuple containing at least two samples"
    try:
        if len(data) < 2 and permutation_type == 'independent':
            raise ValueError(message)
    except TypeError:
        raise TypeError(message)

    data = _broadcast_arrays(data, axis)
    data_iv = []
    for sample in data:
        sample = np.atleast_1d(sample)
        if sample.shape[axis] <= 1:
            raise ValueError("each sample in `data` must contain two or more "
                             "observations along `axis`.")
        sample = np.moveaxis(sample, axis_int, -1)
        data_iv.append(sample)

    n_resamples_int = (int(n_resamples) if not np.isinf(n_resamples)
                       else np.inf)
    if n_resamples != n_resamples_int or n_resamples_int <= 0:
        raise ValueError("`n_resamples` must be a positive integer.")

    if batch is None:
        batch_iv = batch
    else:
        batch_iv = int(batch)
        if batch != batch_iv or batch_iv <= 0:
            raise ValueError("`batch` must be a positive integer or None.")

    alternatives = {'two-sided', 'greater', 'less'}
    alternative = alternative.lower()
    if alternative not in alternatives:
        raise ValueError(f"`alternative` must be in {alternatives}")

    random_state = check_random_state(random_state)

    return (data_iv, statistic, permutation_type, vectorized, n_resamples_int,
            batch_iv, alternative, axis_int, random_state)


def permutation_test(data, statistic, *, permutation_type='independent',
                     vectorized=None, n_resamples=9999, batch=None,
                     alternative="two-sided", axis=0, random_state=None):
    r"""
    Performs a permutation test of a given statistic on provided data.

    For independent sample statistics, the null hypothesis is that the data are
    randomly sampled from the same distribution.
    For paired sample statistics, two null hypotheses can be tested:
    that the data are paired at random or that the data are assigned to samples
    at random.

    Parameters
    ----------
    data : iterable of array-like
        Contains the samples, each of which is an array of observations.
        Dimensions of sample arrays must be compatible for broadcasting except
        along `axis`.
    statistic : callable
        Statistic for which the p-value of the hypothesis test is to be
        calculated. `statistic` must be a callable that accepts samples
        as separate arguments (e.g. ``statistic(*data)``) and returns the
        resulting statistic.
        If `vectorized` is set ``True``, `statistic` must also accept a keyword
        argument `axis` and be vectorized to compute the statistic along the
        provided `axis` of the sample arrays.
    permutation_type : {'independent', 'samples', 'pairings'}, optional
        The type of permutations to be performed, in accordance with the
        null hypothesis. The first two permutation types are for paired sample
        statistics, in which all samples contain the same number of
        observations and observations with corresponding indices along `axis`
        are considered to be paired; the third is for independent sample
        statistics.

        - ``'samples'`` : observations are assigned to different samples
          but remain paired with the same observations from other samples.
          This permutation type is appropriate for paired sample hypothesis
          tests such as the Wilcoxon signed-rank test and the paired t-test.
        - ``'pairings'`` : observations are paired with different observations,
          but they remain within the same sample. This permutation type is
          appropriate for association/correlation tests with statistics such
          as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's
          :math:`r`.
        - ``'independent'`` (default) : observations are assigned to different
          samples. Samples may contain different numbers of observations. This
          permutation type is appropriate for independent sample hypothesis
          tests such as the Mann-Whitney :math:`U` test and the independent
          sample t-test.

        Please see the Notes section below for more detailed descriptions
        of the permutation types.

    vectorized : bool, optional
        If `vectorized` is set ``False``, `statistic` will not be passed
        keyword argument `axis` and is expected to calculate the statistic
        only for 1D samples. If ``True``, `statistic` will be passed keyword
        argument `axis` and is expected to calculate the statistic along `axis`
        when passed an ND sample array. If ``None`` (default), `vectorized`
        will be set ``True`` if ``axis`` is a parameter of `statistic`. Use
        of a vectorized statistic typically reduces computation time.
    n_resamples : int or np.inf, default: 9999
        Number of random permutations (resamples) used to approximate the null
        distribution. If greater than or equal to the number of distinct
        permutations, the exact null distribution will be computed.
        Note that the number of distinct permutations grows very rapidly with
        the sizes of samples, so exact tests are feasible only for very small
        data sets.
    batch : int, optional
        The number of permutations to process in each call to `statistic`.
        Memory usage is O( `batch` * ``n`` ), where ``n`` is the total size
        of all samples, regardless of the value of `vectorized`. Default is
        ``None``, in which case ``batch`` is the number of permutations.
    alternative : {'two-sided', 'less', 'greater'}, optional
        The alternative hypothesis for which the p-value is calculated.
        For each alternative, the p-value is defined for exact tests as
        follows.

        - ``'greater'`` : the percentage of the null distribution that is
          greater than or equal to the observed value of the test statistic.
        - ``'less'`` : the percentage of the null distribution that is
          less than or equal to the observed value of the test statistic.
        - ``'two-sided'`` (default) : twice the smaller of the p-values above.

        Note that p-values for randomized tests are calculated according to the
        conservative (over-estimated) approximation suggested in [2]_ and [3]_
        rather than the unbiased estimator suggested in [4]_. That is, when
        calculating the proportion of the randomized null distribution that is
        as extreme as the observed value of the test statistic, the values in
        the numerator and denominator are both increased by one. An
        interpretation of this adjustment is that the observed value of the
        test statistic is always included as an element of the randomized
        null distribution.
        The convention used for two-sided p-values is not universal;
        the observed test statistic and null distribution are returned in
        case a different definition is preferred.

    axis : int, default: 0
        The axis of the (broadcasted) samples over which to calculate the
        statistic. If samples have a different number of dimensions,
        singleton dimensions are prepended to samples with fewer dimensions
        before `axis` is considered.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate permutations.

        If `random_state` is ``None`` (default), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.

    Returns
    -------
    res : PermutationTestResult
        An object with attributes:

        statistic : float or ndarray
            The observed test statistic of the data.
        pvalue : float or ndarray
            The p-value for the given alternative.
        null_distribution : ndarray
            The values of the test statistic generated under the null
            hypothesis.

    Notes
    -----

    The three types of permutation tests supported by this function are
    described below.

    **Unpaired statistics** (``permutation_type='independent'``):

    The null hypothesis associated with this permutation type is that all
    observations are sampled from the same underlying distribution and that
    they have been assigned to one of the samples at random.

    Suppose ``data`` contains two samples; e.g. ``a, b = data``.
    When ``1 < n_resamples < binom(n, k)``, where

    * ``k`` is the number of observations in ``a``,
    * ``n`` is the total number of observations in ``a`` and ``b``, and
    * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``),

    the data are pooled (concatenated), randomly assigned to either the first
    or second sample, and the statistic is calculated. This process is
    performed repeatedly, `n_resamples` times, generating a distribution of
    the statistic under the null hypothesis. The statistic of the original
    data is compared to this distribution to determine the p-value.

    When ``n_resamples >= binom(n, k)``, an exact test is performed: the data
    are *partitioned* between the samples in each distinct way exactly once,
    and the exact null distribution is formed.
    Note that for a given partitioning of the data between the samples,
    only one ordering/permutation of the data *within* each sample is
    considered. For statistics that do not depend on the order of the data
    within samples, this dramatically reduces computational cost without
    affecting the shape of the null distribution (because the frequency/count
    of each value is affected by the same factor).

    For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this
    permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``.
    Because only one ordering/permutation of the data *within* each sample
    is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]``
    and ``y = [a4, a3, b1]`` would *not* be considered distinct from the
    example above.

    ``permutation_type='independent'`` does not support one-sample statistics,
    but it can be applied to statistics with more than two samples. In this
    case, if ``n`` is an array of the number of observations within each
    sample, the number of distinct partitions is::

        np.prod([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)])
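
    For example, for two samples with ``n = [4, 3]``, this is
    ``binom(7, 3) = 35`` distinct partitions.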

    **Paired statistics, permute pairings** (``permutation_type='pairings'``):

    The null hypothesis associated with this permutation type is that
    observations within each sample are drawn from the same underlying
    distribution and that pairings with elements of other samples are
    assigned at random.

    Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we
    wish to consider all possible pairings of elements of ``a`` with elements
    of a second sample, ``b``. Let ``n`` be the number of observations in
    ``a``, which must also equal the number of observations in ``b``.

    When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are
    randomly permuted. The user-supplied statistic accepts one data argument,
    say ``a_perm``, and calculates the statistic considering ``a_perm`` and
    ``b``. This process is performed repeatedly, `n_resamples` times,
    generating a distribution of the statistic under the null hypothesis.
    The statistic of the original data is compared to this distribution to
    determine the p-value.

    When ``n_resamples >= factorial(n)``, an exact test is performed:
    ``a`` is permuted in each distinct way exactly once. Therefore, the
    `statistic` is computed for each unique pairing of samples between ``a``
    and ``b`` exactly once.

    For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this
    permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left
    in its original order.

    ``permutation_type='pairings'`` supports ``data`` containing any number
    of samples, each of which must contain the same number of observations.
    All samples provided in ``data`` are permuted *independently*. Therefore,
    if ``m`` is the number of samples and ``n`` is the number of observations
    within each sample, then the number of permutations in an exact test is::

        factorial(n)**m
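
    For example, three samples (``m = 3``) of four observations each
    (``n = 4``) give ``factorial(4)**3 = 13824`` permutations.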

    Note that if a two-sample statistic, for example, does not inherently
    depend on the order in which observations are provided - only on the
    *pairings* of observations - then only one of the two samples should be
    provided in ``data``. This dramatically reduces computational cost without
    affecting the shape of the null distribution (because the frequency/count
    of each value is affected by the same factor).

    **Paired statistics, permute samples** (``permutation_type='samples'``):

    The null hypothesis associated with this permutation type is that
    observations within each pair are drawn from the same underlying
    distribution and that the sample to which they are assigned is random.

    Suppose ``data`` contains two samples; e.g. ``a, b = data``.
    Let ``n`` be the number of observations in ``a``, which must also equal
    the number of observations in ``b``.

    When ``1 < n_resamples < 2**n``, the elements of ``a`` and ``b`` are
    randomly swapped between samples (maintaining their pairings) and the
    statistic is calculated. This process is performed repeatedly,
    `n_resamples` times, generating a distribution of the statistic under the
    null hypothesis. The statistic of the original data is compared to this
    distribution to determine the p-value.

    When ``n_resamples >= 2**n``, an exact test is performed: the observations
    are assigned to the two samples in each distinct way (while maintaining
    pairings) exactly once.

    For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this
    permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``.

    ``permutation_type='samples'`` supports ``data`` containing any number
    of samples, each of which must contain the same number of observations.
    If ``data`` contains more than one sample, paired observations within
    ``data`` are exchanged between samples *independently*. Therefore, if ``m``
    is the number of samples and ``n`` is the number of observations within
    each sample, then the number of permutations in an exact test is::

        factorial(m)**n
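
    For example, two paired samples (``m = 2``) of five observations each
    (``n = 5``) give ``factorial(2)**5 = 32`` distinct assignments,
    consistent with the ``2**n`` count above.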
|
1506 |
+
|
1507 |
+
Several paired-sample statistical tests, such as the Wilcoxon signed rank
|
1508 |
+
test and paired-sample t-test, can be performed considering only the
|
1509 |
+
*difference* between two paired elements. Accordingly, if ``data`` contains
|
1510 |
+
only one sample, then the null distribution is formed by independently
|
1511 |
+
changing the *sign* of each observation.
|
1512 |
+
|
1513 |
+
.. warning::
|
1514 |
+
The p-value is calculated by counting the elements of the null
|
1515 |
+
distribution that are as extreme or more extreme than the observed
|
1516 |
+
value of the statistic. Due to the use of finite precision arithmetic,
|
1517 |
+
some statistic functions return numerically distinct values when the
|
1518 |
+
theoretical values would be exactly equal. In some cases, this could
|
1519 |
+
lead to a large error in the calculated p-value. `permutation_test`
|
1520 |
+
guards against this by considering elements in the null distribution
|
1521 |
+
that are "close" (within a relative tolerance of 100 times the
|
1522 |
+
floating point epsilon of inexact dtypes) to the observed
|
1523 |
+
value of the test statistic as equal to the observed value of the
|
1524 |
+
test statistic. However, the user is advised to inspect the null
|
1525 |
+
distribution to assess whether this method of comparison is
|
1526 |
+
appropriate, and if not, calculate the p-value manually. See example
|
1527 |
+
below.
|
1528 |
+
|
1529 |
+
References
|
1530 |
+
----------
|
1531 |
+
|
1532 |
+
.. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951).
|
1533 |
+
.. [2] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
|
1534 |
+
Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."
|
1535 |
+
Statistical Applications in Genetics and Molecular Biology 9.1 (2010).
|
1536 |
+
.. [3] M. D. Ernst. "Permutation Methods: A Basis for Exact Inference".
|
1537 |
+
Statistical Science (2004).
|
1538 |
+
.. [4] B. Efron and R. J. Tibshirani. An Introduction to the Bootstrap
|
1539 |
+
(1993).
|
1540 |
+
|
1541 |
+
    Examples
    --------

    Suppose we wish to test whether two samples are drawn from the same
    distribution. Assume that the underlying distributions are unknown to us,
    and that before observing the data, we hypothesized that the mean of the
    first sample would be less than that of the second sample. We decide that
    we will use the difference between the sample means as a test statistic,
    and we will consider a p-value of 0.05 to be statistically significant.

    For efficiency, we write the function defining the test statistic in a
    vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the
    statistic will be calculated for each axis-slice along `axis`.

    >>> import numpy as np
    >>> def statistic(x, y, axis):
    ...     return np.mean(x, axis=axis) - np.mean(y, axis=axis)

    After collecting our data, we calculate the observed value of the test
    statistic.

    >>> from scipy.stats import norm
    >>> rng = np.random.default_rng()
    >>> x = norm.rvs(size=5, random_state=rng)
    >>> y = norm.rvs(size=6, loc=3, random_state=rng)
    >>> statistic(x, y, 0)
    -3.5411688580987266

    Indeed, the test statistic is negative, suggesting that the true mean of
    the distribution underlying ``x`` is less than that of the distribution
    underlying ``y``. To determine the probability of this occurring by chance
    if the two samples were drawn from the same distribution, we perform
    a permutation test.

    >>> from scipy.stats import permutation_test
    >>> # because our statistic is vectorized, we pass `vectorized=True`
    >>> # `n_resamples=np.inf` indicates that an exact test is to be performed
    >>> res = permutation_test((x, y), statistic, vectorized=True,
    ...                        n_resamples=np.inf, alternative='less')
    >>> print(res.statistic)
    -3.5411688580987266
    >>> print(res.pvalue)
    0.004329004329004329

    The probability of obtaining a test statistic less than or equal to the
    observed value under the null hypothesis is 0.4329%. This is less than our
    chosen threshold of 5%, so we consider this to be significant evidence
    against the null hypothesis in favor of the alternative.

    Because the size of the samples above was small, `permutation_test` could
    perform an exact test. For larger samples, we resort to a randomized
    permutation test.

    >>> x = norm.rvs(size=100, random_state=rng)
    >>> y = norm.rvs(size=120, loc=0.3, random_state=rng)
    >>> res = permutation_test((x, y), statistic, n_resamples=100000,
    ...                        vectorized=True, alternative='less',
    ...                        random_state=rng)
    >>> print(res.statistic)
    -0.5230459671240913
    >>> print(res.pvalue)
    0.00016999830001699983

    The approximate probability of obtaining a test statistic less than or
    equal to the observed value under the null hypothesis is 0.017%. This is
    again less than our chosen threshold of 5%, so again we have significant
    evidence to reject the null hypothesis in favor of the alternative.

    For large samples and number of permutations, the result is comparable to
    that of the corresponding asymptotic test, the independent sample t-test.

    >>> from scipy.stats import ttest_ind
    >>> res_asymptotic = ttest_ind(x, y, alternative='less')
    >>> print(res_asymptotic.pvalue)
    0.00012688101537979522

    The permutation distribution of the test statistic is provided for
    further investigation.

    >>> import matplotlib.pyplot as plt
    >>> plt.hist(res.null_distribution, bins=50)
    >>> plt.title("Permutation distribution of test statistic")
    >>> plt.xlabel("Value of Statistic")
    >>> plt.ylabel("Frequency")
    >>> plt.show()

    Inspection of the null distribution is essential if the statistic suffers
    from inaccuracy due to limited machine precision. Consider the following
    case:

    >>> from scipy.stats import pearsonr
    >>> x = [1, 2, 4, 3]
    >>> y = [2, 4, 6, 8]
    >>> def statistic(x, y):
    ...     return pearsonr(x, y).statistic
    >>> res = permutation_test((x, y), statistic, vectorized=False,
    ...                        permutation_type='pairings',
    ...                        alternative='greater')
    >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution

    In this case, some elements of the null distribution differ from the
    observed value of the correlation coefficient ``r`` due to numerical
    noise. We manually inspect the elements of the null distribution that
    are nearly the same as the observed value of the test statistic.

    >>> r
    0.8
    >>> unique = np.unique(null)
    >>> unique
    array([-1. , -0.8, -0.8, -0.6, -0.4, -0.2, -0.2,  0. ,  0.2,  0.2,  0.4,
            0.6,  0.8,  0.8,  1. ])  # may vary
    >>> unique[np.isclose(r, unique)].tolist()
    [0.7999999999999999, 0.8]

    If `permutation_test` were to perform the comparison naively, the
    elements of the null distribution with value ``0.7999999999999999`` would
    not be considered as extreme or more extreme than the observed value of
    the statistic, so the calculated p-value would be too small.

    >>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null)
    >>> incorrect_pvalue
    0.1111111111111111  # may vary

    Instead, `permutation_test` treats elements of the null distribution that
    are within ``max(1e-14, abs(r)*1e-14)`` of the observed value of the
    statistic ``r`` to be equal to ``r``.

    >>> correct_pvalue = np.count_nonzero(null >= r - 1e-14) / len(null)
    >>> correct_pvalue
    0.16666666666666666
    >>> res.pvalue == correct_pvalue
    True

    This method of comparison is expected to be accurate in most practical
    situations, but the user is advised to assess this by inspecting the
    elements of the null distribution that are close to the observed value
    of the statistic. Also, consider the use of statistics that can be
    calculated using exact arithmetic (e.g. integer statistics).

    """
    args = _permutation_test_iv(data, statistic, permutation_type, vectorized,
                                n_resamples, batch, alternative, axis,
                                random_state)
    (data, statistic, permutation_type, vectorized, n_resamples, batch,
     alternative, axis, random_state) = args

    observed = statistic(*data, axis=-1)

    null_calculators = {"pairings": _calculate_null_pairings,
                        "samples": _calculate_null_samples,
                        "independent": _calculate_null_both}
    null_calculator_args = (data, statistic, n_resamples,
                            batch, random_state)
    calculate_null = null_calculators[permutation_type]
    null_distribution, n_resamples, exact_test = (
        calculate_null(*null_calculator_args))

    # See References [2] and [3]
    adjustment = 0 if exact_test else 1

    # relative tolerance for detecting numerically distinct but
    # theoretically equal values in the null distribution
    eps = (0 if not np.issubdtype(observed.dtype, np.inexact)
           else np.finfo(observed.dtype).eps*100)
    gamma = np.abs(eps * observed)

    def less(null_distribution, observed):
        cmps = null_distribution <= observed + gamma
        pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment)
        return pvalues

    def greater(null_distribution, observed):
        cmps = null_distribution >= observed - gamma
        pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment)
        return pvalues

    def two_sided(null_distribution, observed):
        pvalues_less = less(null_distribution, observed)
        pvalues_greater = greater(null_distribution, observed)
        pvalues = np.minimum(pvalues_less, pvalues_greater) * 2
        return pvalues

    compare = {"less": less,
               "greater": greater,
               "two-sided": two_sided}

    pvalues = compare[alternative](null_distribution, observed)
    pvalues = np.clip(pvalues, 0, 1)

    return PermutationTestResult(observed, pvalues, null_distribution)
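
# A small numerical sketch of the tolerance used above: `gamma` makes
# null-distribution values within ~100*eps of the observed statistic count
# as ties when the p-value is computed.
#
# >>> import numpy as np
# >>> observed = 0.8
# >>> null = np.array([0.7999999999999999, 0.8, -0.8])
# >>> gamma = 100 * np.finfo(np.float64).eps * abs(observed)
# >>> int(np.count_nonzero(null >= observed - gamma))
# 2

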
@dataclass
class ResamplingMethod:
    """Configuration information for a statistical resampling method.

    Instances of this class can be passed into the `method` parameter of some
    hypothesis test functions to perform a resampling or Monte Carlo version
    of the hypothesis test.

    Attributes
    ----------
    n_resamples : int
        The number of resamples to perform or Monte Carlo samples to draw.
    batch : int, optional
        The number of resamples to process in each vectorized call to
        the statistic. Batch sizes >>1 tend to be faster when the statistic
        is vectorized, but memory usage scales linearly with the batch size.
        Default is ``None``, which processes all resamples in a single batch.
    """
    n_resamples: int = 9999
    batch: int = None  # type: ignore[assignment]


@dataclass
class MonteCarloMethod(ResamplingMethod):
    """Configuration information for a Monte Carlo hypothesis test.

    Instances of this class can be passed into the `method` parameter of some
    hypothesis test functions to perform a Monte Carlo version of the
    hypothesis tests.

    Attributes
    ----------
    n_resamples : int, optional
        The number of Monte Carlo samples to draw. Default is 9999.
    batch : int, optional
        The number of Monte Carlo samples to process in each vectorized call
        to the statistic. Batch sizes >>1 tend to be faster when the statistic
        is vectorized, but memory usage scales linearly with the batch size.
        Default is ``None``, which processes all samples in a single batch.
    rvs : callable or tuple of callables, optional
        A callable or sequence of callables that generates random variates
        under the null hypothesis. Each element of `rvs` must be a callable
        that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and
        returns an N-d array sample of that shape. If `rvs` is a sequence, the
        number of callables in `rvs` must match the number of samples passed
        to the hypothesis test in which the `MonteCarloMethod` is used.
        Default is ``None``, in which case the hypothesis test function
        chooses values to match the standard version of the hypothesis test.
        For example, the null hypothesis of `scipy.stats.pearsonr` is
        typically that the samples are drawn from the standard normal
        distribution, so ``rvs = (rng.normal, rng.normal)`` where
        ``rng = np.random.default_rng()``.
    """
    rvs: object = None

    def _asdict(self):
        # `dataclasses.asdict` deepcopies; we don't want that.
        return dict(n_resamples=self.n_resamples, batch=self.batch,
                    rvs=self.rvs)


@dataclass
class PermutationMethod(ResamplingMethod):
    """Configuration information for a permutation hypothesis test.

    Instances of this class can be passed into the `method` parameter of some
    hypothesis test functions to perform a permutation version of the
    hypothesis tests.

    Attributes
    ----------
    n_resamples : int, optional
        The number of resamples to perform. Default is 9999.
    batch : int, optional
        The number of resamples to process in each vectorized call to
        the statistic. Batch sizes >>1 tend to be faster when the statistic
        is vectorized, but memory usage scales linearly with the batch size.
        Default is ``None``, which processes all resamples in a single batch.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate resamples.

        If `random_state` is already a ``Generator`` or ``RandomState``
        instance, then that instance is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is ``None`` (default), the
        `numpy.random.RandomState` singleton is used.
    """
    random_state: object = None

    def _asdict(self):
        # `dataclasses.asdict` deepcopies; we don't want that.
        return dict(n_resamples=self.n_resamples, batch=self.batch,
                    random_state=self.random_state)


@dataclass
class BootstrapMethod(ResamplingMethod):
    """Configuration information for a bootstrap confidence interval.

    Instances of this class can be passed into the `method` parameter of some
    confidence interval methods to generate a bootstrap confidence interval.

    Attributes
    ----------
    n_resamples : int, optional
        The number of resamples to perform. Default is 9999.
    batch : int, optional
        The number of resamples to process in each vectorized call to
        the statistic. Batch sizes >>1 tend to be faster when the statistic
        is vectorized, but memory usage scales linearly with the batch size.
        Default is ``None``, which processes all resamples in a single batch.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        Pseudorandom number generator state used to generate resamples.

        If `random_state` is already a ``Generator`` or ``RandomState``
        instance, then that instance is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is ``None`` (default), the
        `numpy.random.RandomState` singleton is used.
    method : {'bca', 'percentile', 'basic'}
        Whether to use the 'percentile' bootstrap ('percentile'), the 'basic'
        (AKA 'reverse') bootstrap ('basic'), or the bias-corrected and
        accelerated bootstrap ('BCa', default).
    """
    random_state: object = None
    method: str = 'BCa'

    def _asdict(self):
        # `dataclasses.asdict` deepcopies; we don't want that.
        return dict(n_resamples=self.n_resamples, batch=self.batch,
                    random_state=self.random_state, method=self.method)
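
# A minimal usage sketch, assuming a scipy version whose hypothesis tests
# (e.g. `scipy.stats.pearsonr`) accept a resampling `method` argument:
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> rng = np.random.default_rng(12345)
# >>> x, y = rng.normal(size=(2, 30))
# >>> method = PermutationMethod(n_resamples=999, random_state=rng)
# >>> res = stats.pearsonr(x, y, method=method)  # permutation-based p-value
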
env-llmeval/lib/python3.10/site-packages/scipy/stats/_result_classes.py
ADDED
@@ -0,0 +1,40 @@
# This module exists only to allow Sphinx to generate docs
# for the result objects returned by some functions in stats
# _without_ adding them to the main stats documentation page.

"""
Result classes
--------------

.. currentmodule:: scipy.stats._result_classes

.. autosummary::
   :toctree: generated/

   RelativeRiskResult
   BinomTestResult
   TukeyHSDResult
   DunnettResult
   PearsonRResult
   FitResult
   OddsRatioResult
   TtestResult
   ECDFResult
   EmpiricalDistributionFunction

"""

__all__ = ['BinomTestResult', 'RelativeRiskResult', 'TukeyHSDResult',
           'PearsonRResult', 'FitResult', 'OddsRatioResult',
           'TtestResult', 'DunnettResult', 'ECDFResult',
           'EmpiricalDistributionFunction']


from ._binomtest import BinomTestResult
from ._odds_ratio import OddsRatioResult
from ._relative_risk import RelativeRiskResult
from ._hypotests import TukeyHSDResult
from ._multicomp import DunnettResult
from ._stats_py import PearsonRResult, TtestResult
from ._fit import FitResult
from ._survival import ECDFResult, EmpiricalDistributionFunction

env-llmeval/lib/python3.10/site-packages/scipy/stats/_sampling.py
ADDED
@@ -0,0 +1,1314 @@
import math
import numbers
import numpy as np
from scipy import stats
from scipy import special as sc
from ._qmc import (check_random_state as check_random_state_qmc,
                   Halton, QMCEngine)
from ._unuran.unuran_wrapper import NumericalInversePolynomial
from scipy._lib._util import check_random_state


__all__ = ['FastGeneratorInversion', 'RatioUniforms']


# define pdfs and other helper functions to create the generators

def argus_pdf(x, chi):
    # approach follows Baumgarten/Hoermann: Generating ARGUS random variates
    # for chi > 5, use relationship of the ARGUS distribution to Gamma(1.5)
    if chi <= 5:
        y = 1 - x * x
        return x * math.sqrt(y) * math.exp(-0.5 * chi**2 * y)
    return math.sqrt(x) * math.exp(-x)


def argus_gamma_trf(x, chi):
    if chi <= 5:
        return x
    return np.sqrt(1.0 - 2 * x / chi**2)


def argus_gamma_inv_trf(x, chi):
    if chi <= 5:
        return x
    return 0.5 * chi**2 * (1 - x**2)


def betaprime_pdf(x, a, b):
    if x > 0:
        logf = (a - 1) * math.log(x) - (a + b) * math.log1p(x) - sc.betaln(a, b)
        return math.exp(logf)
    else:
        # return pdf at x == 0 separately to avoid runtime warnings
        if a > 1:
            return 0
        elif a < 1:
            return np.inf
        else:
            return 1 / sc.beta(a, b)


def beta_valid_params(a, b):
    return (min(a, b) >= 0.1) and (max(a, b) <= 700)


def gamma_pdf(x, a):
    if x > 0:
        return math.exp(-math.lgamma(a) + (a - 1.0) * math.log(x) - x)
    else:
        return 0 if a >= 1 else np.inf


def invgamma_pdf(x, a):
    if x > 0:
        return math.exp(-(a + 1.0) * math.log(x) - math.lgamma(a) - 1 / x)
    else:
        return 0 if a >= 1 else np.inf


def burr_pdf(x, cc, dd):
    # note: we use np.exp instead of math.exp, otherwise an overflow
    # error can occur in the setup, e.g., for parameters
    # 1.89128135, 0.30195177, see test test_burr_overflow
    if x > 0:
        lx = math.log(x)
        return np.exp(-(cc + 1) * lx - (dd + 1) * math.log1p(np.exp(-cc * lx)))
    else:
        return 0


def burr12_pdf(x, cc, dd):
    if x > 0:
        lx = math.log(x)
        logterm = math.log1p(math.exp(cc * lx))
        return math.exp((cc - 1) * lx - (dd + 1) * logterm + math.log(cc * dd))
    else:
        return 0


def chi_pdf(x, a):
    if x > 0:
        return math.exp(
            (a - 1) * math.log(x)
            - 0.5 * (x * x)
            - (a / 2 - 1) * math.log(2)
            - math.lgamma(0.5 * a)
        )
    else:
        return 0 if a >= 1 else np.inf


def chi2_pdf(x, df):
    if x > 0:
        return math.exp(
            (df / 2 - 1) * math.log(x)
            - 0.5 * x
            - (df / 2) * math.log(2)
            - math.lgamma(0.5 * df)
        )
    else:
        return 0 if df >= 1 else np.inf


def alpha_pdf(x, a):
    if x > 0:
        return math.exp(-2.0 * math.log(x) - 0.5 * (a - 1.0 / x) ** 2)
    return 0.0


def bradford_pdf(x, c):
    if 0 <= x <= 1:
        return 1.0 / (1.0 + c * x)
    return 0.0


def crystalball_pdf(x, b, m):
    if x > -b:
        return math.exp(-0.5 * x * x)
    return math.exp(m * math.log(m / b) - 0.5 * b * b - m * math.log(m / b - b - x))


def weibull_min_pdf(x, c):
    if x > 0:
        return c * math.exp((c - 1) * math.log(x) - x**c)
    return 0.0


def weibull_max_pdf(x, c):
    if x < 0:
        return c * math.exp((c - 1) * math.log(-x) - ((-x) ** c))
    return 0.0


def invweibull_pdf(x, c):
    if x > 0:
        return c * math.exp(-(c + 1) * math.log(x) - x ** (-c))
    return 0.0


def wald_pdf(x):
    if x > 0:
        return math.exp(-((x - 1) ** 2) / (2 * x)) / math.sqrt(x**3)
    return 0.0
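

# A quick consistency sketch (illustrative): helpers that include the
# normalization constant agree with the corresponding scipy.stats pdf, e.g.:
#
# >>> from scipy import stats
# >>> abs(betaprime_pdf(0.5, 2.0, 3.0)
# ...     - stats.betaprime.pdf(0.5, 2.0, 3.0)) < 1e-12
# True

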
def geninvgauss_mode(p, b):
    # equivalent mode formulas; pick the numerically more stable version
    if p > 1:
        return (math.sqrt((1 - p) ** 2 + b**2) - (1 - p)) / b
    return b / (math.sqrt((1 - p) ** 2 + b**2) + (1 - p))


def geninvgauss_pdf(x, p, b):
    m = geninvgauss_mode(p, b)
    lfm = (p - 1) * math.log(m) - 0.5 * b * (m + 1 / m)
    if x > 0:
        return math.exp((p - 1) * math.log(x) - 0.5 * b * (x + 1 / x) - lfm)
    return 0.0
def invgauss_mode(mu):
    return 1.0 / (math.sqrt(1.5 * 1.5 + 1 / (mu * mu)) + 1.5)


def invgauss_pdf(x, mu):
    m = invgauss_mode(mu)
    lfm = -1.5 * math.log(m) - (m - mu) ** 2 / (2 * m * mu**2)
    if x > 0:
        return math.exp(-1.5 * math.log(x) - (x - mu) ** 2 / (2 * x * mu**2) - lfm)
    return 0.0


def powerlaw_pdf(x, a):
    if x > 0:
        return x ** (a - 1)
    return 0.0


# Define a dictionary: for a given distribution (keys), another dictionary
# (values) specifies the parameters for NumericalInversePolynomial (PINV).
# The keys of the latter dictionary are:
# - pdf: the pdf of the distribution (callable). The signature of the pdf
#   is float -> float (i.e., the function does not have to be vectorized).
#   If possible, functions like log or exp from the module math should be
#   preferred over functions from numpy since the PINV setup will be faster
#   in that case.
# - check_pinv_params: callable f that returns true if the shape parameters
#   (args) are recommended parameters for PINV (i.e., the u-error does
#   not exceed the default tolerance)
# - center: scalar if the center does not depend on args, otherwise a
#   callable that returns the center as a function of the shape parameters
# - rvs_transform: a callable that can be used to transform the rvs that
#   are distributed according to the pdf to the target distribution
#   (as an example, see the entry for the beta distribution)
# - rvs_transform_inv: the inverse of rvs_transform (it is required
#   for the transformed ppf)
# - mirror_uniform: boolean or a callable that returns true or false
#   depending on the shape parameters. If True, the ppf is applied
#   to 1-u instead of u to generate rvs, where u is a uniform rv.
#   While both u and 1-u are uniform, it can be required to use 1-u
#   to compute the u-error correctly. This is only relevant for the argus
#   distribution.
# The only required keys are "pdf" and "check_pinv_params".
# All other keys are optional.

PINV_CONFIG = {
    "alpha": {
        "pdf": alpha_pdf,
        "check_pinv_params": lambda a: 1.0e-11 <= a < 2.1e5,
        "center": lambda a: 0.25 * (math.sqrt(a * a + 8.0) - a),
    },
    "anglit": {
        "pdf": lambda x: math.cos(2 * x) + 1.0e-13,
        # +1.e-13 is necessary, otherwise PINV has strange problems as
        # f(upper border) is very close to 0
        "center": 0,
    },
    "argus": {
        "pdf": argus_pdf,
        "center": lambda chi: 0.7 if chi <= 5 else 0.5,
        "check_pinv_params": lambda chi: 1e-20 < chi < 901,
        "rvs_transform": argus_gamma_trf,
        "rvs_transform_inv": argus_gamma_inv_trf,
        "mirror_uniform": lambda chi: chi > 5,
    },
    "beta": {
        "pdf": betaprime_pdf,
        "center": lambda a, b: max(0.1, (a - 1) / (b + 1)),
        "check_pinv_params": beta_valid_params,
        "rvs_transform": lambda x, *args: x / (1 + x),
        "rvs_transform_inv": lambda x, *args: x / (1 - x) if x < 1 else np.inf,
    },
    "betaprime": {
        "pdf": betaprime_pdf,
        "center": lambda a, b: max(0.1, (a - 1) / (b + 1)),
        "check_pinv_params": beta_valid_params,
    },
    "bradford": {
        "pdf": bradford_pdf,
        "check_pinv_params": lambda a: 1.0e-6 <= a <= 1e9,
        "center": 0.5,
    },
    "burr": {
        "pdf": burr_pdf,
        "center": lambda a, b: (2 ** (1 / b) - 1) ** (-1 / a),
        "check_pinv_params": lambda a, b: (min(a, b) >= 0.3) and (max(a, b) <= 50),
    },
    "burr12": {
        "pdf": burr12_pdf,
        "center": lambda a, b: (2 ** (1 / b) - 1) ** (1 / a),
        "check_pinv_params": lambda a, b: (min(a, b) >= 0.2) and (max(a, b) <= 50),
    },
    "cauchy": {
        "pdf": lambda x: 1 / (1 + (x * x)),
        "center": 0,
    },
    "chi": {
        "pdf": chi_pdf,
        "check_pinv_params": lambda df: 0.05 <= df <= 1.0e6,
        "center": lambda a: math.sqrt(a),
    },
    "chi2": {
        "pdf": chi2_pdf,
        "check_pinv_params": lambda df: 0.07 <= df <= 1e6,
        "center": lambda a: a,
    },
    "cosine": {
        "pdf": lambda x: 1 + math.cos(x),
        "center": 0,
    },
    "crystalball": {
        "pdf": crystalball_pdf,
        "check_pinv_params": lambda b, m: (0.01 <= b <= 5.5)
                                          and (1.1 <= m <= 75.1),
        "center": 0.0,
    },
    "expon": {
        "pdf": lambda x: math.exp(-x),
        "center": 1.0,
    },
    "gamma": {
        "pdf": gamma_pdf,
        "check_pinv_params": lambda a: 0.04 <= a <= 1e6,
        "center": lambda a: a,
    },
    "gennorm": {
        "pdf": lambda x, b: math.exp(-abs(x) ** b),
        "check_pinv_params": lambda b: 0.081 <= b <= 45.0,
        "center": 0.0,
    },
    "geninvgauss": {
        "pdf": geninvgauss_pdf,
        "check_pinv_params": lambda p, b: (abs(p) <= 1200.0)
                                          and (1.0e-10 <= b <= 1200.0),
        "center": geninvgauss_mode,
    },
    "gumbel_l": {
        "pdf": lambda x: math.exp(x - math.exp(x)),
        "center": -0.6,
    },
    "gumbel_r": {
        "pdf": lambda x: math.exp(-x - math.exp(-x)),
        "center": 0.6,
    },
    "hypsecant": {
        "pdf": lambda x: 1.0 / (math.exp(x) + math.exp(-x)),
        "center": 0.0,
    },
    "invgamma": {
        "pdf": invgamma_pdf,
        "check_pinv_params": lambda a: 0.04 <= a <= 1e6,
        "center": lambda a: 1 / a,
    },
    "invgauss": {
        "pdf": invgauss_pdf,
        "check_pinv_params": lambda mu: 1.0e-10 <= mu <= 1.0e9,
        "center": invgauss_mode,
    },
    "invweibull": {
        "pdf": invweibull_pdf,
        "check_pinv_params": lambda a: 0.12 <= a <= 512,
        "center": 1.0,
    },
    "laplace": {
        "pdf": lambda x: math.exp(-abs(x)),
        "center": 0.0,
    },
    "logistic": {
        "pdf": lambda x: math.exp(-x) / (1 + math.exp(-x)) ** 2,
        "center": 0.0,
    },
    "maxwell": {
        "pdf": lambda x: x * x * math.exp(-0.5 * x * x),
        "center": 1.41421,
    },
    "moyal": {
        "pdf": lambda x: math.exp(-(x + math.exp(-x)) / 2),
        "center": 1.2,
    },
    "norm": {
        "pdf": lambda x: math.exp(-x * x / 2),
        "center": 0.0,
    },
    "pareto": {
        "pdf": lambda x, b: x ** -(b + 1),
        "center": lambda b: b / (b - 1) if b > 2 else 1.5,
        "check_pinv_params": lambda b: 0.08 <= b <= 400000,
    },
    "powerlaw": {
        "pdf": powerlaw_pdf,
        "center": 1.0,
        "check_pinv_params": lambda a: 0.06 <= a <= 1.0e5,
    },
    "t": {
        "pdf": lambda x, df: (1 + x * x / df) ** (-0.5 * (df + 1)),
        "check_pinv_params": lambda a: 0.07 <= a <= 1e6,
        "center": 0.0,
    },
    "rayleigh": {
        "pdf": lambda x: x * math.exp(-0.5 * (x * x)),
        "center": 1.0,
    },
    "semicircular": {
        "pdf": lambda x: math.sqrt(1.0 - (x * x)),
        "center": 0,
    },
    "wald": {
        "pdf": wald_pdf,
        "center": 1.0,
    },
    "weibull_max": {
        "pdf": weibull_max_pdf,
        "check_pinv_params": lambda a: 0.25 <= a <= 512,
        "center": -1.0,
    },
    "weibull_min": {
        "pdf": weibull_min_pdf,
        "check_pinv_params": lambda a: 0.25 <= a <= 512,
        "center": 1.0,
    },
}
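

# A minimal consumption sketch for the config above (illustrative): each
# "pdf" is a scalar function of x and the shape parameters, and
# "check_pinv_params" gates the recommended parameter range.
#
# >>> import math
# >>> cfg = PINV_CONFIG["gamma"]
# >>> cfg["check_pinv_params"](1.5)
# True
# >>> abs(cfg["pdf"](1.0, 1.5) - math.exp(-1) / math.gamma(1.5)) < 1e-12
# True

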
def _validate_qmc_input(qmc_engine, d, seed):
    # Input validation for `qmc_engine` and `d`.
    # Error messages for invalid `d` are raised by QMCEngine.
    # We could probably use a stats.qmc.check_qrandom_state here.
    if isinstance(qmc_engine, QMCEngine):
        if d is not None and qmc_engine.d != d:
            message = "`d` must be consistent with dimension of `qmc_engine`."
            raise ValueError(message)
        d = qmc_engine.d if d is None else d
    elif qmc_engine is None:
        d = 1 if d is None else d
        qmc_engine = Halton(d, seed=seed)
    else:
        message = (
            "`qmc_engine` must be an instance of "
            "`scipy.stats.qmc.QMCEngine` or `None`."
        )
        raise ValueError(message)

    return qmc_engine, d
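

# A minimal behavior sketch for `_validate_qmc_input`: with no engine and no
# dimension supplied, a one-dimensional Halton engine is constructed.
#
# >>> engine, d = _validate_qmc_input(None, None, seed=1234)
# >>> type(engine).__name__, d
# ('Halton', 1)

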
class CustomDistPINV:
    def __init__(self, pdf, args):
        self._pdf = lambda x: pdf(x, *args)

    def pdf(self, x):
        return self._pdf(x)


class FastGeneratorInversion:
    """
    Fast sampling by numerical inversion of the CDF for a large class of
    continuous distributions in `scipy.stats`.

    Parameters
    ----------
    dist : rv_frozen object
        Frozen distribution object from `scipy.stats`. The list of supported
        distributions can be found in the Notes section. The shape parameters,
        `loc` and `scale` used to create the distributions must be scalars.
        For example, for the Gamma distribution with shape parameter `p`,
        `p` has to be a float, and for the beta distribution with shape
        parameters (a, b), both a and b have to be floats.
    domain : tuple of floats, optional
        If one wishes to sample from a truncated/conditional distribution,
        the domain has to be specified.
        The default is None. In that case, the random variates are not
        truncated, and the domain is inferred from the support of the
        distribution.
    ignore_shape_range : boolean, optional
        If False, shape parameters that are outside of the valid range of
        values (needed to ensure that the numerical accuracy is high; see
        Notes) raise a ValueError. If True, any shape parameters that are
        valid for the distribution are accepted. This can be useful for
        testing. The default is False.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        A NumPy random number generator or seed for the underlying NumPy
        random number generator used to generate the stream of uniform
        random numbers.
        If `random_state` is None, it uses ``self.random_state``.
        If `random_state` is an int,
        ``np.random.default_rng(random_state)`` is used.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.

    Attributes
    ----------
    loc : float
        The location parameter.
    random_state : {`numpy.random.Generator`, `numpy.random.RandomState`}
        The random state used in relevant methods like `rvs` (unless
        another `random_state` is passed as an argument to these methods).
    scale : float
        The scale parameter.

    Methods
    -------
    cdf
    evaluate_error
    ppf
    qrvs
    rvs
    support

    Notes
    -----
    The class creates an object for continuous distributions specified
    by `dist`. The method `rvs` uses a generator from
    `scipy.stats.sampling` that is created when the object is instantiated.
    In addition, the methods `qrvs` and `ppf` are added.
    `qrvs` generates samples based on quasi-random numbers from
    `scipy.stats.qmc`. `ppf` is the PPF based on the
    numerical inversion method in [1]_ (`NumericalInversePolynomial`) that is
    used to generate random variates.

    Supported distributions (`distname`) are:
    ``alpha``, ``anglit``, ``argus``, ``beta``, ``betaprime``, ``bradford``,
    ``burr``, ``burr12``, ``cauchy``, ``chi``, ``chi2``, ``cosine``,
    ``crystalball``, ``expon``, ``gamma``, ``gennorm``, ``geninvgauss``,
    ``gumbel_l``, ``gumbel_r``, ``hypsecant``, ``invgamma``, ``invgauss``,
    ``invweibull``, ``laplace``, ``logistic``, ``maxwell``, ``moyal``,
    ``norm``, ``pareto``, ``powerlaw``, ``t``, ``rayleigh``, ``semicircular``,
    ``wald``, ``weibull_max``, ``weibull_min``.

    `rvs` relies on the accuracy of the numerical inversion. If very extreme
    shape parameters are used, the numerical inversion might not work.
    However, for all implemented distributions, the admissible shape
    parameters have been tested, and an error will be raised if the user
    supplies values outside of the allowed range. The u-error should not
    exceed 1e-10 for all valid parameters. Note that warnings might be raised
    even if parameters are within the valid range when the object is
    instantiated. To check numerical accuracy, the method `evaluate_error`
    can be used.

    Note that all implemented distributions are also part of `scipy.stats`,
    and the object created by `FastGeneratorInversion` relies on methods like
    `ppf`, `cdf` and `pdf` from `rv_frozen`. The main benefit of using this
    class can be summarized as follows: Once the generator to sample random
    variates is created in the setup step, sampling and evaluation of the PPF
    using `ppf` are very fast, and performance is essentially independent of
    the distribution. Therefore, a substantial speed-up can be achieved for
    many distributions if large numbers of random variates are required. It
    is important to know that this fast sampling is achieved by inversion of
    the CDF. Thus, one uniform random variate is transformed into a
    non-uniform variate, which is an advantage for several simulation
    methods, e.g., when the variance reduction methods of common random
    variates or antithetic variates are used ([2]_).

    In addition, inversion makes it possible

    - to use a QMC generator from `scipy.stats.qmc` (method `qrvs`),
    - to generate random variates truncated to an interval. For example, if
      one aims to sample standard normal random variates from
      the interval (2, 4), this can be easily achieved by using the parameter
      `domain`.

    The location and scale that are initially defined by `dist`
    can be reset without having to rerun the setup
    step to create the generator that is used for sampling. The relation
    of the distribution `Y` with `loc` and `scale` to the standard
    distribution `X` (i.e., ``loc=0`` and ``scale=1``) is given by
    ``Y = loc + scale * X``.

    References
    ----------
    .. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold.
           "Random variate generation by numerical inversion when only the
           density is known." ACM Transactions on Modeling and Computer
           Simulation (TOMACS) 20.4 (2010): 1-25.
    .. [2] Hörmann, Wolfgang, Josef Leydold and Gerhard Derflinger.
           "Automatic nonuniform random number generation."
           Springer, 2004.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> from scipy.stats.sampling import FastGeneratorInversion

    Let's start with a simple example to illustrate the main features:

    >>> gamma_frozen = stats.gamma(1.5)
    >>> gamma_dist = FastGeneratorInversion(gamma_frozen)
    >>> r = gamma_dist.rvs(size=1000)

    The mean should be approximately equal to the shape parameter 1.5:

    >>> r.mean()
    1.52423591130436  # may vary

    Similarly, we can draw a sample based on quasi-random numbers:

    >>> r = gamma_dist.qrvs(size=1000)
    >>> r.mean()
    1.4996639255942914  # may vary

    Compare the exact PPF against the approximation `ppf`:

    >>> q = [0.001, 0.2, 0.5, 0.8, 0.999]
    >>> np.max(np.abs(gamma_frozen.ppf(q) - gamma_dist.ppf(q)))
    4.313394796895409e-08

    To confirm that the numerical inversion is accurate, we evaluate the
    approximation error (u-error), which should be below 1e-10 (for more
    details, refer to the documentation of `evaluate_error`):

    >>> gamma_dist.evaluate_error()
    (7.446320551265581e-11, nan)  # may vary

    Note that the location and scale can be changed without instantiating a
    new generator:

    >>> gamma_dist.loc = 2
    >>> gamma_dist.scale = 3
    >>> r = gamma_dist.rvs(size=1000)

    The mean should be approximately 2 + 3*1.5 = 6.5.

    >>> r.mean()
    6.399549295242894  # may vary

    Let us also illustrate how truncation can be applied:

    >>> trunc_norm = FastGeneratorInversion(stats.norm(), domain=(3, 4))
    >>> r = trunc_norm.rvs(size=1000)
    >>> 3 < r.min() < r.max() < 4
    True

    Check the mean:

    >>> r.mean()
    3.250433367078603  # may vary

    >>> stats.norm.expect(lb=3, ub=4, conditional=True)
    3.260454285589997

    In this particular case, `scipy.stats.truncnorm` could also be used to
    generate truncated normal random variates.

    """

    def __init__(
        self,
        dist,
        *,
        domain=None,
        ignore_shape_range=False,
        random_state=None,
    ):

        if isinstance(dist, stats.distributions.rv_frozen):
            distname = dist.dist.name
            if distname not in PINV_CONFIG.keys():
                raise ValueError(
                    f"Distribution '{distname}' is not supported. "
                    f"It must be one of {list(PINV_CONFIG.keys())}"
                )
        else:
            raise ValueError("`dist` must be a frozen distribution object")

        loc = dist.kwds.get("loc", 0)
        scale = dist.kwds.get("scale", 1)
        args = dist.args
        if not np.isscalar(loc):
            raise ValueError("loc must be scalar.")
        if not np.isscalar(scale):
            raise ValueError("scale must be scalar.")

        self._frozendist = getattr(stats, distname)(
            *args,
            loc=loc,
            scale=scale,
        )
        self._distname = distname

        nargs = np.broadcast_arrays(args)[0].size
        nargs_expected = self._frozendist.dist.numargs
        if nargs != nargs_expected:
            raise ValueError(
                f"Each of the {nargs_expected} shape parameters must be a "
                f"scalar, but {nargs} values are provided."
            )

        self.random_state = random_state

        if domain is None:
            self._domain = self._frozendist.support()
            self._p_lower = 0.0
            self._p_domain = 1.0
        else:
            self._domain = domain
            self._p_lower = self._frozendist.cdf(self._domain[0])
            _p_domain = self._frozendist.cdf(self._domain[1]) - self._p_lower
            self._p_domain = _p_domain
        self._set_domain_adj()
        self._ignore_shape_range = ignore_shape_range

        # the domain to be passed to NumericalInversePolynomial;
        # define a separate variable since, in case of a transformation,
        # domain_pinv will not be the same as self._domain
        self._domain_pinv = self._domain

        # get information about the distribution from the config to set up
        # the generator
        dist = self._process_config(distname, args)

        if self._rvs_transform_inv is not None:
            d0 = self._rvs_transform_inv(self._domain[0], *args)
            d1 = self._rvs_transform_inv(self._domain[1], *args)
            if d0 > d1:
                # swap values if the transformation is decreasing
                d0, d1 = d1, d0
            # only update _domain_pinv and not _domain:
            # _domain refers to the original distribution, _domain_pinv
            # to the transformed distribution
            self._domain_pinv = d0, d1

        # self._center has been set by the call self._process_config;
        # check if self._center is inside the transformed domain
        # _domain_pinv, otherwise move it to the endpoint that is closer
        if self._center is not None:
            if self._center < self._domain_pinv[0]:
                self._center = self._domain_pinv[0]
            elif self._center > self._domain_pinv[1]:
                self._center = self._domain_pinv[1]

        self._rng = NumericalInversePolynomial(
            dist,
            random_state=self.random_state,
            domain=self._domain_pinv,
            center=self._center,
        )

    @property
    def random_state(self):
        return self._random_state

    @random_state.setter
    def random_state(self, random_state):
        self._random_state = check_random_state_qmc(random_state)

    @property
    def loc(self):
        return self._frozendist.kwds.get("loc", 0)

    @loc.setter
    def loc(self, loc):
        if not np.isscalar(loc):
            raise ValueError("loc must be scalar.")
        self._frozendist.kwds["loc"] = loc
        # update the adjusted domain that depends on loc and scale
        self._set_domain_adj()

    @property
    def scale(self):
        return self._frozendist.kwds.get("scale", 1)

    @scale.setter
    def scale(self, scale):
        if not np.isscalar(scale):
            raise ValueError("scale must be scalar.")
        self._frozendist.kwds["scale"] = scale
        # update the adjusted domain that depends on loc and scale
        self._set_domain_adj()

    def _set_domain_adj(self):
        """ Adjust the domain based on loc and scale. """
        loc = self.loc
        scale = self.scale
        lb = self._domain[0] * scale + loc
        ub = self._domain[1] * scale + loc
        self._domain_adj = (lb, ub)

    def _process_config(self, distname, args):
        cfg = PINV_CONFIG[distname]
        if "check_pinv_params" in cfg:
            if not self._ignore_shape_range:
                if not cfg["check_pinv_params"](*args):
                    msg = ("No generator is defined for the shape parameters "
                           f"{args}. Use ignore_shape_range to proceed "
                           "with the selected values.")
                    raise ValueError(msg)

        if "center" in cfg.keys():
            if not np.isscalar(cfg["center"]):
                self._center = cfg["center"](*args)
            else:
                self._center = cfg["center"]
        else:
            self._center = None
        self._rvs_transform = cfg.get("rvs_transform", None)
        self._rvs_transform_inv = cfg.get("rvs_transform_inv", None)
        _mirror_uniform = cfg.get("mirror_uniform", None)
        if _mirror_uniform is None:
            self._mirror_uniform = False
        else:
            self._mirror_uniform = _mirror_uniform(*args)

        return CustomDistPINV(cfg["pdf"], args)

    def rvs(self, size=None):
        """
        Sample from the distribution by inversion.

        Parameters
        ----------
        size : int or tuple, optional
            The shape of samples. Default is ``None`` in which case a scalar
            sample is returned.

        Returns
        -------
        rvs : array_like
            A NumPy array of random variates.

        Notes
        -----
        Random variates are generated by numerical inversion of the CDF,
        i.e., `ppf` computed by `NumericalInversePolynomial` when the class
        is instantiated. Note that the
        default ``rvs`` method of the rv_continuous class is
        overwritten. Hence, a different stream of random numbers is generated
        even if the same seed is used.
        """
        # note: we cannot use self._rng.rvs directly in case
        # self._mirror_uniform is true
        u = self.random_state.uniform(size=size)
        if self._mirror_uniform:
            u = 1 - u
        r = self._rng.ppf(u)
        if self._rvs_transform is not None:
            r = self._rvs_transform(r, *self._frozendist.args)
        return self.loc + self.scale * r

    def ppf(self, q):
        """
        Very fast PPF (inverse CDF) of the distribution which
        is a very close approximation of the exact PPF values.

        Parameters
        ----------
        q : array_like
            Array with probabilities.

        Returns
        -------
        ppf : array_like
            Quantiles corresponding to the values in `q`.

        Notes
        -----
        The evaluation of the PPF is very fast, but it may have a large
        relative error in the far tails. The numerical precision of the PPF
        is controlled by the u-error, that is,
        ``max |u - CDF(PPF(u))|`` where the max is taken over points in
        the interval [0,1]; see `evaluate_error`.

        Note that this PPF is designed to generate random samples.
        """
        q = np.asarray(q)
        if self._mirror_uniform:
            x = self._rng.ppf(1 - q)
        else:
            x = self._rng.ppf(q)
        if self._rvs_transform is not None:
            x = self._rvs_transform(x, *self._frozendist.args)
        return self.scale * x + self.loc

    def qrvs(self, size=None, d=None, qmc_engine=None):
        """
        Quasi-random variates of the given distribution.

        The `qmc_engine` is used to draw uniform quasi-random variates, and
        these are converted to quasi-random variates of the given
        distribution using inverse transform sampling.

        Parameters
        ----------
        size : int, tuple of ints, or None; optional
            Defines shape of random variates array. Default is ``None``.
        d : int or None, optional
            Defines dimension of uniform quasi-random variates to be
            transformed. Default is ``None``.
        qmc_engine : scipy.stats.qmc.QMCEngine(d=1), optional
            Defines the object to use for drawing
            quasi-random variates. Default is ``None``, which uses
            `scipy.stats.qmc.Halton(1)`.

        Returns
        -------
        rvs : ndarray or scalar
            Quasi-random variates. See Notes for shape information.

        Notes
        -----
        The shape of the output array depends on `size`, `d`, and
        `qmc_engine`. The intent is for the interface to be natural, but the
        detailed rules to achieve this are complicated.

        - If `qmc_engine` is ``None``, a `scipy.stats.qmc.Halton` instance is
          created with dimension `d`. If `d` is not provided, ``d=1``.
        - If `qmc_engine` is not ``None`` and `d` is ``None``, `d` is
          determined from the dimension of the `qmc_engine`.
        - If `qmc_engine` is not ``None`` and `d` is not ``None`` but the
          dimensions are inconsistent, a ``ValueError`` is raised.
        - After `d` is determined according to the rules above, the output
          shape is ``tuple_shape + d_shape``, where:

          - ``tuple_shape = tuple()`` if `size` is ``None``,
          - ``tuple_shape = (size,)`` if `size` is an ``int``,
          - ``tuple_shape = size`` if `size` is a sequence,
          - ``d_shape = tuple()`` if `d` is ``None`` or `d` is 1, and
          - ``d_shape = (d,)`` if `d` is greater than 1.

        The elements of the returned array are part of a low-discrepancy
        sequence. If `d` is 1, this means that none of the samples are truly
        independent. If `d` > 1, each slice ``rvs[..., i]`` will be of a
        quasi-independent sequence; see `scipy.stats.qmc.QMCEngine` for
        details. Note that when `d` > 1, the samples returned are still those
        of the provided univariate distribution, not a multivariate
        generalization of that distribution.
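
        For example, drawing a ``(3, 4)``-shaped batch of standard normal
        variates from a one-dimensional engine:

        >>> from scipy import stats
        >>> from scipy.stats import qmc
        >>> from scipy.stats.sampling import FastGeneratorInversion
        >>> norm_gen = FastGeneratorInversion(stats.norm())
        >>> engine = qmc.Halton(d=1, seed=12345)
        >>> norm_gen.qrvs(size=(3, 4), qmc_engine=engine).shape
        (3, 4)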
"""
|
898 |
+
qmc_engine, d = _validate_qmc_input(qmc_engine, d, self.random_state)
|
899 |
+
# mainly copied from unuran_wrapper.pyx.templ
|
900 |
+
# `rvs` is flexible about whether `size` is an int or tuple, so this
|
901 |
+
# should be, too.
|
902 |
+
try:
|
903 |
+
if size is None:
|
904 |
+
tuple_size = (1,)
|
905 |
+
else:
|
906 |
+
tuple_size = tuple(size)
|
907 |
+
except TypeError:
|
908 |
+
tuple_size = (size,)
|
909 |
+
# we do not use rng.qrvs directly since we need to be
|
910 |
+
# able to apply the ppf to 1 - u
|
911 |
+
N = 1 if size is None else np.prod(size)
|
912 |
+
u = qmc_engine.random(N)
|
913 |
+
if self._mirror_uniform:
|
914 |
+
u = 1 - u
|
915 |
+
qrvs = self._ppf(u)
|
916 |
+
if self._rvs_transform is not None:
|
917 |
+
qrvs = self._rvs_transform(qrvs, *self._frozendist.args)
|
918 |
+
if size is None:
|
919 |
+
qrvs = qrvs.squeeze()[()]
|
920 |
+
else:
|
921 |
+
if d == 1:
|
922 |
+
qrvs = qrvs.reshape(tuple_size)
|
923 |
+
else:
|
924 |
+
qrvs = qrvs.reshape(tuple_size + (d,))
|
925 |
+
return self.loc + self.scale * qrvs
|
926 |
+
|
927 |
+
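    # A minimal usage sketch of `qrvs` illustrating the shape rules above;
    # a sketch, assuming a SciPy build that provides FastGeneratorInversion
    # and scipy.stats.qmc (not part of the original source):
    #
    #     from scipy import stats
    #     from scipy.stats import qmc
    #     from scipy.stats.sampling import FastGeneratorInversion
    #
    #     gen = FastGeneratorInversion(stats.norm())
    #     gen.qrvs(size=4).shape                              # -> (4,)
    #     gen.qrvs(size=4, qmc_engine=qmc.Halton(d=3)).shape  # -> (4, 3)
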
    def evaluate_error(self, size=100000, random_state=None, x_error=False):
        """
        Evaluate the numerical accuracy of the inversion (u- and x-error).

        Parameters
        ----------
        size : int, optional
            The number of random points over which the error is estimated.
            Default is ``100000``.
        random_state : {None, int, `numpy.random.Generator`,
                        `numpy.random.RandomState`}, optional

            A NumPy random number generator or seed for the underlying NumPy
            random number generator used to generate the stream of uniform
            random numbers.
            If `random_state` is None, use ``self.random_state``.
            If `random_state` is an int,
            ``np.random.default_rng(random_state)`` is used.
            If `random_state` is already a ``Generator`` or ``RandomState``
            instance, then that instance is used.

        Returns
        -------
        u_error, x_error : tuple of floats
            The maximal u-error and the maximal x-error (``np.nan`` if the
            x-error is not evaluated).

        Notes
        -----
        The numerical precision of the inverse CDF `ppf` is controlled by
        the u-error. It is computed as follows:
        ``max |u - CDF(PPF(u))|``, where the max is taken over `size` random
        points in the interval [0, 1]. `random_state` determines the random
        sample. Note that if `ppf` were exact, the u-error would be zero.

        The x-error measures the direct distance between the exact PPF
        and `ppf`. If ``x_error`` is set to ``True``, it is
        computed as the maximum of the minimum of the relative and absolute
        x-error:
        ``max(min(x_error_abs[i], x_error_rel[i]))``, where
        ``x_error_abs[i] = |PPF(u[i]) - PPF_fast(u[i])|``,
        ``x_error_rel[i] = |(PPF(u[i]) - PPF_fast(u[i])) / PPF(u[i])|``.
        Note that it is important to consider the relative x-error in the case
        that ``PPF(u)`` is close to zero or very large.

        By default, only the u-error is evaluated and the x-error is set to
        ``np.nan``. Note that the evaluation of the x-error will be very slow
        if the implementation of the PPF is slow.

        Further information about these error measures can be found in [1]_.

        References
        ----------
        .. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold.
               "Random variate generation by numerical inversion when only the
               density is known." ACM Transactions on Modeling and Computer
               Simulation (TOMACS) 20.4 (2010): 1-25.

        Examples
        --------

        >>> import numpy as np
        >>> from scipy import stats
        >>> from scipy.stats.sampling import FastGeneratorInversion

        Create an object for the normal distribution:

        >>> d_norm_frozen = stats.norm()
        >>> d_norm = FastGeneratorInversion(d_norm_frozen)

        To confirm that the numerical inversion is accurate, we evaluate the
        approximation error (u-error and x-error).

        >>> u_error, x_error = d_norm.evaluate_error(x_error=True)

        The u-error should be below 1e-10:

        >>> u_error
        8.785783212061915e-11  # may vary

        Compare the PPF against the approximation `ppf`:

        >>> q = [0.001, 0.2, 0.4, 0.6, 0.8, 0.999]
        >>> diff = np.abs(d_norm_frozen.ppf(q) - d_norm.ppf(q))
        >>> x_error_abs = np.max(diff)
        >>> x_error_abs
        1.2937954707581412e-08

        This is the absolute x-error evaluated at the points q. The relative
        error is given by

        >>> x_error_rel = np.max(diff / np.abs(d_norm_frozen.ppf(q)))
        >>> x_error_rel
        4.186725600453555e-09

        The x_error computed above is derived in a very similar way over a
        much larger set of random values q. At each value q[i], the minimum
        of the relative and absolute error is taken. The final value is then
        derived as the maximum of these values. In our example, we get the
        following value:

        >>> x_error
        4.507068014335139e-07  # may vary

        """
        if not isinstance(size, (numbers.Integral, np.integer)):
            raise ValueError("size must be an integer.")
        # urng will be used to draw the samples for testing the error.
        # it must not interfere with self.random_state. therefore, do not
        # call self.rvs, but draw uniform random numbers and apply
        # self.ppf (note: like in rvs, consider self._mirror_uniform)
        urng = check_random_state_qmc(random_state)
        u = urng.uniform(size=size)
        if self._mirror_uniform:
            u = 1 - u
        x = self.ppf(u)
        uerr = np.max(np.abs(self._cdf(x) - u))
        if not x_error:
            return uerr, np.nan
        ppf_u = self._ppf(u)
        x_error_abs = np.abs(self.ppf(u) - ppf_u)
        x_error_rel = x_error_abs / np.abs(ppf_u)
        x_error_combined = np.array([x_error_abs, x_error_rel]).min(axis=0)
        return uerr, np.max(x_error_combined)

    def support(self):
        """Support of the distribution.

        Returns
        -------
        a, b : float
            end-points of the distribution's support.

        Notes
        -----

        Note that the support of the distribution depends on `loc`,
        `scale` and `domain`.

        Examples
        --------

        >>> from scipy import stats
        >>> from scipy.stats.sampling import FastGeneratorInversion

        Define a truncated normal distribution:

        >>> d_norm = FastGeneratorInversion(stats.norm(), domain=(0, 1))
        >>> d_norm.support()
        (0, 1)

        Shift the distribution:

        >>> d_norm.loc = 2.5
        >>> d_norm.support()
        (2.5, 3.5)

        """
        return self._domain_adj

    def _cdf(self, x):
        """Cumulative distribution function (CDF)

        Parameters
        ----------
        x : array_like
            The values where the CDF is evaluated

        Returns
        -------
        y : ndarray
            CDF evaluated at x

        """
        y = self._frozendist.cdf(x)
        if self._p_domain == 1.0:
            return y
        return np.clip((y - self._p_lower) / self._p_domain, 0, 1)

    def _ppf(self, q):
        """Percent point function (inverse of `cdf`)

        Parameters
        ----------
        q : array_like
            lower tail probability

        Returns
        -------
        x : array_like
            quantile corresponding to the lower tail probability q.

        """
        if self._p_domain == 1.0:
            return self._frozendist.ppf(q)
        x = self._frozendist.ppf(self._p_domain * np.array(q) + self._p_lower)
        return np.clip(x, self._domain_adj[0], self._domain_adj[1])


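# A worked instance of the truncation mapping used by `_cdf` and `_ppf`
# above; a sketch, assuming a standard normal truncated to domain (0, 1)
# (not part of the original source). With Phi the normal CDF,
# _p_lower = Phi(0) = 0.5 and _p_domain = Phi(1) - Phi(0) ~= 0.3413, so
#
#     _cdf(x) = (Phi(x) - 0.5) / 0.3413       clipped to [0, 1]
#     _ppf(q) = Phi^{-1}(0.3413 * q + 0.5)    clipped to the domain

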
class RatioUniforms:
    """
    Generate random samples from a probability density function using the
    ratio-of-uniforms method.

    Parameters
    ----------
    pdf : callable
        A function with signature `pdf(x)` that is proportional to the
        probability density function of the distribution.
    umax : float
        The upper bound of the bounding rectangle in the u-direction.
    vmin : float
        The lower bound of the bounding rectangle in the v-direction.
    vmax : float
        The upper bound of the bounding rectangle in the v-direction.
    c : float, optional
        Shift parameter of the ratio-of-uniforms method, see Notes.
        Default is 0.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        If `random_state` is None (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance, then that instance is used.

    Methods
    -------
    rvs

    Notes
    -----
    Given a univariate probability density function `pdf` and a constant `c`,
    define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``.
    If ``(U, V)`` is a random vector uniformly distributed over ``A``,
    then ``V/U + c`` follows a distribution according to `pdf`.

    The above result (see [1]_, [2]_) can be used to sample random variables
    using only the PDF, i.e. no inversion of the CDF is required. Typical
    choices of `c` are zero or the mode of `pdf`. The set ``A`` is a subset of
    the rectangle ``R = [0, umax] x [vmin, vmax]`` where

    - ``umax = sup sqrt(pdf(x))``
    - ``vmin = inf (x - c) sqrt(pdf(x))``
    - ``vmax = sup (x - c) sqrt(pdf(x))``

    In particular, these values are finite if `pdf` is bounded and
    ``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails).
    One can generate ``(U, V)`` uniformly on ``R`` and return
    ``V/U + c`` if ``(U, V)`` are also in ``A``, which can be directly
    verified.

    The algorithm is not changed if one replaces `pdf` by ``k * pdf`` for any
    constant k > 0. Thus, it is often convenient to work with a function
    that is proportional to the probability density function by dropping
    unnecessary normalization factors.

    Intuitively, the method works well if ``A`` fills up most of the
    enclosing rectangle such that the probability is high that ``(U, V)``
    lies in ``A`` whenever it lies in ``R``, as the number of required
    iterations becomes too large otherwise. To be more precise, note that
    the expected number of iterations to draw ``(U, V)`` uniformly
    distributed on ``R`` such that ``(U, V)`` is also in ``A`` is given by
    the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin) / area(pdf)``,
    where ``area(pdf)`` is the integral of `pdf` (which is equal to one if the
    probability density function is used, but can take on other values if a
    function proportional to the density is used). The equality holds since
    the area of ``A`` is equal to ``0.5 * area(pdf)`` (Theorem 7.1 in [1]_).
    If the sampling fails to generate a single random variate after 50000
    iterations (i.e. not a single draw is in ``A``), an exception is raised.

    If the bounding rectangle is not correctly specified (i.e. if it does not
    contain ``A``), the algorithm samples from a distribution different from
    the one given by `pdf`. It is therefore recommended to perform a
    test such as `~scipy.stats.kstest` as a check.

    References
    ----------
    .. [1] L. Devroye, "Non-Uniform Random Variate Generation",
       Springer-Verlag, 1986.

    .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse
       Gaussian random variates", Statistics and Computing, 24(4),
       p. 547--557, 2014.

    .. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random
       Variables Using the Ratio of Uniform Deviates",
       ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats

    >>> from scipy.stats.sampling import RatioUniforms
    >>> rng = np.random.default_rng()

    Simulate normally distributed random variables. It is easy to compute the
    bounding rectangle explicitly in that case. For simplicity, we drop the
    normalization factor of the density.

    >>> f = lambda x: np.exp(-x**2 / 2)
    >>> v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
    >>> umax = np.sqrt(f(0))
    >>> gen = RatioUniforms(f, umax=umax, vmin=-v, vmax=v, random_state=rng)
    >>> r = gen.rvs(size=2500)

    The K-S test confirms that the random variates are indeed normally
    distributed (normality is not rejected at 5% significance level):

    >>> stats.kstest(r, 'norm')[1]
    0.250634764150542

    The exponential distribution provides another example where the bounding
    rectangle can be determined explicitly.

    >>> gen = RatioUniforms(lambda x: np.exp(-x), umax=1, vmin=0,
    ...                     vmax=2*np.exp(-1), random_state=rng)
    >>> r = gen.rvs(1000)
    >>> stats.kstest(r, 'expon')[1]
    0.21121052054580314

    """

    def __init__(self, pdf, *, umax, vmin, vmax, c=0, random_state=None):
        if vmin >= vmax:
            raise ValueError("vmin must be smaller than vmax.")

        if umax <= 0:
            raise ValueError("umax must be positive.")

        self._pdf = pdf
        self._umax = umax
        self._vmin = vmin
        self._vmax = vmax
        self._c = c
        self._rng = check_random_state(random_state)

    def rvs(self, size=1):
        """Sampling of random variates

        Parameters
        ----------
        size : int or tuple of ints, optional
            Number of random variates to be generated (default is 1).

        Returns
        -------
        rvs : ndarray
            The random variates distributed according to the probability
            distribution defined by the pdf.

        """
        size1d = tuple(np.atleast_1d(size))
        N = np.prod(size1d)  # number of rvs needed, reshape upon return

        # start sampling using ratio of uniforms method
        x = np.zeros(N)
        simulated, i = 0, 1

        # loop until N rvs have been generated: expected runtime is finite.
        # to avoid an infinite loop, raise an exception if not a single rv
        # has been generated after 50000 tries. even if the expected number
        # of iterations is 1000, the probability of this event is
        # (1 - 1/1000)**50000, which is of order 1e-22
        while simulated < N:
            k = N - simulated
            # simulate uniform rvs on [0, umax] and [vmin, vmax]
            u1 = self._umax * self._rng.uniform(size=k)
            v1 = self._rng.uniform(self._vmin, self._vmax, size=k)
            # apply rejection method
            rvs = v1 / u1 + self._c
            accept = (u1**2 <= self._pdf(rvs))
            num_accept = np.sum(accept)
            if num_accept > 0:
                x[simulated:(simulated + num_accept)] = rvs[accept]
                simulated += num_accept

            if (simulated == 0) and (i*N >= 50000):
                msg = (
                    f"Not a single random variate could be generated in {i*N} "
                    "attempts. The ratio of uniforms method does not appear "
                    "to work for the provided parameters. Please check the "
                    "pdf and the bounds."
                )
                raise RuntimeError(msg)
            i += 1

        return np.reshape(x, size1d)

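# A short check of the bounding rectangle used in the normal example of the
# docstring above; a sketch whose algebra is not in the original source:
# for f(x) = exp(-x**2 / 2), g(x) = x * sqrt(f(x)) = x * exp(-x**2 / 4) has
# g'(x) = exp(-x**2 / 4) * (1 - x**2 / 2), so |g| is maximized at
# x = +/- sqrt(2), giving vmax = -vmin = sqrt(2) * exp(-1/2), which matches
# v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) in the example.
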
env-llmeval/lib/python3.10/site-packages/scipy/stats/_sensitivity_analysis.py
ADDED
@@ -0,0 +1,712 @@
from __future__ import annotations

import inspect
from dataclasses import dataclass
from typing import (
    Callable, Literal, Protocol, TYPE_CHECKING
)

import numpy as np

from scipy.stats._common import ConfidenceInterval
from scipy.stats._qmc import check_random_state
from scipy.stats._resampling import BootstrapResult
from scipy.stats import qmc, bootstrap


if TYPE_CHECKING:
    import numpy.typing as npt
    from scipy._lib._util import DecimalNumber, IntNumber, SeedType


__all__ = [
    'sobol_indices'
]


def f_ishigami(x: npt.ArrayLike) -> np.ndarray:
    r"""Ishigami function.

    .. math::

        Y(\mathbf{x}) = \sin x_1 + 7 \sin^2 x_2 + 0.1 x_3^4 \sin x_1

    with :math:`\mathbf{x} \in [-\pi, \pi]^3`.

    Parameters
    ----------
    x : array_like ([x1, x2, x3], n)

    Returns
    -------
    f : array_like (n,)
        Function evaluation.

    References
    ----------
    .. [1] Ishigami, T. and T. Homma. "An importance quantification technique
       in uncertainty analysis for computer models." IEEE,
       :doi:`10.1109/ISUMA.1990.151285`, 1990.
    """
    x = np.atleast_2d(x)
    f_eval = (
        np.sin(x[0])
        + 7 * np.sin(x[1])**2
        + 0.1 * (x[2]**4) * np.sin(x[0])
    )
    return f_eval


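# A quick sanity check of `f_ishigami` (illustrative, not in the original
# source): at x = (pi/2, 0, 0), Y = sin(pi/2) + 7*sin(0)**2 + 0 = 1.
#
#     >>> f_ishigami(np.array([[np.pi / 2], [0.0], [0.0]]))
#     array([1.])

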
def sample_A_B(
    n: IntNumber,
    dists: list[PPFDist],
    random_state: SeedType = None
) -> np.ndarray:
    """Sample two matrices A and B.

    Uses a Sobol' sequence with ``2*d`` columns to obtain 2 uncorrelated
    matrices. This is more efficient than using two random draws of Sobol'.
    See sec. 5 from [1]_.

    Output shape is (d, n).

    References
    ----------
    .. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    """
    d = len(dists)
    A_B = qmc.Sobol(d=2*d, seed=random_state, bits=64).random(n).T
    A_B = A_B.reshape(2, d, -1)
    try:
        for d_, dist in enumerate(dists):
            A_B[:, d_] = dist.ppf(A_B[:, d_])
    except AttributeError as exc:
        message = "Each distribution in `dists` must have method `ppf`."
        raise ValueError(message) from exc
    return A_B


def sample_AB(A: np.ndarray, B: np.ndarray) -> np.ndarray:
    """AB matrix.

    AB: rows of B into A. Shape (d, d, n).
    - Copy A into d "pages".
    - In the first page, replace the 1st row of A with the 1st row of B.
      ...
    - In the dth page, replace the dth row of A with the dth row of B.
    - Return the stack of pages.
    """
    d, n = A.shape
    AB = np.tile(A, (d, 1, 1))
    i = np.arange(d)
    AB[i, i] = B[i]
    return AB


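# A tiny worked example of the pick-freeze construction done by `sample_AB`,
# for d=2 parameters and n=3 samples (illustrative, not in the original
# source):
#
#     >>> A = np.array([[1, 2, 3],
#     ...               [4, 5, 6]])
#     >>> B = np.array([[7, 8, 9],
#     ...               [10, 11, 12]])
#     >>> # Page 0: A with its 1st row replaced by B's 1st row:
#     >>> # sample_AB(A, B)[0] -> [[7, 8, 9], [4, 5, 6]]
#     >>> # Page 1: A with its 2nd row replaced by B's 2nd row:
#     >>> # sample_AB(A, B)[1] -> [[1, 2, 3], [10, 11, 12]]

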
def saltelli_2010(
    f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray
) -> tuple[np.ndarray, np.ndarray]:
    r"""Saltelli2010 formulation.

    .. math::

        S_i = \frac{1}{N} \sum_{j=1}^N
            f(\mathbf{B})_j (f(\mathbf{AB}^{(i)})_j - f(\mathbf{A})_j)

    .. math::

        S_{T_i} = \frac{1}{2N} \sum_{j=1}^N
            (f(\mathbf{A})_j - f(\mathbf{AB}^{(i)})_j)^2

    Parameters
    ----------
    f_A, f_B : array_like (s, n)
        Function values at A and B, respectively.
    f_AB : array_like (d, s, n)
        Function values at each of the AB pages.

    Returns
    -------
    s, st : array_like (s, d)
        First order and total order Sobol' indices.

    References
    ----------
    .. [1] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    """
    # Empirical variance calculated using output from A and B, which are
    # independent. Output of AB is not independent and cannot be used.
    var = np.var([f_A, f_B], axis=(0, -1))

    # We divide by the variance to have a ratio of variance;
    # this leads to eq. 2
    s = np.mean(f_B * (f_AB - f_A), axis=-1) / var  # Table 2 (b)
    st = 0.5 * np.mean((f_A - f_AB) ** 2, axis=-1) / var  # Table 2 (f)

    return s.T, st.T


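# Shape bookkeeping for `saltelli_2010` (illustrative, not in the original
# source): with f_A, f_B of shape (s, n) and f_AB of shape (d, s, n),
# broadcasting makes f_B * (f_AB - f_A) an array of shape (d, s, n); the
# mean over axis -1 yields (d, s), and the final transpose returns the
# documented (s, d) layout. `var` has shape (s,) and broadcasts over the
# last axis of the (d, s) estimates.

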
@dataclass
class BootstrapSobolResult:
    first_order: BootstrapResult
    total_order: BootstrapResult


@dataclass
class SobolResult:
    first_order: np.ndarray
    total_order: np.ndarray
    _indices_method: Callable
    _f_A: np.ndarray
    _f_B: np.ndarray
    _f_AB: np.ndarray
    _A: np.ndarray | None = None
    _B: np.ndarray | None = None
    _AB: np.ndarray | None = None
    _bootstrap_result: BootstrapResult | None = None

    def bootstrap(
        self,
        confidence_level: DecimalNumber = 0.95,
        n_resamples: IntNumber = 999
    ) -> BootstrapSobolResult:
        """Bootstrap Sobol' indices to provide confidence intervals.

        Parameters
        ----------
        confidence_level : float, default: ``0.95``
            The confidence level of the confidence intervals.
        n_resamples : int, default: ``999``
            The number of resamples performed to form the bootstrap
            distribution of the indices.

        Returns
        -------
        res : BootstrapSobolResult
            Bootstrap result containing the confidence intervals and the
            bootstrap distribution of the indices.

            An object with attributes:

            first_order : BootstrapResult
                Bootstrap result of the first order indices.
            total_order : BootstrapResult
                Bootstrap result of the total order indices.
            See `BootstrapResult` for more details.

        """
        def statistic(idx):
            f_A_ = self._f_A[:, idx]
            f_B_ = self._f_B[:, idx]
            f_AB_ = self._f_AB[..., idx]
            return self._indices_method(f_A_, f_B_, f_AB_)

        n = self._f_A.shape[1]

        res = bootstrap(
            [np.arange(n)], statistic=statistic, method="BCa",
            n_resamples=n_resamples,
            confidence_level=confidence_level,
            bootstrap_result=self._bootstrap_result
        )
        self._bootstrap_result = res

        first_order = BootstrapResult(
            confidence_interval=ConfidenceInterval(
                res.confidence_interval.low[0], res.confidence_interval.high[0]
            ),
            bootstrap_distribution=res.bootstrap_distribution[0],
            standard_error=res.standard_error[0],
        )
        total_order = BootstrapResult(
            confidence_interval=ConfidenceInterval(
                res.confidence_interval.low[1], res.confidence_interval.high[1]
            ),
            bootstrap_distribution=res.bootstrap_distribution[1],
            standard_error=res.standard_error[1],
        )

        return BootstrapSobolResult(
            first_order=first_order, total_order=total_order
        )


class PPFDist(Protocol):
    @property
    def ppf(self) -> Callable[..., float]:
        ...


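# Any object exposing a `ppf` method satisfies the PPFDist protocol
# (structural typing), so a hypothetical custom distribution would work as
# an entry of `dists`; a sketch, not in the original source:
#
#     class MyUniform:
#         """Uniform distribution on [a, b] exposing only `ppf`."""
#         def __init__(self, a, b):
#             self.a, self.b = a, b
#         def ppf(self, q):
#             # inverse CDF of the uniform distribution on [a, b]
#             return self.a + (self.b - self.a) * np.asarray(q)

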
def sobol_indices(
    *,
    func: Callable[[np.ndarray], npt.ArrayLike] |
          dict[Literal['f_A', 'f_B', 'f_AB'], np.ndarray],
    n: IntNumber,
    dists: list[PPFDist] | None = None,
    method: Callable | Literal['saltelli_2010'] = 'saltelli_2010',
    random_state: SeedType = None
) -> SobolResult:
    r"""Global sensitivity indices of Sobol'.

    Parameters
    ----------
    func : callable or dict(str, array_like)
        If `func` is a callable, function to compute the Sobol' indices from.
        Its signature must be::

            func(x: ArrayLike) -> ArrayLike

        with ``x`` of shape ``(d, n)`` and output of shape ``(s, n)`` where:

        - ``d`` is the input dimensionality of `func`
          (number of input variables),
        - ``s`` is the output dimensionality of `func`
          (number of output variables), and
        - ``n`` is the number of samples (see `n` below).

        Function evaluation values must be finite.

        If `func` is a dictionary, it contains the function evaluations from
        three different arrays. Keys must be: ``f_A``, ``f_B`` and ``f_AB``.
        ``f_A`` and ``f_B`` should have a shape ``(s, n)`` and ``f_AB``
        should have a shape ``(d, s, n)``.
        This is an advanced feature and misuse can lead to wrong analysis.
    n : int
        Number of samples used to generate the matrices ``A`` and ``B``.
        Must be a power of 2. The total number of points at which `func` is
        evaluated will be ``n*(d+2)``.
    dists : list(distributions), optional
        List of each parameter's distribution. The distribution of parameters
        depends on the application and should be carefully chosen.
        Parameters are assumed to be independently distributed, meaning there
        is no constraint nor relationship between their values.

        Distributions must be an instance of a class with a ``ppf``
        method.

        Must be specified if `func` is a callable, and ignored otherwise.
    method : Callable or str, default: 'saltelli_2010'
        Method used to compute the first and total Sobol' indices.

        If a callable, its signature must be::

            func(f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray)
            -> Tuple[np.ndarray, np.ndarray]

        with ``f_A, f_B`` of shape ``(s, n)`` and ``f_AB`` of shape
        ``(d, s, n)``.
        These arrays contain the function evaluations from three different
        sets of samples.
        The output is a tuple of the first and total indices with
        shape ``(s, d)``.
        This is an advanced feature and misuse can lead to wrong analysis.
    random_state : {None, int, `numpy.random.Generator`}, optional
        If `random_state` is an int or None, a new `numpy.random.Generator`
        is created using ``np.random.default_rng(random_state)``.
        If `random_state` is already a ``Generator`` instance, then the
        provided instance is used.

    Returns
    -------
    res : SobolResult
        An object with attributes:

        first_order : ndarray of shape (s, d)
            First order Sobol' indices.
        total_order : ndarray of shape (s, d)
            Total order Sobol' indices.

        And method:

        bootstrap(confidence_level: float, n_resamples: int)
        -> BootstrapSobolResult

            A method providing confidence intervals on the indices.
            See `scipy.stats.bootstrap` for more details.

            The bootstrapping is done on both first and total order indices,
            and they are available in `BootstrapSobolResult` as attributes
            ``first_order`` and ``total_order``.

    Notes
    -----
    The Sobol' method [1]_, [2]_ is a variance-based Sensitivity Analysis
    which obtains the contribution of each parameter to the variance of the
    quantities of interest (QoIs; i.e., the outputs of `func`).
    Respective contributions can be used to rank the parameters and
    also gauge the complexity of the model by computing the
    model's effective (or mean) dimension.

    .. note::

        Parameters are assumed to be independently distributed. Each
        parameter can still follow any distribution. In fact, the
        distribution is very important and should match the real
        distribution of the parameters.

    It uses a functional decomposition of the variance of the function to
    explore

    .. math::

        \mathbb{V}(Y) = \sum_{i}^{d} \mathbb{V}_i (Y) + \sum_{i<j}^{d}
        \mathbb{V}_{ij}(Y) + ... + \mathbb{V}_{1,2,...,d}(Y),

    introducing conditional variances:

    .. math::

        \mathbb{V}_i(Y) = \mathbb{V}[\mathbb{E}(Y|x_i)]
        \qquad
        \mathbb{V}_{ij}(Y) = \mathbb{V}[\mathbb{E}(Y|x_i x_j)]
        - \mathbb{V}_i(Y) - \mathbb{V}_j(Y).

    Sobol' indices are expressed as

    .. math::

        S_i = \frac{\mathbb{V}_i(Y)}{\mathbb{V}[Y]}
        \qquad
        S_{ij} = \frac{\mathbb{V}_{ij}(Y)}{\mathbb{V}[Y]}.

    :math:`S_{i}` corresponds to the first-order term, which apprises the
    contribution of the i-th parameter, while :math:`S_{ij}` corresponds to
    the second-order term, which informs about the contribution of
    interactions between the i-th and the j-th parameters. These equations
    can be generalized to compute higher order terms; however, they are
    expensive to compute and their interpretation is complex.
    This is why only first order indices are provided.

    Total order indices represent the global contribution of the parameters
    to the variance of the QoI and are defined as:

    .. math::

        S_{T_i} = S_i + \sum_j S_{ij} + \sum_{j,k} S_{ijk} + ...
                = 1 - \frac{\mathbb{V}[\mathbb{E}(Y|x_{\sim i})]}
                           {\mathbb{V}[Y]}.

    First order indices sum to at most 1, while total order indices sum to
    at least 1. If there are no interactions, then first and total order
    indices are equal, and both first and total order indices sum to 1.

    .. warning::

        Negative Sobol' values are due to numerical errors. Increasing the
        number of points `n` should help.

        The number of samples required for a good analysis increases with
        the dimensionality of the problem, e.g. for a 3-dimensional problem,
        consider at minimum ``n >= 2**12``. The more complex the model is,
        the more samples will be needed.

        Even for a purely additive model, the indices may not sum to 1 due
        to numerical noise.

    References
    ----------
    .. [1] Sobol, I. M. "Sensitivity analysis for nonlinear mathematical
       models." Mathematical Modeling and Computational Experiment,
       1:407-414, 1993.
    .. [2] Sobol, I. M. "Global sensitivity indices for nonlinear
       mathematical models and their Monte Carlo estimates." Mathematics
       and Computers in Simulation, 55(1-3):271-280,
       :doi:`10.1016/S0378-4754(00)00270-6`, 2001.
    .. [3] Saltelli, A. "Making best use of model evaluations to
       compute sensitivity indices." Computer Physics Communications,
       145(2):280-297, :doi:`10.1016/S0010-4655(02)00280-1`, 2002.
    .. [4] Saltelli, A., M. Ratto, T. Andres, F. Campolongo, J. Cariboni,
       D. Gatelli, M. Saisana, and S. Tarantola. "Global Sensitivity
       Analysis. The Primer." 2007.
    .. [5] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
       S. Tarantola. "Variance based sensitivity analysis of model
       output. Design and estimator for the total sensitivity index."
       Computer Physics Communications, 181(2):259-270,
       :doi:`10.1016/j.cpc.2009.09.018`, 2010.
    .. [6] Ishigami, T. and T. Homma. "An importance quantification technique
       in uncertainty analysis for computer models." IEEE,
       :doi:`10.1109/ISUMA.1990.151285`, 1990.

    Examples
    --------
    The following is an example with the Ishigami function [6]_

    .. math::

        Y(\mathbf{x}) = \sin x_1 + 7 \sin^2 x_2 + 0.1 x_3^4 \sin x_1,

    with :math:`\mathbf{x} \in [-\pi, \pi]^3`. This function exhibits strong
    non-linearity and non-monotonicity.

    Remember, Sobol' indices assume that samples are independently
    distributed. In this case, we use a uniform distribution on each
    marginal.

    >>> import numpy as np
    >>> from scipy.stats import sobol_indices, uniform
    >>> rng = np.random.default_rng()
    >>> def f_ishigami(x):
    ...     f_eval = (
    ...         np.sin(x[0])
    ...         + 7 * np.sin(x[1])**2
    ...         + 0.1 * (x[2]**4) * np.sin(x[0])
    ...     )
    ...     return f_eval
    >>> indices = sobol_indices(
    ...     func=f_ishigami, n=1024,
    ...     dists=[
    ...         uniform(loc=-np.pi, scale=2*np.pi),
    ...         uniform(loc=-np.pi, scale=2*np.pi),
    ...         uniform(loc=-np.pi, scale=2*np.pi)
    ...     ],
    ...     random_state=rng
    ... )
    >>> indices.first_order
    array([0.31637954, 0.43781162, 0.00318825])
    >>> indices.total_order
    array([0.56122127, 0.44287857, 0.24229595])

    Confidence intervals can be obtained using bootstrapping.

    >>> boot = indices.bootstrap()

    Then, this information can be easily visualized.

    >>> import matplotlib.pyplot as plt
    >>> fig, axs = plt.subplots(1, 2, figsize=(9, 4))
    >>> _ = axs[0].errorbar(
    ...     [1, 2, 3], indices.first_order, fmt='o',
    ...     yerr=[
    ...         indices.first_order - boot.first_order.confidence_interval.low,
    ...         boot.first_order.confidence_interval.high - indices.first_order
    ...     ],
    ... )
    >>> axs[0].set_ylabel("First order Sobol' indices")
    >>> axs[0].set_xlabel('Input parameters')
    >>> axs[0].set_xticks([1, 2, 3])
    >>> _ = axs[1].errorbar(
    ...     [1, 2, 3], indices.total_order, fmt='o',
    ...     yerr=[
    ...         indices.total_order - boot.total_order.confidence_interval.low,
    ...         boot.total_order.confidence_interval.high - indices.total_order
    ...     ],
    ... )
    >>> axs[1].set_ylabel("Total order Sobol' indices")
    >>> axs[1].set_xlabel('Input parameters')
    >>> axs[1].set_xticks([1, 2, 3])
    >>> plt.tight_layout()
    >>> plt.show()

    .. note::

        By default, `scipy.stats.uniform` has support ``[0, 1]``.
        Using the parameters ``loc`` and ``scale``, one obtains the uniform
        distribution on ``[loc, loc + scale]``.

    This result is particularly interesting because the first order index
    :math:`S_{x_3} = 0`, whereas its total order is
    :math:`S_{T_{x_3}} = 0.244`. This means that higher order interactions
    with :math:`x_3` are responsible for the difference. Almost 25% of the
    observed variance of the QoI is due to interactions between :math:`x_3`
    and :math:`x_1`, although :math:`x_3` by itself has no impact on the QoI.

    The following gives a visual explanation of Sobol' indices on this
    function. Let's generate 1024 samples in :math:`[-\pi, \pi]^3` and
    calculate the value of the output.

    >>> from scipy.stats import qmc
    >>> n_dim = 3
    >>> p_labels = ['$x_1$', '$x_2$', '$x_3$']
    >>> sample = qmc.Sobol(d=n_dim, seed=rng).random(1024)
    >>> sample = qmc.scale(
    ...     sample=sample,
    ...     l_bounds=[-np.pi, -np.pi, -np.pi],
    ...     u_bounds=[np.pi, np.pi, np.pi]
    ... )
    >>> output = f_ishigami(sample.T)

    Now we can do scatter plots of the output with respect to each parameter.
    This gives a visual way to understand how each parameter impacts the
    output of the function.

    >>> fig, ax = plt.subplots(1, n_dim, figsize=(12, 4))
    >>> for i in range(n_dim):
    ...     xi = sample[:, i]
    ...     ax[i].scatter(xi, output, marker='+')
    ...     ax[i].set_xlabel(p_labels[i])
    >>> ax[0].set_ylabel('Y')
    >>> plt.tight_layout()
    >>> plt.show()

    Now Sobol' goes a step further:
    by conditioning the output value on given values of the parameter
    (black lines), the conditional output mean is computed. It corresponds to
    the term :math:`\mathbb{E}(Y|x_i)`. Taking the variance of this term gives
    the numerator of the Sobol' indices.

    >>> mini = np.min(output)
    >>> maxi = np.max(output)
    >>> n_bins = 10
    >>> bins = np.linspace(-np.pi, np.pi, num=n_bins, endpoint=False)
    >>> dx = bins[1] - bins[0]
    >>> fig, ax = plt.subplots(1, n_dim, figsize=(12, 4))
    >>> for i in range(n_dim):
    ...     xi = sample[:, i]
    ...     ax[i].scatter(xi, output, marker='+')
    ...     ax[i].set_xlabel(p_labels[i])
    ...     for bin_ in bins:
    ...         idx = np.where((bin_ <= xi) & (xi <= bin_ + dx))
    ...         xi_ = xi[idx]
    ...         y_ = output[idx]
    ...         ave_y_ = np.mean(y_)
    ...         ax[i].plot([bin_ + dx/2] * 2, [mini, maxi], c='k')
    ...         ax[i].scatter(bin_ + dx/2, ave_y_, c='r')
    >>> ax[0].set_ylabel('Y')
    >>> plt.tight_layout()
    >>> plt.show()

    Looking at :math:`x_3`, the variance of the mean is zero, leading to
    :math:`S_{x_3} = 0`. But we can further observe that the variance of the
    output is not constant along the parameter values of :math:`x_3`. This
    heteroscedasticity is explained by higher order interactions. Moreover,
    heteroscedasticity is also noticeable on :math:`x_1`, pointing to an
    interaction between :math:`x_3` and :math:`x_1`. On :math:`x_2`, the
    variance seems to be constant and thus a null interaction with this
    parameter can be supposed.

    This case is fairly simple to analyse visually, although it is only a
    qualitative analysis. Nevertheless, when the number of input parameters
    increases, such analysis becomes unrealistic as it would be difficult to
    conclude on high-order terms. Hence the benefit of using Sobol' indices.

    """
    random_state = check_random_state(random_state)

    n_ = int(n)
    # `n & (n - 1) == 0` is a standard bit trick that is true exactly for
    # powers of two (a power of two has a single bit set)
    if not (n_ & (n_ - 1) == 0) or n != n_:
        raise ValueError(
            "The balance properties of Sobol' points require 'n' "
            "to be a power of 2."
        )
    n = n_

    if not callable(method):
        indices_methods: dict[str, Callable] = {
            "saltelli_2010": saltelli_2010,
        }
        try:
            method = method.lower()  # type: ignore[assignment]
            indices_method_ = indices_methods[method]
        except KeyError as exc:
            message = (
                f"{method!r} is not a valid 'method'. It must be one of"
                f" {set(indices_methods)!r} or a callable."
            )
            raise ValueError(message) from exc
    else:
        indices_method_ = method
        sig = inspect.signature(indices_method_)

        if set(sig.parameters) != {'f_A', 'f_B', 'f_AB'}:
            message = (
                "If 'method' is a callable, it must have the following"
                f" signature: {inspect.signature(saltelli_2010)}"
            )
            raise ValueError(message)

    def indices_method(f_A, f_B, f_AB):
        """Wrap indices method to ensure proper output dimension.

        1D when single output, 2D otherwise.
        """
        return np.squeeze(indices_method_(f_A=f_A, f_B=f_B, f_AB=f_AB))

    if callable(func):
        if dists is None:
            raise ValueError(
                "'dists' must be defined when 'func' is a callable."
            )

        def wrapped_func(x):
            return np.atleast_2d(func(x))

        A, B = sample_A_B(n=n, dists=dists, random_state=random_state)
        AB = sample_AB(A=A, B=B)

        f_A = wrapped_func(A)

        if f_A.shape[1] != n:
            raise ValueError(
                "'func' output should have a shape ``(s, -1)`` with ``s`` "
                "the number of output."
            )

        def funcAB(AB):
            # flatten the (d, d, n) pages into a single (d, n*d) batch,
            # evaluate, then restore the (d, s, n) layout
            d, d, n = AB.shape
            AB = np.moveaxis(AB, 0, -1).reshape(d, n*d)
            f_AB = wrapped_func(AB)
            return np.moveaxis(f_AB.reshape((-1, n, d)), -1, 0)

        f_B = wrapped_func(B)
        f_AB = funcAB(AB)
    else:
        message = (
            "When 'func' is a dictionary, it must contain the following "
            "keys: 'f_A', 'f_B' and 'f_AB'. "
            "'f_A' and 'f_B' should have a shape ``(s, n)`` and 'f_AB' "
            "should have a shape ``(d, s, n)``."
        )
        try:
            f_A, f_B, f_AB = np.atleast_2d(
                func['f_A'], func['f_B'], func['f_AB']
            )
        except KeyError as exc:
            raise ValueError(message) from exc

        if f_A.shape[1] != n or f_A.shape != f_B.shape or \
                f_AB.shape == f_A.shape or f_AB.shape[-1] % n != 0:
            raise ValueError(message)

    # Normalization by mean
    # Sobol', I. and Levitan, Y. L. (1999). On the use of variance reducing
    # multipliers in Monte Carlo computations of a global sensitivity index.
    # Computer Physics Communications, 117(1):52-61.
    mean = np.mean([f_A, f_B], axis=(0, -1)).reshape(-1, 1)
    f_A -= mean
    f_B -= mean
    f_AB -= mean

    # Compute indices.
    # Filter warnings for constant output as var = 0
    with np.errstate(divide='ignore', invalid='ignore'):
        first_order, total_order = indices_method(f_A=f_A, f_B=f_B, f_AB=f_AB)

    # null variance means null indices
    first_order[~np.isfinite(first_order)] = 0
    total_order[~np.isfinite(total_order)] = 0

    res = dict(
        first_order=first_order,
        total_order=total_order,
        _indices_method=indices_method,
        _f_A=f_A,
        _f_B=f_B,
        _f_AB=f_AB
    )

    if callable(func):
        res.update(
            dict(
                _A=A,
                _B=B,
                _AB=AB,
            )
        )

    return SobolResult(**res)
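# `func` may also be the dictionary form documented above, with precomputed
# evaluations; a sketch of the expected shapes for d=3 inputs, s=1 output
# and n=1024 samples (illustrative, not in the original source):
#
#     >>> # f_A, f_B: arrays of shape (1, 1024); f_AB: shape (3, 1, 1024)
#     >>> # indices = sobol_indices(
#     >>> #     func={'f_A': f_A, 'f_B': f_B, 'f_AB': f_AB}, n=1024)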
env-llmeval/lib/python3.10/site-packages/scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (404 kB).
env-llmeval/lib/python3.10/site-packages/scipy/stats/_sobol.pyi
ADDED
@@ -0,0 +1,54 @@
import numpy as np
from scipy._lib._util import IntNumber
from typing import Literal

def _initialize_v(
    v: np.ndarray,
    dim: IntNumber,
    bits: IntNumber
) -> None: ...

def _cscramble(
    dim: IntNumber,
    bits: IntNumber,
    ltm: np.ndarray,
    sv: np.ndarray
) -> None: ...

def _fill_p_cumulative(
    p: np.ndarray,
    p_cumulative: np.ndarray
) -> None: ...

def _draw(
    n: IntNumber,
    num_gen: IntNumber,
    dim: IntNumber,
    scale: float,
    sv: np.ndarray,
    quasi: np.ndarray,
    sample: np.ndarray
) -> None: ...

def _fast_forward(
    n: IntNumber,
    num_gen: IntNumber,
    dim: IntNumber,
    sv: np.ndarray,
    quasi: np.ndarray
) -> None: ...

def _categorize(
    draws: np.ndarray,
    p_cumulative: np.ndarray,
    result: np.ndarray
) -> None: ...

_MAXDIM: Literal[21201]
_MAXDEG: Literal[18]

def _test_find_index(
    p_cumulative: np.ndarray,
    size: int,
    value: float
) -> int: ...
env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats.pxd
ADDED
@@ -0,0 +1,9 @@
# destined to be used in a LowLevelCallable
cdef double _geninvgauss_pdf(double x, void *user_data) except * nogil
cdef double _studentized_range_cdf(int n, double[2] x, void *user_data) noexcept nogil
cdef double _studentized_range_cdf_asymptotic(double z, void *user_data) noexcept nogil
cdef double _studentized_range_pdf(int n, double[2] x, void *user_data) noexcept nogil
cdef double _studentized_range_pdf_asymptotic(double z, void *user_data) noexcept nogil
cdef double _studentized_range_moment(int n, double[3] x_arg, void *user_data) noexcept nogil
cdef double _genhyperbolic_pdf(double x, void *user_data) except * nogil
cdef double _genhyperbolic_logpdf(double x, void *user_data) except * nogil
env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats_mstats_common.py
ADDED
@@ -0,0 +1,499 @@
import warnings
import numpy as np
from . import distributions
from .._lib._bunch import _make_tuple_bunch
from ._stats_pythran import siegelslopes as siegelslopes_pythran
from . import _mstats_basic

__all__ = ['_find_repeats', 'linregress', 'theilslopes', 'siegelslopes']

# This is not a namedtuple for backwards compatibility. See PR #12983
LinregressResult = _make_tuple_bunch('LinregressResult',
                                     ['slope', 'intercept', 'rvalue',
                                      'pvalue', 'stderr'],
                                     extra_field_names=['intercept_stderr'])
TheilslopesResult = _make_tuple_bunch('TheilslopesResult',
                                      ['slope', 'intercept',
                                       'low_slope', 'high_slope'])
SiegelslopesResult = _make_tuple_bunch('SiegelslopesResult',
                                       ['slope', 'intercept'])


def linregress(x, y=None, alternative='two-sided'):
    """
    Calculate a linear least-squares regression for two sets of measurements.

    Parameters
    ----------
    x, y : array_like
        Two sets of measurements. Both arrays should have the same length. If
        only `x` is given (and ``y=None``), then it must be a two-dimensional
        array where one dimension has length 2. The two sets of measurements
        are then found by splitting the array along the length-2 dimension. In
        the case where ``y=None`` and `x` is a 2x2 array, ``linregress(x)`` is
        equivalent to ``linregress(x[0], x[1])``.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the slope of the regression line is nonzero
        * 'less': the slope of the regression line is less than zero
        * 'greater': the slope of the regression line is greater than zero

        .. versionadded:: 1.7.0

    Returns
    -------
    result : ``LinregressResult`` instance
        The return value is an object with the following attributes:

        slope : float
            Slope of the regression line.
        intercept : float
            Intercept of the regression line.
        rvalue : float
            The Pearson correlation coefficient. The square of ``rvalue``
            is equal to the coefficient of determination.
        pvalue : float
            The p-value for a hypothesis test whose null hypothesis is
            that the slope is zero, using the Wald test with a t-distribution
            of the test statistic. See `alternative` above for the
            alternative hypotheses.
        stderr : float
            Standard error of the estimated slope (gradient), under the
            assumption of residual normality.
        intercept_stderr : float
            Standard error of the estimated intercept, under the assumption
            of residual normality.

    See Also
    --------
    scipy.optimize.curve_fit :
        Use non-linear least squares to fit a function to data.
    scipy.optimize.leastsq :
        Minimize the sum of squares of a set of equations.

    Notes
    -----
    Missing values are considered pair-wise: if a value is missing in `x`,
    the corresponding value in `y` is masked.

    For compatibility with older versions of SciPy, the return value acts
    like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``,
    ``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write::

        slope, intercept, r, p, se = linregress(x, y)

    With that style, however, the standard error of the intercept is not
    available. To have access to all the computed values, including the
    standard error of the intercept, use the return value as an object
    with attributes, e.g.::

        result = linregress(x, y)
        print(result.intercept, result.intercept_stderr)

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import stats
    >>> rng = np.random.default_rng()

    Generate some data:

    >>> x = rng.random(10)
    >>> y = 1.6*x + rng.random(10)

    Perform the linear regression:

    >>> res = stats.linregress(x, y)

    Coefficient of determination (R-squared):

    >>> print(f"R-squared: {res.rvalue**2:.6f}")
    R-squared: 0.717533

    Plot the data along with the fitted line:

    >>> plt.plot(x, y, 'o', label='original data')
    >>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line')
    >>> plt.legend()
    >>> plt.show()

    Calculate 95% confidence interval on slope and intercept:

    >>> # Two-sided inverse Students t-distribution
    >>> # p - probability, df - degrees of freedom
    >>> from scipy.stats import t
    >>> tinv = lambda p, df: abs(t.ppf(p/2, df))

    >>> ts = tinv(0.05, len(x)-2)
    >>> print(f"slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}")
    slope (95%): 1.453392 +/- 0.743465
    >>> print(f"intercept (95%): {res.intercept:.6f}"
    ...       f" +/- {ts*res.intercept_stderr:.6f}")
    intercept (95%): 0.616950 +/- 0.544475

    """
    TINY = 1.0e-20
    if y is None:  # x is a (2, N) or (N, 2) shaped array_like
        x = np.asarray(x)
        if x.shape[0] == 2:
            x, y = x
        elif x.shape[1] == 2:
            x, y = x.T
        else:
            raise ValueError("If only `x` is given as input, it has to "
                             "be of shape (2, N) or (N, 2); provided shape "
                             f"was {x.shape}.")
    else:
        x = np.asarray(x)
        y = np.asarray(y)

    if x.size == 0 or y.size == 0:
        raise ValueError("Inputs must not be empty.")

    if np.amax(x) == np.amin(x) and len(x) > 1:
        raise ValueError("Cannot calculate a linear regression "
                         "if all x values are identical")

    n = len(x)
    xmean = np.mean(x, None)
    ymean = np.mean(y, None)

    # Average sums of square differences from the mean:
    #   ssxm = mean( (x-mean(x))^2 )
    #   ssxym = mean( (x-mean(x)) * (y-mean(y)) )
    ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat

    # R-value:
    #   r = ssxym / sqrt( ssxm * ssym )
    if ssxm == 0.0 or ssym == 0.0:
        # If the denominator was going to be 0
        r = 0.0
    else:
        r = ssxym / np.sqrt(ssxm * ssym)
        # Test for numerical error propagation (make sure -1 < r < 1)
        if r > 1.0:
            r = 1.0
        elif r < -1.0:
            r = -1.0

    slope = ssxym / ssxm
    intercept = ymean - slope*xmean
    if n == 2:
        # handle case when only two points are passed in
        if y[0] == y[1]:
            prob = 1.0
        else:
            prob = 0.0
        slope_stderr = 0.0
        intercept_stderr = 0.0
    else:
        df = n - 2  # Number of degrees of freedom
        # n-2 degrees of freedom because 2 have been used up
        # to estimate the mean and standard deviation
        t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
        t, prob = _mstats_basic._ttest_finish(df, t, alternative)

        slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df)

        # Also calculate the standard error of the intercept.
        # The following relationship is used:
        #   ssxm = mean( (x-mean(x))^2 )
        #        = ssx - sx*sx
        #        = mean( x^2 ) - mean(x)^2
        # so that ssxm + xmean**2 = mean( x^2 ).
        intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2)

    return LinregressResult(slope=slope, intercept=intercept, rvalue=r,
                            pvalue=prob, stderr=slope_stderr,
                            intercept_stderr=intercept_stderr)
|
211 |
+
|
212 |
+
|
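# Illustrative sketch (an editorial addition, not part of SciPy): the two-row
# calling convention described in the `x, y` parameter documentation of
# `linregress` above. With ``y=None``, a (2, N) array is split into the two
# measurement sets, so both calls below give the same result.
def _demo_linregress_two_row():
    xy = np.array([[1.0, 2.0, 3.0, 4.0],
                   [2.1, 3.9, 6.2, 8.1]])
    res_packed = linregress(xy)             # x taken from xy[0], y from xy[1]
    res_split = linregress(xy[0], xy[1])
    assert np.isclose(res_packed.slope, res_split.slope)
    assert np.isclose(res_packed.intercept, res_split.intercept)
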
def theilslopes(y, x=None, alpha=0.95, method='separate'):
    r"""
    Computes the Theil-Sen estimator for a set of points (x, y).

    `theilslopes` implements a method for robust linear regression.  It
    computes the slope as the median of all slopes between paired values.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, use ``arange(len(y))`` instead.
    alpha : float, optional
        Confidence degree between 0 and 1. Default is 95% confidence.
        Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
        interpreted as "find the 90% confidence interval".
    method : {'joint', 'separate'}, optional
        Method to be used for computing estimate for intercept.
        Following methods are supported,

        * 'joint': Uses np.median(y - slope * x) as intercept.
        * 'separate': Uses np.median(y) - slope * np.median(x)
          as intercept.

        The default is 'separate'.

        .. versionadded:: 1.8.0

    Returns
    -------
    result : ``TheilslopesResult`` instance
        The return value is an object with the following attributes:

        slope : float
            Theil slope.
        intercept : float
            Intercept of the Theil line.
        low_slope : float
            Lower bound of the confidence interval on `slope`.
        high_slope : float
            Upper bound of the confidence interval on `slope`.

    See Also
    --------
    siegelslopes : a similar technique using repeated medians

    Notes
    -----
    The implementation of `theilslopes` follows [1]_. The intercept is
    not defined in [1]_, and here it is defined as ``median(y) -
    slope*median(x)``, which is given in [3]_. Other definitions of
    the intercept exist in the literature such as ``median(y - slope*x)``
    in [4]_. The approach to compute the intercept can be determined by the
    parameter ``method``. A confidence interval for the intercept is not
    given as this question is not addressed in [1]_.

    For compatibility with older versions of SciPy, the return value acts
    like a ``namedtuple`` of length 4, with fields ``slope``, ``intercept``,
    ``low_slope``, and ``high_slope``, so one can continue to write::

        slope, intercept, low_slope, high_slope = theilslopes(y, x)

    References
    ----------
    .. [1] P.K. Sen, "Estimates of the regression coefficient based on
           Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
    .. [2] H. Theil, "A rank-invariant method of linear and polynomial
           regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
           53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
    .. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
           John Wiley and Sons, New York, pp. 493.
    .. [4] https://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt

    >>> x = np.linspace(-5, 5, num=150)
    >>> y = x + np.random.normal(size=x.size)
    >>> y[11:15] += 10  # add outliers
    >>> y[-5:] -= 7

    Compute the slope, intercept and 90% confidence interval. For comparison,
    also compute the least-squares fit with `linregress`:

    >>> res = stats.theilslopes(y, x, 0.90, method='separate')
    >>> lsq_res = stats.linregress(x, y)

    Plot the results. The Theil-Sen regression line is shown in red, with the
    dashed red lines illustrating the confidence interval of the slope (note
    that the dashed red lines are not the confidence interval of the regression
    as the confidence interval of the intercept is not included). The green
    line shows the least-squares fit for comparison.

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, y, 'b.')
    >>> ax.plot(x, res[1] + res[0] * x, 'r-')
    >>> ax.plot(x, res[1] + res[2] * x, 'r--')
    >>> ax.plot(x, res[1] + res[3] * x, 'r--')
    >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
    >>> plt.show()

    """
    if method not in ['joint', 'separate']:
        raise ValueError("method must be either 'joint' or 'separate'. "
                         f"'{method}' is invalid.")
    # We copy both x and y so we can use _find_repeats.
    y = np.array(y, dtype=float, copy=True).ravel()
    if x is None:
        x = np.arange(len(y), dtype=float)
    else:
        x = np.array(x, dtype=float, copy=True).ravel()
        if len(x) != len(y):
            raise ValueError(f"Incompatible lengths! ({len(y)}<>{len(x)})")

    # Compute sorted slopes only when deltax > 0
    deltax = x[:, np.newaxis] - x
    deltay = y[:, np.newaxis] - y
    slopes = deltay[deltax > 0] / deltax[deltax > 0]
    if not slopes.size:
        msg = "All `x` coordinates are identical."
        warnings.warn(msg, RuntimeWarning, stacklevel=2)
    slopes.sort()
    medslope = np.median(slopes)
    if method == 'joint':
        medinter = np.median(y - medslope * x)
    else:
        medinter = np.median(y) - medslope * np.median(x)
    # Now compute confidence intervals
    if alpha > 0.5:
        alpha = 1. - alpha

    z = distributions.norm.ppf(alpha / 2.)
    # This implements (2.6) from Sen (1968)
    _, nxreps = _find_repeats(x)
    _, nyreps = _find_repeats(y)
    nt = len(slopes)  # N in Sen (1968)
    ny = len(y)  # n in Sen (1968)
    # Equation 2.6 in Sen (1968):
    sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
                     sum(k * (k-1) * (2*k + 5) for k in nxreps) -
                     sum(k * (k-1) * (2*k + 5) for k in nyreps))
    # Find the confidence interval indices in `slopes`
    try:
        sigma = np.sqrt(sigsq)
        Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
        Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
        delta = slopes[[Rl, Ru]]
    except (ValueError, IndexError):
        delta = (np.nan, np.nan)

    return TheilslopesResult(slope=medslope, intercept=medinter,
                             low_slope=delta[0], high_slope=delta[1])

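# Illustrative sketch (an editorial addition, not part of SciPy): the core of
# the Theil-Sen estimator in three lines, matching the pairwise-slope
# construction used in `theilslopes` above.
def _demo_theil_core(x, y):
    dx = x[:, np.newaxis] - x
    dy = y[:, np.newaxis] - y
    return np.median(dy[dx > 0] / dx[dx > 0])  # median of all pairwise slopes
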
def _find_repeats(arr):
    # This function assumes it may clobber its input.
    if len(arr) == 0:
        return np.array(0, np.float64), np.array(0, np.intp)

    # XXX This cast was previously needed for the Fortran implementation,
    # should we ditch it?
    arr = np.asarray(arr, np.float64).ravel()
    arr.sort()

    # Taken from NumPy 1.9's np.unique.
    change = np.concatenate(([True], arr[1:] != arr[:-1]))
    unique = arr[change]
    change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
    freq = np.diff(change_idx)
    atleast2 = freq > 1
    return unique[atleast2], freq[atleast2]

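# Illustrative sketch (an editorial addition, not part of SciPy):
# `_find_repeats` returns the values that occur more than once in its input,
# together with their frequencies.
def _demo_find_repeats():
    values, counts = _find_repeats(np.array([1., 2., 2., 3., 3., 3.]))
    # values is array([2., 3.]) and counts is array([2, 3])
    return values, counts
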
def siegelslopes(y, x=None, method="hierarchical"):
    r"""
    Computes the Siegel estimator for a set of points (x, y).

    `siegelslopes` implements a method for robust linear regression
    using repeated medians (see [1]_) to fit a line to the points (x, y).
    The method is robust to outliers with an asymptotic breakdown point
    of 50%.

    Parameters
    ----------
    y : array_like
        Dependent variable.
    x : array_like or None, optional
        Independent variable. If None, use ``arange(len(y))`` instead.
    method : {'hierarchical', 'separate'}
        If 'hierarchical', estimate the intercept using the estimated
        slope ``slope`` (default option).
        If 'separate', estimate the intercept independent of the estimated
        slope. See Notes for details.

    Returns
    -------
    result : ``SiegelslopesResult`` instance
        The return value is an object with the following attributes:

        slope : float
            Estimate of the slope of the regression line.
        intercept : float
            Estimate of the intercept of the regression line.

    See Also
    --------
    theilslopes : a similar technique without repeated medians

    Notes
    -----
    With ``n = len(y)``, compute ``m_j`` as the median of
    the slopes from the point ``(x[j], y[j])`` to all other `n-1` points.
    ``slope`` is then the median of all slopes ``m_j``.
    Two ways are given to estimate the intercept in [1]_ which can be chosen
    via the parameter ``method``.
    The hierarchical approach uses the estimated slope ``slope``
    and computes ``intercept`` as the median of ``y - slope*x``.
    The other approach estimates the intercept separately as follows: for
    each point ``(x[j], y[j])``, compute the intercepts of all the `n-1`
    lines through the remaining points and take the median ``i_j``.
    ``intercept`` is the median of the ``i_j``.

    The implementation computes `n` times the median of a vector of size `n`
    which can be slow for large vectors. There are more efficient algorithms
    (see [2]_) which are not implemented here.

    For compatibility with older versions of SciPy, the return value acts
    like a ``namedtuple`` of length 2, with fields ``slope`` and
    ``intercept``, so one can continue to write::

        slope, intercept = siegelslopes(y, x)

    References
    ----------
    .. [1] A. Siegel, "Robust Regression Using Repeated Medians",
           Biometrika, Vol. 69, pp. 242-244, 1982.

    .. [2] A. Stein and M. Werman, "Finding the repeated median regression
           line", Proceedings of the Third Annual ACM-SIAM Symposium on
           Discrete Algorithms, pp. 409-413, 1992.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt

    >>> x = np.linspace(-5, 5, num=150)
    >>> y = x + np.random.normal(size=x.size)
    >>> y[11:15] += 10  # add outliers
    >>> y[-5:] -= 7

    Compute the slope and intercept. For comparison, also compute the
    least-squares fit with `linregress`:

    >>> res = stats.siegelslopes(y, x)
    >>> lsq_res = stats.linregress(x, y)

    Plot the results. The Siegel regression line is shown in red. The green
    line shows the least-squares fit for comparison.

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, y, 'b.')
    >>> ax.plot(x, res[1] + res[0] * x, 'r-')
    >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
    >>> plt.show()

    """
    if method not in ['hierarchical', 'separate']:
        raise ValueError("method can only be 'hierarchical' or 'separate'")
    y = np.asarray(y).ravel()
    if x is None:
        x = np.arange(len(y), dtype=float)
    else:
        x = np.asarray(x, dtype=float).ravel()
        if len(x) != len(y):
            raise ValueError(f"Incompatible lengths! ({len(y)}<>{len(x)})")
    dtype = np.result_type(x, y, np.float32)  # use at least float32
    y, x = y.astype(dtype), x.astype(dtype)
    medslope, medinter = siegelslopes_pythran(y, x, method)
    return SiegelslopesResult(slope=medslope, intercept=medinter)
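A short side-by-side sketch (an editorial addition, not part of the diff above): the docstring examples for `theilslopes` and `siegelslopes` use the same outlier-contaminated data, so the robustness claims are easy to check directly. Both robust slopes typically stay near the true value of 1, while the least-squares slope from `linregress` is pulled noticeably away by the outliers.

    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(12345)
    x = np.linspace(-5, 5, num=150)
    y = x + rng.normal(size=x.size)
    y[11:15] += 10  # add outliers, as in the docstring examples
    y[-5:] -= 7

    print(stats.linregress(x, y).slope)    # pulled away from 1 by outliers
    print(stats.theilslopes(y, x).slope)   # close to 1
    print(stats.siegelslopes(y, x).slope)  # close to 1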
env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats_pythran.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (159 kB).
env-llmeval/lib/python3.10/site-packages/scipy/stats/_survival.py
ADDED
@@ -0,0 +1,686 @@
from __future__ import annotations

from dataclasses import dataclass, field
from typing import TYPE_CHECKING
import warnings

import numpy as np
from scipy import special, interpolate, stats
from scipy.stats._censored_data import CensoredData
from scipy.stats._common import ConfidenceInterval
from scipy.stats import norm  # type: ignore[attr-defined]

if TYPE_CHECKING:
    from typing import Literal
    import numpy.typing as npt


__all__ = ['ecdf', 'logrank']


@dataclass
class EmpiricalDistributionFunction:
    """An empirical distribution function produced by `scipy.stats.ecdf`

    Attributes
    ----------
    quantiles : ndarray
        The unique values of the sample from which the
        `EmpiricalDistributionFunction` was estimated.
    probabilities : ndarray
        The point estimates of the cumulative distribution function (CDF) or
        its complement, the survival function (SF), corresponding with
        `quantiles`.
    """
    quantiles: np.ndarray
    probabilities: np.ndarray
    # Exclude these from __str__
    _n: np.ndarray = field(repr=False)  # number "at risk"
    _d: np.ndarray = field(repr=False)  # number of "deaths"
    _sf: np.ndarray = field(repr=False)  # survival function for var estimate
    _kind: str = field(repr=False)  # type of function: "cdf" or "sf"

    def __init__(self, q, p, n, d, kind):
        self.probabilities = p
        self.quantiles = q
        self._n = n
        self._d = d
        self._sf = p if kind == 'sf' else 1 - p
        self._kind = kind

        f0 = 1 if kind == 'sf' else 0  # leftmost function value
        f1 = 1 - f0
        # fill_value can't handle edge cases at infinity
        x = np.insert(q, [0, len(q)], [-np.inf, np.inf])
        y = np.insert(p, [0, len(p)], [f0, f1])
        # `or` conditions handle the case of empty x, points
        self._f = interpolate.interp1d(x, y, kind='previous',
                                       assume_sorted=True)

    def evaluate(self, x):
        """Evaluate the empirical CDF/SF function at the input.

        Parameters
        ----------
        x : ndarray
            Argument to the CDF/SF

        Returns
        -------
        y : ndarray
            The CDF/SF evaluated at the input
        """
        return self._f(x)

    def plot(self, ax=None, **matplotlib_kwargs):
        """Plot the empirical distribution function

        Available only if ``matplotlib`` is installed.

        Parameters
        ----------
        ax : matplotlib.axes.Axes
            Axes object to draw the plot onto, otherwise uses the current Axes.

        **matplotlib_kwargs : dict, optional
            Keyword arguments passed directly to `matplotlib.axes.Axes.step`.
            Unless overridden, ``where='post'``.

        Returns
        -------
        lines : list of `matplotlib.lines.Line2D`
            Objects representing the plotted data
        """
        try:
            import matplotlib  # noqa: F401
        except ModuleNotFoundError as exc:
            message = "matplotlib must be installed to use method `plot`."
            raise ModuleNotFoundError(message) from exc

        if ax is None:
            import matplotlib.pyplot as plt
            ax = plt.gca()

        kwargs = {'where': 'post'}
        kwargs.update(matplotlib_kwargs)

        delta = np.ptp(self.quantiles)*0.05  # how far past sample edge to plot
        q = self.quantiles
        q = [q[0] - delta] + list(q) + [q[-1] + delta]

        return ax.step(q, self.evaluate(q), **kwargs)

    def confidence_interval(self, confidence_level=0.95, *, method='linear'):
        """Compute a confidence interval around the CDF/SF point estimate

        Parameters
        ----------
        confidence_level : float, default: 0.95
            Confidence level for the computed confidence interval

        method : str, {"linear", "log-log"}
            Method used to compute the confidence interval. Options are
            "linear" for the conventional Greenwood confidence interval
            (default) and "log-log" for the "exponential Greenwood",
            log-negative-log-transformed confidence interval.

        Returns
        -------
        ci : ``ConfidenceInterval``
            An object with attributes ``low`` and ``high``, instances of
            `~scipy.stats._result_classes.EmpiricalDistributionFunction` that
            represent the lower and upper bounds (respectively) of the
            confidence interval.

        Notes
        -----
        Confidence intervals are computed according to the Greenwood formula
        (``method='linear'``) or the more recent "exponential Greenwood"
        formula (``method='log-log'``) as described in [1]_. The conventional
        Greenwood formula can result in lower confidence limits less than 0
        and upper confidence limits greater than 1; these are clipped to the
        unit interval. NaNs may be produced by either method; these are
        features of the formulas.

        References
        ----------
        .. [1] Sawyer, Stanley. "The Greenwood and Exponential Greenwood
               Confidence Intervals in Survival Analysis."
               https://www.math.wustl.edu/~sawyer/handouts/greenwood.pdf

        """
        message = ("Confidence interval bounds do not implement a "
                   "`confidence_interval` method.")
        if self._n is None:
            raise NotImplementedError(message)

        methods = {'linear': self._linear_ci,
                   'log-log': self._loglog_ci}

        message = f"`method` must be one of {set(methods)}."
        if method.lower() not in methods:
            raise ValueError(message)

        message = "`confidence_level` must be a scalar between 0 and 1."
        confidence_level = np.asarray(confidence_level)[()]
        if confidence_level.shape or not (0 <= confidence_level <= 1):
            raise ValueError(message)

        method_fun = methods[method.lower()]
        low, high = method_fun(confidence_level)

        message = ("The confidence interval is undefined at some observations."
                   " This is a feature of the mathematical formula used, not"
                   " an error in its implementation.")
        if np.any(np.isnan(low) | np.isnan(high)):
            warnings.warn(message, RuntimeWarning, stacklevel=2)

        low, high = np.clip(low, 0, 1), np.clip(high, 0, 1)
        low = EmpiricalDistributionFunction(self.quantiles, low, None, None,
                                            self._kind)
        high = EmpiricalDistributionFunction(self.quantiles, high, None, None,
                                             self._kind)
        return ConfidenceInterval(low, high)

    def _linear_ci(self, confidence_level):
        sf, d, n = self._sf, self._d, self._n
        # When n == d, Greenwood's formula divides by zero.
        # When s != 0, this can be ignored: var == inf, and CI is [0, 1]
        # When s == 0, this results in NaNs. Produce an informative warning.
        with np.errstate(divide='ignore', invalid='ignore'):
            var = sf ** 2 * np.cumsum(d / (n * (n - d)))

        se = np.sqrt(var)
        z = special.ndtri(1 / 2 + confidence_level / 2)

        z_se = z * se
        low = self.probabilities - z_se
        high = self.probabilities + z_se

        return low, high

    def _loglog_ci(self, confidence_level):
        sf, d, n = self._sf, self._d, self._n

        with np.errstate(divide='ignore', invalid='ignore'):
            var = 1 / np.log(sf) ** 2 * np.cumsum(d / (n * (n - d)))

        se = np.sqrt(var)
        z = special.ndtri(1 / 2 + confidence_level / 2)

        with np.errstate(divide='ignore'):
            lnl_points = np.log(-np.log(sf))

        z_se = z * se
        low = np.exp(-np.exp(lnl_points + z_se))
        high = np.exp(-np.exp(lnl_points - z_se))
        if self._kind == "cdf":
            low, high = 1-high, 1-low

        return low, high

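# Illustrative sketch (an editorial addition, not part of SciPy): the +/-inf
# padding and ``kind='previous'`` interpolation set up in ``__init__`` make
# ``evaluate`` a right-continuous step function that is 0 (CDF) or 1 (SF) to
# the left of the sample and 1 (CDF) or 0 (SF) to the right of it. `ecdf` is
# defined later in this module and resolved at call time.
def _demo_evaluate_steps():
    res = ecdf([1.0, 2.0, 3.0])
    # Below the sample, at a step, and above the sample:
    return res.cdf.evaluate(np.array([0.0, 2.0, 4.0]))  # -> [0., 2/3, 1.]
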
@dataclass
class ECDFResult:
    """Result object returned by `scipy.stats.ecdf`

    Attributes
    ----------
    cdf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
        An object representing the empirical cumulative distribution function.
    sf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
        An object representing the complement of the empirical cumulative
        distribution function.
    """
    cdf: EmpiricalDistributionFunction
    sf: EmpiricalDistributionFunction

    def __init__(self, q, cdf, sf, n, d):
        self.cdf = EmpiricalDistributionFunction(q, cdf, n, d, "cdf")
        self.sf = EmpiricalDistributionFunction(q, sf, n, d, "sf")


def _iv_CensoredData(
    sample: npt.ArrayLike | CensoredData, param_name: str = 'sample'
) -> CensoredData:
    """Attempt to convert `sample` to `CensoredData`."""
    if not isinstance(sample, CensoredData):
        try:  # takes care of input standardization/validation
            sample = CensoredData(uncensored=sample)
        except ValueError as e:
            message = str(e).replace('uncensored', param_name)
            raise type(e)(message) from e
    return sample


def ecdf(sample: npt.ArrayLike | CensoredData) -> ECDFResult:
    """Empirical cumulative distribution function of a sample.

    The empirical cumulative distribution function (ECDF) is a step function
    estimate of the CDF of the distribution underlying a sample. This function
    returns objects representing both the empirical distribution function and
    its complement, the empirical survival function.

    Parameters
    ----------
    sample : 1D array_like or `scipy.stats.CensoredData`
        Besides array_like, instances of `scipy.stats.CensoredData` containing
        uncensored and right-censored observations are supported. Currently,
        other instances of `scipy.stats.CensoredData` will result in a
        ``NotImplementedError``.

    Returns
    -------
    res : `~scipy.stats._result_classes.ECDFResult`
        An object with the following attributes.

        cdf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
            An object representing the empirical cumulative distribution
            function.
        sf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
            An object representing the empirical survival function.

        The `cdf` and `sf` attributes themselves have the following attributes.

        quantiles : ndarray
            The unique values in the sample that define the empirical CDF/SF.
        probabilities : ndarray
            The point estimates of the probabilities corresponding with
            `quantiles`.

        And the following methods:

        evaluate(x) :
            Evaluate the CDF/SF at the argument.

        plot(ax) :
            Plot the CDF/SF on the provided axes.

        confidence_interval(confidence_level=0.95) :
            Compute the confidence interval around the CDF/SF at the values in
            `quantiles`.

    Notes
    -----
    When each observation of the sample is a precise measurement, the ECDF
    steps up by ``1/len(sample)`` at each of the observations [1]_.

    When observations are lower bounds, upper bounds, or both upper and lower
    bounds, the data is said to be "censored", and `sample` may be provided as
    an instance of `scipy.stats.CensoredData`.

    For right-censored data, the ECDF is given by the Kaplan-Meier estimator
    [2]_; other forms of censoring are not supported at this time.

    Confidence intervals are computed according to the Greenwood formula or the
    more recent "Exponential Greenwood" formula as described in [4]_.

    References
    ----------
    .. [1] Conover, William Jay. Practical nonparametric statistics. Vol. 350.
           John Wiley & Sons, 1999.

    .. [2] Kaplan, Edward L., and Paul Meier. "Nonparametric estimation from
           incomplete observations." Journal of the American statistical
           association 53.282 (1958): 457-481.

    .. [3] Goel, Manish Kumar, Pardeep Khanna, and Jugal Kishore.
           "Understanding survival analysis: Kaplan-Meier estimate."
           International journal of Ayurveda research 1.4 (2010): 274.

    .. [4] Sawyer, Stanley. "The Greenwood and Exponential Greenwood Confidence
           Intervals in Survival Analysis."
           https://www.math.wustl.edu/~sawyer/handouts/greenwood.pdf

    Examples
    --------
    **Uncensored Data**

    As in the example from [1]_ page 79, five boys were selected at random from
    those in a single high school. Their one-mile run times were recorded as
    follows.

    >>> sample = [6.23, 5.58, 7.06, 6.42, 5.20]  # one-mile run times (minutes)

    The empirical distribution function, which approximates the distribution
    function of one-mile run times of the population from which the boys were
    sampled, is calculated as follows.

    >>> from scipy import stats
    >>> res = stats.ecdf(sample)
    >>> res.cdf.quantiles
    array([5.2 , 5.58, 6.23, 6.42, 7.06])
    >>> res.cdf.probabilities
    array([0.2, 0.4, 0.6, 0.8, 1. ])

    To plot the result as a step function:

    >>> import matplotlib.pyplot as plt
    >>> ax = plt.subplot()
    >>> res.cdf.plot(ax)
    >>> ax.set_xlabel('One-Mile Run Time (minutes)')
    >>> ax.set_ylabel('Empirical CDF')
    >>> plt.show()

    **Right-censored Data**

    As in the example from [1]_ page 91, the lives of ten car fanbelts were
    tested. Five tests concluded because the fanbelt being tested broke, but
    the remaining tests concluded for other reasons (e.g. the study ran out of
    funding, but the fanbelt was still functional). The mileage driven
    with the fanbelts was recorded as follows.

    >>> broken = [77, 47, 81, 56, 80]  # in thousands of miles driven
    >>> unbroken = [62, 60, 43, 71, 37]

    Precise survival times of the fanbelts that were still functional at the
    end of the tests are unknown, but they are known to exceed the values
    recorded in ``unbroken``. Therefore, these observations are said to be
    "right-censored", and the data is represented using
    `scipy.stats.CensoredData`.

    >>> sample = stats.CensoredData(uncensored=broken, right=unbroken)

    The empirical survival function is calculated as follows.

    >>> res = stats.ecdf(sample)
    >>> res.sf.quantiles
    array([37., 43., 47., 56., 60., 62., 71., 77., 80., 81.])
    >>> res.sf.probabilities
    array([1.   , 1.   , 0.875, 0.75 , 0.75 , 0.75 , 0.75 , 0.5  , 0.25 , 0.   ])

    To plot the result as a step function:

    >>> ax = plt.subplot()
    >>> res.sf.plot(ax)
    >>> ax.set_xlabel('Fanbelt Survival Time (thousands of miles)')
    >>> ax.set_ylabel('Empirical SF')
    >>> plt.show()

    """
    sample = _iv_CensoredData(sample)

    if sample.num_censored() == 0:
        res = _ecdf_uncensored(sample._uncensor())
    elif sample.num_censored() == sample._right.size:
        res = _ecdf_right_censored(sample)
    else:
        # Support additional censoring options in follow-up PRs
        message = ("Currently, only uncensored and right-censored data is "
                   "supported.")
        raise NotImplementedError(message)

    t, cdf, sf, n, d = res
    return ECDFResult(t, cdf, sf, n, d)


def _ecdf_uncensored(sample):
    sample = np.sort(sample)
    x, counts = np.unique(sample, return_counts=True)

    # [1].81 "the fraction of [observations] that are less than or equal to x"
    events = np.cumsum(counts)
    n = sample.size
    cdf = events / n

    # [1].89 "the relative frequency of the sample that exceeds x in value"
    sf = 1 - cdf

    at_risk = np.concatenate(([n], n - events[:-1]))
    return x, cdf, sf, at_risk, counts


def _ecdf_right_censored(sample):
    # It is conventional to discuss right-censored data in terms of
    # "survival time", "death", and "loss" (e.g. [2]). We'll use that
    # terminology here.
    # This implementation was influenced by the references cited and also
    # https://www.youtube.com/watch?v=lxoWsVco_iM
    # https://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator
    # In retrospect it is probably most easily compared against [3].
    # Ultimately, the data needs to be sorted, so this implementation is
    # written to avoid a separate call to `unique` after sorting. In hope of
    # better performance on large datasets, it also computes survival
    # probabilities at unique times only rather than at each observation.
    tod = sample._uncensored  # time of "death"
    tol = sample._right  # time of "loss"
    times = np.concatenate((tod, tol))
    died = np.asarray([1]*tod.size + [0]*tol.size)

    # sort by times
    i = np.argsort(times)
    times = times[i]
    died = died[i]
    at_risk = np.arange(times.size, 0, -1)

    # logical indices of unique times
    j = np.diff(times, prepend=-np.inf, append=np.inf) > 0
    j_l = j[:-1]  # first instances of unique times
    j_r = j[1:]  # last instances of unique times

    # get number at risk and deaths at each unique time
    t = times[j_l]  # unique times
    n = at_risk[j_l]  # number at risk at each unique time
    cd = np.cumsum(died)[j_r]  # cumulative deaths up to/including unique times
    d = np.diff(cd, prepend=0)  # deaths at each unique time

    # compute survival function
    sf = np.cumprod((n - d) / n)
    cdf = 1 - sf
    return t, cdf, sf, n, d

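# Illustrative sketch (an editorial addition, not part of SciPy): a hand
# computation of the Kaplan-Meier product for the fanbelt data from the
# `ecdf` docstring, mirroring the cumprod((n - d) / n) step above.
def _demo_kaplan_meier_by_hand():
    broken = np.array([77., 47., 81., 56., 80.])  # observed failures
    unbroken = np.array([62., 60., 43., 71., 37.])  # right-censored
    t, cdf, sf, n, d = _ecdf_right_censored(
        CensoredData(uncensored=broken, right=unbroken))
    # At each unique time, survival is multiplied by (n - d) / n; e.g. at
    # t=47 (8 at risk, 1 death): 1 * 7/8 = 0.875, matching sf there.
    return t, sf
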
@dataclass
class LogRankResult:
    """Result object returned by `scipy.stats.logrank`.

    Attributes
    ----------
    statistic : float ndarray
        The computed statistic (defined below). Its magnitude is the
        square root of the magnitude returned by most other logrank test
        implementations.
    pvalue : float ndarray
        The computed p-value of the test.
    """
    statistic: np.ndarray
    pvalue: np.ndarray


def logrank(
    x: npt.ArrayLike | CensoredData,
    y: npt.ArrayLike | CensoredData,
    alternative: Literal['two-sided', 'less', 'greater'] = "two-sided"
) -> LogRankResult:
    r"""Compare the survival distributions of two samples via the logrank test.

    Parameters
    ----------
    x, y : array_like or CensoredData
        Samples to compare based on their empirical survival functions.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.

        The null hypothesis is that the survival distributions of the two
        groups, say *X* and *Y*, are identical.

        The following alternative hypotheses [4]_ are available (default is
        'two-sided'):

        * 'two-sided': the survival distributions of the two groups are not
          identical.
        * 'less': survival of group *X* is favored: the group *X* failure rate
          function is less than the group *Y* failure rate function at some
          times.
        * 'greater': survival of group *Y* is favored: the group *X* failure
          rate function is greater than the group *Y* failure rate function at
          some times.

    Returns
    -------
    res : `~scipy.stats._result_classes.LogRankResult`
        An object containing attributes:

        statistic : float ndarray
            The computed statistic (defined below). Its magnitude is the
            square root of the magnitude returned by most other logrank test
            implementations.
        pvalue : float ndarray
            The computed p-value of the test.

    See Also
    --------
    scipy.stats.ecdf

    Notes
    -----
    The logrank test [1]_ compares the observed number of events to
    the expected number of events under the null hypothesis that the two
    samples were drawn from the same distribution. The statistic is

    .. math::

        Z_i = \frac{\sum_{j=1}^J(O_{i,j}-E_{i,j})}{\sqrt{\sum_{j=1}^J V_{i,j}}}
        \rightarrow \mathcal{N}(0,1)

    where

    .. math::

        E_{i,j} = O_j \frac{N_{i,j}}{N_j},
        \qquad
        V_{i,j} = E_{i,j} \left(\frac{N_j-O_j}{N_j}\right)
        \left(\frac{N_j-N_{i,j}}{N_j-1}\right),

    :math:`i` denotes the group (i.e. it may assume values :math:`x` or
    :math:`y`, or it may be omitted to refer to the combined sample),
    :math:`j` denotes the time (at which an event occurred),
    :math:`N` is the number of subjects at risk just before an event occurred,
    and :math:`O` is the observed number of events at that time.

    The ``statistic`` :math:`Z_x` returned by `logrank` is the (signed) square
    root of the statistic returned by many other implementations. Under the
    null hypothesis, :math:`Z_x^2` is asymptotically distributed according to
    the chi-squared distribution with one degree of freedom. Consequently,
    :math:`Z_x` is asymptotically distributed according to the standard normal
    distribution. The advantage of using :math:`Z_x` is that the sign
    information (i.e. whether the observed number of events tends to be less
    than or greater than the number expected under the null hypothesis) is
    preserved, allowing `scipy.stats.logrank` to offer one-sided alternative
    hypotheses.

    References
    ----------
    .. [1] Mantel N. "Evaluation of survival data and two new rank order
           statistics arising in its consideration."
           Cancer Chemotherapy Reports, 50(3):163-170, PMID: 5910392, 1966
    .. [2] Bland, Altman, "The logrank test", BMJ, 328:1073,
           :doi:`10.1136/bmj.328.7447.1073`, 2004
    .. [3] "Logrank test", Wikipedia,
           https://en.wikipedia.org/wiki/Logrank_test
    .. [4] Brown, Mark. "On the choice of variance for the log rank test."
           Biometrika 71.1 (1984): 65-74.
    .. [5] Klein, John P., and Melvin L. Moeschberger. Survival analysis:
           techniques for censored and truncated data. Vol. 1230. New York:
           Springer, 2003.

    Examples
    --------
    Reference [2]_ compared the survival times of patients with two different
    types of recurrent malignant gliomas. The samples below record the time
    (number of weeks) for which each patient participated in the study. The
    `scipy.stats.CensoredData` class is used because the data is
    right-censored: the uncensored observations correspond with observed deaths
    whereas the censored observations correspond with the patient leaving the
    study for another reason.

    >>> from scipy import stats
    >>> x = stats.CensoredData(
    ...     uncensored=[6, 13, 21, 30, 37, 38, 49, 50,
    ...                 63, 79, 86, 98, 202, 219],
    ...     right=[31, 47, 80, 82, 82, 149]
    ... )
    >>> y = stats.CensoredData(
    ...     uncensored=[10, 10, 12, 13, 14, 15, 16, 17, 18, 20, 24, 24,
    ...                 25, 28, 30, 33, 35, 37, 40, 40, 46, 48, 76, 81,
    ...                 82, 91, 112, 181],
    ...     right=[34, 40, 70]
    ... )

    We can calculate and visualize the empirical survival functions
    of both groups as follows.

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> ax = plt.subplot()
    >>> ecdf_x = stats.ecdf(x)
    >>> ecdf_x.sf.plot(ax, label='Astrocytoma')
    >>> ecdf_y = stats.ecdf(y)
    >>> ecdf_y.sf.plot(ax, label='Glioblastoma')
    >>> ax.set_xlabel('Time to death (weeks)')
    >>> ax.set_ylabel('Empirical SF')
    >>> plt.legend()
    >>> plt.show()

    Visual inspection of the empirical survival functions suggests that the
    survival times tend to be different between the two groups. To formally
    assess whether the difference is significant at the 1% level, we use the
    logrank test.

    >>> res = stats.logrank(x=x, y=y)
    >>> res.statistic
    -2.73799...
    >>> res.pvalue
    0.00618...

    The p-value is less than 1%, so we can consider the data to be evidence
    against the null hypothesis in favor of the alternative that there is a
    difference between the two survival functions.

    """
    # Input validation. `alternative` IV handled in `_get_pvalue` below.
    x = _iv_CensoredData(sample=x, param_name='x')
    y = _iv_CensoredData(sample=y, param_name='y')

    # Combined sample. (Under H0, the two groups are identical.)
    xy = CensoredData(
        uncensored=np.concatenate((x._uncensored, y._uncensored)),
        right=np.concatenate((x._right, y._right))
    )

    # Extract data from the combined sample
    res = ecdf(xy)
    idx = res.sf._d.astype(bool)  # indices of observed events
    times_xy = res.sf.quantiles[idx]  # unique times of observed events
    at_risk_xy = res.sf._n[idx]  # combined number of subjects at risk
    deaths_xy = res.sf._d[idx]  # combined number of events

    # Get the number at risk within each sample.
    # First compute the number at risk in group X at each of the `times_xy`.
    # Could use `interpolate_1d`, but this is more compact.
    res_x = ecdf(x)
    i = np.searchsorted(res_x.sf.quantiles, times_xy)
    at_risk_x = np.append(res_x.sf._n, 0)[i]  # 0 at risk after last time
    # Subtract from the combined number at risk to get number at risk in Y
    at_risk_y = at_risk_xy - at_risk_x

    # Compute the variance.
    num = at_risk_x * at_risk_y * deaths_xy * (at_risk_xy - deaths_xy)
    den = at_risk_xy**2 * (at_risk_xy - 1)
    # Note: when `at_risk_xy == 1`, we would have `at_risk_xy - 1 == 0` in the
    # numerator and denominator. Simplifying the fraction symbolically, we
    # would always find the overall quotient to be zero, so don't compute it.
    i = at_risk_xy > 1
    sum_var = np.sum(num[i]/den[i])

    # Get the observed and expected number of deaths in group X
    n_died_x = x._uncensored.size
    sum_exp_deaths_x = np.sum(at_risk_x * (deaths_xy/at_risk_xy))

    # Compute the statistic. This is the square root of that in references.
    statistic = (n_died_x - sum_exp_deaths_x)/np.sqrt(sum_var)

    # Equivalent to chi2(df=1).sf(statistic**2) when alternative='two-sided'
    pvalue = stats._stats_py._get_pvalue(statistic, norm, alternative)

    return LogRankResult(statistic=statistic[()], pvalue=pvalue[()])
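A quick numerical check (an editorial addition, not part of the diff): the notes for `logrank` state that the returned statistic is the signed square root of the conventional chi-squared form, so the two-sided p-value should match the chi-squared survival function with one degree of freedom. A sketch, assuming `x` and `y` are the glioma samples constructed in the docstring above:

    from scipy import stats
    res = stats.logrank(x=x, y=y)
    p_chi2 = stats.chi2(df=1).sf(res.statistic**2)
    assert abs(p_chi2 - res.pvalue) < 1e-12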
env-llmeval/lib/python3.10/site-packages/scipy/stats/_tukeylambda_stats.py
ADDED
@@ -0,0 +1,199 @@
1 |
+
import numpy as np
|
2 |
+
from numpy import poly1d
|
3 |
+
from scipy.special import beta
|
4 |
+
|
5 |
+
|
6 |
+
# The following code was used to generate the Pade coefficients for the
|
7 |
+
# Tukey Lambda variance function. Version 0.17 of mpmath was used.
|
8 |
+
#---------------------------------------------------------------------------
|
9 |
+
# import mpmath as mp
|
10 |
+
#
|
11 |
+
# mp.mp.dps = 60
|
12 |
+
#
|
13 |
+
# one = mp.mpf(1)
|
14 |
+
# two = mp.mpf(2)
|
15 |
+
#
|
16 |
+
# def mpvar(lam):
|
17 |
+
# if lam == 0:
|
18 |
+
# v = mp.pi**2 / three
|
19 |
+
# else:
|
20 |
+
# v = (two / lam**2) * (one / (one + two*lam) -
|
21 |
+
# mp.beta(lam + one, lam + one))
|
22 |
+
# return v
|
23 |
+
#
|
24 |
+
# t = mp.taylor(mpvar, 0, 8)
|
25 |
+
# p, q = mp.pade(t, 4, 4)
|
26 |
+
# print("p =", [mp.fp.mpf(c) for c in p])
|
27 |
+
# print("q =", [mp.fp.mpf(c) for c in q])
|
28 |
+
#---------------------------------------------------------------------------
|
29 |
+
|
30 |
+
# Pade coefficients for the Tukey Lambda variance function.
|
31 |
+
_tukeylambda_var_pc = [3.289868133696453, 0.7306125098871127,
|
32 |
+
-0.5370742306855439, 0.17292046290190008,
|
33 |
+
-0.02371146284628187]
|
34 |
+
_tukeylambda_var_qc = [1.0, 3.683605511659861, 4.184152498888124,
|
35 |
+
1.7660926747377275, 0.2643989311168465]
|
36 |
+
|
37 |
+
# numpy.poly1d instances for the numerator and denominator of the
|
38 |
+
# Pade approximation to the Tukey Lambda variance.
|
39 |
+
_tukeylambda_var_p = poly1d(_tukeylambda_var_pc[::-1])
|
40 |
+
_tukeylambda_var_q = poly1d(_tukeylambda_var_qc[::-1])
|
41 |
+
|
42 |
+
|
43 |
+
def tukeylambda_variance(lam):
|
44 |
+
"""Variance of the Tukey Lambda distribution.
|
45 |
+
|
46 |
+
Parameters
|
47 |
+
----------
|
48 |
+
lam : array_like
|
49 |
+
The lambda values at which to compute the variance.
|
50 |
+
|
51 |
+
Returns
|
52 |
+
-------
|
53 |
+
v : ndarray
|
54 |
+
The variance. For lam < -0.5, the variance is not defined, so
|
55 |
+
np.nan is returned. For lam = 0.5, np.inf is returned.
|
56 |
+
|
57 |
+
Notes
|
58 |
+
-----
|
59 |
+
In an interval around lambda=0, this function uses the [4,4] Pade
|
60 |
+
approximation to compute the variance. Otherwise it uses the standard
|
61 |
+
formula (https://en.wikipedia.org/wiki/Tukey_lambda_distribution). The
|
62 |
+
Pade approximation is used because the standard formula has a removable
|
63 |
+
discontinuity at lambda = 0, and does not produce accurate numerical
|
64 |
+
results near lambda = 0.
|
65 |
+
"""
|
66 |
+
lam = np.asarray(lam)
|
67 |
+
shp = lam.shape
|
68 |
+
lam = np.atleast_1d(lam).astype(np.float64)
|
69 |
+
|
70 |
+
# For absolute values of lam less than threshold, use the Pade
|
71 |
+
# approximation.
|
72 |
+
threshold = 0.075
|
73 |
+
|
74 |
+
# Play games with masks to implement the conditional evaluation of
|
75 |
+
# the distribution.
|
76 |
+
# lambda < -0.5: var = nan
|
77 |
+
low_mask = lam < -0.5
|
78 |
+
# lambda == -0.5: var = inf
|
79 |
+
neghalf_mask = lam == -0.5
|
80 |
+
# abs(lambda) < threshold: use Pade approximation
|
81 |
+
small_mask = np.abs(lam) < threshold
|
82 |
+
# else the "regular" case: use the explicit formula.
|
83 |
+
reg_mask = ~(low_mask | neghalf_mask | small_mask)
|
84 |
+
|
85 |
+
# Get the 'lam' values for the cases where they are needed.
|
86 |
+
small = lam[small_mask]
|
87 |
+
reg = lam[reg_mask]
|
88 |
+
|
89 |
+
# Compute the function for each case.
|
90 |
+
v = np.empty_like(lam)
|
91 |
+
v[low_mask] = np.nan
|
92 |
+
v[neghalf_mask] = np.inf
|
93 |
+
if small.size > 0:
|
94 |
+
# Use the Pade approximation near lambda = 0.
|
95 |
+
v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small)
|
96 |
+
if reg.size > 0:
|
97 |
+
v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) -
|
98 |
+
beta(reg + 1, reg + 1))
|
99 |
+
v.shape = shp
|
100 |
+
return v
|
101 |
+
|
102 |
+
|
103 |
+
# The following code was used to generate the Pade coefficients for the
|
104 |
+
# Tukey Lambda kurtosis function. Version 0.17 of mpmath was used.
|
105 |
+
#---------------------------------------------------------------------------
|
106 |
+
# import mpmath as mp
|
107 |
+
#
|
108 |
+
# mp.mp.dps = 60
|
109 |
+
#
|
110 |
+
# one = mp.mpf(1)
|
111 |
+
# two = mp.mpf(2)
|
112 |
+
# three = mp.mpf(3)
|
113 |
+
# four = mp.mpf(4)
|
114 |
+
#
|
115 |
+
# def mpkurt(lam):
|
116 |
+
# if lam == 0:
|
117 |
+
# k = mp.mpf(6)/5
|
118 |
+
# else:
|
119 |
+
# numer = (one/(four*lam+one) - four*mp.beta(three*lam+one, lam+one) +
|
120 |
+
# three*mp.beta(two*lam+one, two*lam+one))
|
121 |
+
# denom = two*(one/(two*lam+one) - mp.beta(lam+one,lam+one))**2
|
122 |
+
# k = numer / denom - three
|
123 |
+
# return k
|
124 |
+
#
|
125 |
+
# # There is a bug in mpmath 0.17: when we use the 'method' keyword of the
|
126 |
+
# # taylor function and we request a degree 9 Taylor polynomial, we actually
|
127 |
+
# # get degree 8.
|
128 |
+
# t = mp.taylor(mpkurt, 0, 9, method='quad', radius=0.01)
|
129 |
+
# t = [mp.chop(c, tol=1e-15) for c in t]
|
130 |
+
# p, q = mp.pade(t, 4, 4)
|
131 |
+
# print("p =", [mp.fp.mpf(c) for c in p])
|
132 |
+
# print("q =", [mp.fp.mpf(c) for c in q])
|
133 |
+
#---------------------------------------------------------------------------
|
134 |
+
|
135 |
+
# Pade coefficients for the Tukey Lambda kurtosis function.
|
136 |
+
_tukeylambda_kurt_pc = [1.2, -5.853465139719495, -22.653447381131077,
|
137 |
+
0.20601184383406815, 4.59796302262789]
|
138 |
+
_tukeylambda_kurt_qc = [1.0, 7.171149192233599, 12.96663094361842,
|
139 |
+
0.43075235247853005, -2.789746758009912]
|
140 |
+
|
141 |
+
# numpy.poly1d instances for the numerator and denominator of the
|
142 |
+
# Pade approximation to the Tukey Lambda kurtosis.
|
143 |
+
_tukeylambda_kurt_p = poly1d(_tukeylambda_kurt_pc[::-1])
|
144 |
+
_tukeylambda_kurt_q = poly1d(_tukeylambda_kurt_qc[::-1])
|
145 |
+
|
146 |
+
|
147 |
+
def tukeylambda_kurtosis(lam):
|
148 |
+
"""Kurtosis of the Tukey Lambda distribution.
|
149 |
+
|
150 |
+
Parameters
|
151 |
+
----------
|
152 |
+
lam : array_like
|
153 |
+
The lambda values at which to compute the variance.
|
154 |
+
|
155 |
+
Returns
|
156 |
+
-------
|
157 |
+
v : ndarray
|
158 |
+
The variance. For lam < -0.25, the variance is not defined, so
|
159 |
+
np.nan is returned. For lam = 0.25, np.inf is returned.
|
160 |
+
|
161 |
+
"""
+    lam = np.asarray(lam)
+    shp = lam.shape
+    lam = np.atleast_1d(lam).astype(np.float64)
+
+    # For absolute values of lam less than threshold, use the Pade
+    # approximation.
+    threshold = 0.055
+
+    # Use masks to implement the conditional evaluation of the kurtosis.
+    # lambda < -0.25:  kurtosis = nan
+    low_mask = lam < -0.25
+    # lambda == -0.25:  kurtosis = inf
+    negqrtr_mask = lam == -0.25
+    # lambda near 0:  use Pade approximation
+    small_mask = np.abs(lam) < threshold
+    # else the "regular" case:  use the explicit formula.
+    reg_mask = ~(low_mask | negqrtr_mask | small_mask)
+
+    # Get the 'lam' values for the cases where they are needed.
+    small = lam[small_mask]
+    reg = lam[reg_mask]
+
+    # Compute the function for each case.
+    k = np.empty_like(lam)
+    k[low_mask] = np.nan
+    k[negqrtr_mask] = np.inf
+    if small.size > 0:
+        k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small)
+    if reg.size > 0:
+        numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) +
+                 3 * beta(2 * reg + 1, 2 * reg + 1))
+        denom = 2 * (1.0 / (2 * reg + 1) - beta(reg + 1, reg + 1))**2
+        k[reg_mask] = numer / denom - 3
+
+    # The return value will be a numpy array; resetting the shape ensures that
+    # if `lam` was a scalar, the return value is a 0-d array.
+    k.shape = shp
+    return k
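A quick sanity check of the branch logic above. This assumes the file is importable as scipy's private `scipy.stats._tukeylambda_stats` module (the surrounding diff suggests this but the header is outside this excerpt); importing a private module is for illustration only.

from scipy.stats._tukeylambda_stats import tukeylambda_kurtosis

# One call exercising all four mask branches above:
#   lam < -0.25  -> nan;  lam == -0.25 -> inf;
#   |lam| < 0.055 -> Pade branch, and k(0) = 1.2/1.0 = 1.2, the exact 6/5
#   value for the logistic case noted in the commented mpmath code;
#   otherwise the explicit beta-function formula, and k(1) = -1.2, the
#   excess kurtosis of the uniform distribution.
print(tukeylambda_kurtosis([-0.5, -0.25, 0.0, 1.0]))
# expected: [ nan  inf  1.2 -1.2]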
env-llmeval/lib/python3.10/site-packages/scipy/stats/_variation.py
ADDED
@@ -0,0 +1,121 @@
+import numpy as np
+from scipy._lib._util import _get_nan
+from ._axis_nan_policy import _axis_nan_policy_factory
+
+
+@_axis_nan_policy_factory(
+    lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,)
+)
+def variation(a, axis=0, nan_policy='propagate', ddof=0, *, keepdims=False):
+    """
+    Compute the coefficient of variation.
+
+    The coefficient of variation is the standard deviation divided by the
+    mean.  This function is equivalent to::
+
+        np.std(x, axis=axis, ddof=ddof) / np.mean(x)
+
+    The default for ``ddof`` is 0, but many definitions of the coefficient
+    of variation use the square root of the unbiased sample variance
+    for the sample standard deviation, which corresponds to ``ddof=1``.
+
+    The function does not take the absolute value of the mean of the data,
+    so the return value is negative if the mean is negative.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int or None, optional
+        Axis along which to calculate the coefficient of variation.
+        Default is 0. If None, compute over the whole array `a`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains ``nan``.
+        The following options are available:
+
+          * 'propagate': return ``nan``
+          * 'raise': raise an exception
+          * 'omit': perform the calculation with ``nan`` values omitted
+
+        The default is 'propagate'.
+    ddof : int, optional
+        Gives the "Delta Degrees Of Freedom" used when computing the
+        standard deviation.  The divisor used in the calculation of the
+        standard deviation is ``N - ddof``, where ``N`` is the number of
+        elements.  `ddof` must be less than ``N``; if it isn't, the result
+        will be ``nan`` or ``inf``, depending on ``N`` and the values in
+        the array.  By default `ddof` is zero for backwards compatibility,
+        but it is recommended to use ``ddof=1`` to ensure that the sample
+        standard deviation is computed as the square root of the unbiased
+        sample variance.
+
+    Returns
+    -------
+    variation : ndarray
+        The calculated variation along the requested axis.
+
+    Notes
+    -----
+    There are several edge cases that are handled without generating a
+    warning:
+
+    * If both the mean and the standard deviation are zero, ``nan``
+      is returned.
+    * If the mean is zero and the standard deviation is nonzero, ``inf``
+      is returned.
+    * If the input has length zero (either because the array has zero
+      length, or all the input values are ``nan`` and ``nan_policy`` is
+      ``'omit'``), ``nan`` is returned.
+    * If the input contains ``inf``, ``nan`` is returned.
+
+    References
+    ----------
+    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
+       Probability and Statistics Tables and Formulae. Chapman & Hall: New
+       York. 2000.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import variation
+    >>> variation([1, 2, 3, 4, 5], ddof=1)
+    0.5270462766947299
+
+    Compute the variation along a given dimension of an array that contains
+    a few ``nan`` values:
+
+    >>> x = np.array([[ 10.0, np.nan, 11.0, 19.0, 23.0, 29.0, 98.0],
+    ...               [ 29.0, 30.0, 32.0, 33.0, 35.0, 56.0, 57.0],
+    ...               [np.nan, np.nan, 12.0, 13.0, 16.0, 16.0, 17.0]])
+    >>> variation(x, axis=1, ddof=1, nan_policy='omit')
+    array([1.05109361, 0.31428986, 0.146483  ])
+
+    """
+    # `nan_policy` and `keepdims` are handled by `_axis_nan_policy`
+    n = a.shape[axis]
+    NaN = _get_nan(a)
+
+    if a.size == 0 or ddof > n:
+        # Handle as a special case to avoid spurious warnings.
+        # The return values, if any, are all nan.
+        shp = np.asarray(a.shape)
+        shp = np.delete(shp, axis)
+        result = np.full(shp, fill_value=NaN)
+        return result[()]
+
+    mean_a = a.mean(axis)
+
+    if ddof == n:
+        # Another special case.  Result is either inf or nan.
+        std_a = a.std(axis=axis, ddof=0)
+        result = np.full_like(std_a, fill_value=NaN)
+        i = std_a > 0
+        result[i] = np.inf
+        result[i] = np.copysign(result[i], mean_a[i])
+        return result[()]
+
+    with np.errstate(divide='ignore', invalid='ignore'):
+        std_a = a.std(axis, ddof=ddof)
+        result = std_a / mean_a
+
+    return result[()]
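A short usage sketch of the file above, checking the docstring's std/mean equivalence and the signed-infinity special case taken when ``ddof == n``. It uses only the public `scipy.stats.variation` API; the array values are illustrative.

import numpy as np
from scipy.stats import variation

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

# Docstring equivalence: std/mean with matching ddof.
print(variation(x, ddof=1))               # 0.5270462766947299
print(np.std(x, ddof=1) / np.mean(x))     # same value

# ddof == n special case (handled explicitly above): the result is
# +/-inf, carrying the sign of the mean along each column.
y = np.array([[1.0, 2.0], [3.0, 5.0], [5.0, 8.0]])
print(variation(y, axis=0, ddof=3))       # [inf inf] -- both column means are positive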
env-llmeval/lib/python3.10/site-packages/scipy/stats/_warnings_errors.py
ADDED
@@ -0,0 +1,38 @@
+# Warnings
+
+
+class DegenerateDataWarning(RuntimeWarning):
+    """Warns when data is degenerate and results may not be reliable."""
+    def __init__(self, msg=None):
+        if msg is None:
+            msg = ("Degenerate data encountered; results may not be reliable.")
+        self.args = (msg,)
+
+
+class ConstantInputWarning(DegenerateDataWarning):
+    """Warns when all values in data are exactly equal."""
+    def __init__(self, msg=None):
+        if msg is None:
+            msg = ("All values in data are exactly equal; "
+                   "results may not be reliable.")
+        self.args = (msg,)
+
+
+class NearConstantInputWarning(DegenerateDataWarning):
+    """Warns when all values in data are nearly equal."""
+    def __init__(self, msg=None):
+        if msg is None:
+            msg = ("All values in data are nearly equal; "
+                   "results may not be reliable.")
+        self.args = (msg,)
+
+
+# Errors
+
+
+class FitError(RuntimeError):
+    """Represents an error condition when fitting a distribution to data."""
+    def __init__(self, msg=None):
+        if msg is None:
+            msg = ("An error occurred when fitting a distribution to data.")
+        self.args = (msg,)
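These are ordinary warning classes, so the standard library `warnings` machinery applies to them unchanged. A minimal sketch, importing from the private module path shown in this diff (scipy also re-exports these names from the public `scipy.stats` namespace):

import warnings
from scipy.stats._warnings_errors import ConstantInputWarning

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    warnings.warn(ConstantInputWarning())  # uses the default message set in __init__

print(caught[0].category.__name__)  # ConstantInputWarning
print(caught[0].message)            # "All values in data are exactly equal; ..."

Because `DegenerateDataWarning` subclasses `RuntimeWarning`, a filter on `RuntimeWarning` catches all three warning classes at once.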
env-llmeval/lib/python3.10/site-packages/scipy/stats/morestats.py
ADDED
@@ -0,0 +1,34 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.stats` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = [  # noqa: F822
+    'mvsdist',
+    'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
+    'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
+    'shapiro', 'anderson', 'ansari', 'bartlett', 'levene',
+    'fligner', 'mood', 'wilcoxon', 'median_test',
+    'circmean', 'circvar', 'circstd', 'anderson_ksamp',
+    'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax',
+    'yeojohnson_normplot', 'annotations', 'namedtuple', 'isscalar', 'log',
+    'around', 'unique', 'arange', 'sort', 'amin', 'amax', 'atleast_1d',
+    'array', 'compress', 'exp', 'ravel', 'count_nonzero', 'arctan2',
+    'hypot', 'optimize', 'find_repeats',
+    'chi2_contingency', 'distributions', 'rv_generic', 'Mean',
+    'Variance', 'Std_dev', 'ShapiroResult', 'AndersonResult',
+    'Anderson_ksampResult', 'AnsariResult', 'BartlettResult',
+    'LeveneResult', 'FlignerResult', 'WilcoxonResult'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    return _sub_module_deprecation(sub_package="stats", module="morestats",
+                                   private_modules=["_morestats"], all=__all__,
+                                   attribute=name)
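A sketch of what this shim does at runtime. `_sub_module_deprecation` is a private SciPy helper; the assumption here, based on how these shims are used across SciPy, is that it emits a `DeprecationWarning` and forwards the attribute lookup to the private `_morestats` module via the module-level `__getattr__` above:

import warnings
from scipy.stats import morestats  # the deprecated shim defined above

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    func = morestats.shapiro        # not defined in the file, so __getattr__ runs

print(caught[0].category)           # expected: <class 'DeprecationWarning'>
print(func.__name__)                # 'shapiro', forwarded from the private module

New code should import `shapiro` and the other names in `__all__` directly from `scipy.stats`, as the header comment instructs.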