Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- llmeval-env/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz +3 -0
- llmeval-env/lib/python3.10/site-packages/scipy/interpolate/tests/data/estimate_gradients_hang.npy +3 -0
- llmeval-env/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so +3 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/__pycache__/_axis_nan_policy.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/__pycache__/_crosstab.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/__pycache__/_fit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/__pycache__/contingency.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/_boost/__init__.py +53 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/_boost/skewnorm_ufunc.cpython-310-x86_64-linux-gnu.so +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/_levy_stable/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/_levy_stable/levyst.cpython-310-x86_64-linux-gnu.so +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py +482 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/_rcont/__init__.py +4 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/_rcont/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py +237 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_axis_nan_policy.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_censored_data.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_contingency.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_continuous_basic.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_continuous_fit_censored.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_discrete_basic.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_entropy.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_hypotests.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_kdeoth.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_basic.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_qmc.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_rank.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_resampling.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_sampling.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_sensitivity_analysis.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_survival.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_tukeylambda_stats.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_variation.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/common_tests.py +351 -0
- llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/__pycache__/_mvt.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -212,3 +212,4 @@ env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow.so.1500 filter=lfs dif
|
|
212 |
llmeval-env/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
213 |
llmeval-env/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
214 |
llmeval-env/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
212 |
llmeval-env/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
213 |
llmeval-env/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
214 |
llmeval-env/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
215 |
+
llmeval-env/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
llmeval-env/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8d6803c0b398f2704c236f1d1b9e8e5ede06bd165a0abb0f228281abbd455ae9
|
3 |
+
size 2648
|
llmeval-env/lib/python3.10/site-packages/scipy/interpolate/tests/data/estimate_gradients_hang.npy
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:406c10857417ff5ea98d8cd28945c9d0e4f5c24f92a48ad0e8fab955bf2477f1
|
3 |
+
size 35680
|
llmeval-env/lib/python3.10/site-packages/scipy/io/_fast_matrix_market/_fmm_core.cpython-310-x86_64-linux-gnu.so
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:719f8c4c68bbb75108c6968d2ba4160a2a35d69eb787ee348943a884ea629827
|
3 |
+
size 3827072
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/__pycache__/_axis_nan_policy.cpython-310.pyc
ADDED
Binary file (19.5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc
ADDED
Binary file (365 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/__pycache__/_crosstab.cpython-310.pyc
ADDED
Binary file (7.35 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/__pycache__/_fit.cpython-310.pyc
ADDED
Binary file (52.3 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/__pycache__/contingency.cpython-310.pyc
ADDED
Binary file (15.1 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/_boost/__init__.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from scipy.stats._boost.beta_ufunc import (
|
2 |
+
_beta_pdf, _beta_cdf, _beta_sf, _beta_ppf,
|
3 |
+
_beta_isf, _beta_mean, _beta_variance,
|
4 |
+
_beta_skewness, _beta_kurtosis_excess,
|
5 |
+
)
|
6 |
+
|
7 |
+
from scipy.stats._boost.binom_ufunc import (
|
8 |
+
_binom_pdf, _binom_cdf, _binom_sf, _binom_ppf,
|
9 |
+
_binom_isf, _binom_mean, _binom_variance,
|
10 |
+
_binom_skewness, _binom_kurtosis_excess,
|
11 |
+
)
|
12 |
+
|
13 |
+
from scipy.stats._boost.nbinom_ufunc import (
|
14 |
+
_nbinom_pdf, _nbinom_cdf, _nbinom_sf, _nbinom_ppf,
|
15 |
+
_nbinom_isf, _nbinom_mean, _nbinom_variance,
|
16 |
+
_nbinom_skewness, _nbinom_kurtosis_excess,
|
17 |
+
)
|
18 |
+
|
19 |
+
from scipy.stats._boost.hypergeom_ufunc import (
|
20 |
+
_hypergeom_pdf, _hypergeom_cdf, _hypergeom_sf, _hypergeom_ppf,
|
21 |
+
_hypergeom_isf, _hypergeom_mean, _hypergeom_variance,
|
22 |
+
_hypergeom_skewness, _hypergeom_kurtosis_excess,
|
23 |
+
)
|
24 |
+
|
25 |
+
from scipy.stats._boost.ncf_ufunc import (
|
26 |
+
_ncf_pdf, _ncf_cdf, _ncf_sf, _ncf_ppf,
|
27 |
+
_ncf_isf, _ncf_mean, _ncf_variance,
|
28 |
+
_ncf_skewness, _ncf_kurtosis_excess,
|
29 |
+
)
|
30 |
+
|
31 |
+
from scipy.stats._boost.ncx2_ufunc import (
|
32 |
+
_ncx2_pdf, _ncx2_cdf, _ncx2_sf, _ncx2_ppf,
|
33 |
+
_ncx2_isf, _ncx2_mean, _ncx2_variance,
|
34 |
+
_ncx2_skewness, _ncx2_kurtosis_excess,
|
35 |
+
)
|
36 |
+
|
37 |
+
from scipy.stats._boost.nct_ufunc import (
|
38 |
+
_nct_pdf, _nct_cdf, _nct_sf, _nct_ppf,
|
39 |
+
_nct_isf, _nct_mean, _nct_variance,
|
40 |
+
_nct_skewness, _nct_kurtosis_excess,
|
41 |
+
)
|
42 |
+
|
43 |
+
from scipy.stats._boost.skewnorm_ufunc import (
|
44 |
+
_skewnorm_pdf, _skewnorm_cdf, _skewnorm_sf, _skewnorm_ppf,
|
45 |
+
_skewnorm_isf, _skewnorm_mean, _skewnorm_variance,
|
46 |
+
_skewnorm_skewness, _skewnorm_kurtosis_excess,
|
47 |
+
)
|
48 |
+
|
49 |
+
from scipy.stats._boost.invgauss_ufunc import (
|
50 |
+
_invgauss_pdf, _invgauss_cdf, _invgauss_sf, _invgauss_ppf,
|
51 |
+
_invgauss_isf, _invgauss_mean, _invgauss_variance,
|
52 |
+
_invgauss_skewness, _invgauss_kurtosis_excess,
|
53 |
+
)
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/_boost/skewnorm_ufunc.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (109 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/_levy_stable/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (32.6 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/_levy_stable/levyst.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (66.5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py
ADDED
@@ -0,0 +1,482 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
from scipy.special import ndtri
|
4 |
+
from scipy.optimize import brentq
|
5 |
+
from ._discrete_distns import nchypergeom_fisher
|
6 |
+
from ._common import ConfidenceInterval
|
7 |
+
|
8 |
+
|
9 |
+
def _sample_odds_ratio(table):
|
10 |
+
"""
|
11 |
+
Given a table [[a, b], [c, d]], compute a*d/(b*c).
|
12 |
+
|
13 |
+
Return nan if the numerator and denominator are 0.
|
14 |
+
Return inf if just the denominator is 0.
|
15 |
+
"""
|
16 |
+
# table must be a 2x2 numpy array.
|
17 |
+
if table[1, 0] > 0 and table[0, 1] > 0:
|
18 |
+
oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1])
|
19 |
+
elif table[0, 0] == 0 or table[1, 1] == 0:
|
20 |
+
oddsratio = np.nan
|
21 |
+
else:
|
22 |
+
oddsratio = np.inf
|
23 |
+
return oddsratio
|
24 |
+
|
25 |
+
|
26 |
+
def _solve(func):
|
27 |
+
"""
|
28 |
+
Solve func(nc) = 0. func must be an increasing function.
|
29 |
+
"""
|
30 |
+
# We could just as well call the variable `x` instead of `nc`, but we
|
31 |
+
# always call this function with functions for which nc (the noncentrality
|
32 |
+
# parameter) is the variable for which we are solving.
|
33 |
+
nc = 1.0
|
34 |
+
value = func(nc)
|
35 |
+
if value == 0:
|
36 |
+
return nc
|
37 |
+
|
38 |
+
# Multiplicative factor by which to increase or decrease nc when
|
39 |
+
# searching for a bracketing interval.
|
40 |
+
factor = 2.0
|
41 |
+
# Find a bracketing interval.
|
42 |
+
if value > 0:
|
43 |
+
nc /= factor
|
44 |
+
while func(nc) > 0:
|
45 |
+
nc /= factor
|
46 |
+
lo = nc
|
47 |
+
hi = factor*nc
|
48 |
+
else:
|
49 |
+
nc *= factor
|
50 |
+
while func(nc) < 0:
|
51 |
+
nc *= factor
|
52 |
+
lo = nc/factor
|
53 |
+
hi = nc
|
54 |
+
|
55 |
+
# lo and hi bracket the solution for nc.
|
56 |
+
nc = brentq(func, lo, hi, xtol=1e-13)
|
57 |
+
return nc
|
58 |
+
|
59 |
+
|
60 |
+
def _nc_hypergeom_mean_inverse(x, M, n, N):
|
61 |
+
"""
|
62 |
+
For the given noncentral hypergeometric parameters x, M, n,and N
|
63 |
+
(table[0,0], total, row 0 sum and column 0 sum, resp., of a 2x2
|
64 |
+
contingency table), find the noncentrality parameter of Fisher's
|
65 |
+
noncentral hypergeometric distribution whose mean is x.
|
66 |
+
"""
|
67 |
+
nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x)
|
68 |
+
return nc
|
69 |
+
|
70 |
+
|
71 |
+
def _hypergeom_params_from_table(table):
|
72 |
+
# The notation M, n and N is consistent with stats.hypergeom and
|
73 |
+
# stats.nchypergeom_fisher.
|
74 |
+
x = table[0, 0]
|
75 |
+
M = table.sum()
|
76 |
+
n = table[0].sum()
|
77 |
+
N = table[:, 0].sum()
|
78 |
+
return x, M, n, N
|
79 |
+
|
80 |
+
|
81 |
+
def _ci_upper(table, alpha):
|
82 |
+
"""
|
83 |
+
Compute the upper end of the confidence interval.
|
84 |
+
"""
|
85 |
+
if _sample_odds_ratio(table) == np.inf:
|
86 |
+
return np.inf
|
87 |
+
|
88 |
+
x, M, n, N = _hypergeom_params_from_table(table)
|
89 |
+
|
90 |
+
# nchypergeom_fisher.cdf is a decreasing function of nc, so we negate
|
91 |
+
# it in the lambda expression.
|
92 |
+
nc = _solve(lambda nc: -nchypergeom_fisher.cdf(x, M, n, N, nc) + alpha)
|
93 |
+
return nc
|
94 |
+
|
95 |
+
|
96 |
+
def _ci_lower(table, alpha):
|
97 |
+
"""
|
98 |
+
Compute the lower end of the confidence interval.
|
99 |
+
"""
|
100 |
+
if _sample_odds_ratio(table) == 0:
|
101 |
+
return 0
|
102 |
+
|
103 |
+
x, M, n, N = _hypergeom_params_from_table(table)
|
104 |
+
|
105 |
+
nc = _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha)
|
106 |
+
return nc
|
107 |
+
|
108 |
+
|
109 |
+
def _conditional_oddsratio(table):
|
110 |
+
"""
|
111 |
+
Conditional MLE of the odds ratio for the 2x2 contingency table.
|
112 |
+
"""
|
113 |
+
x, M, n, N = _hypergeom_params_from_table(table)
|
114 |
+
# Get the bounds of the support. The support of the noncentral
|
115 |
+
# hypergeometric distribution with parameters M, n, and N is the same
|
116 |
+
# for all values of the noncentrality parameter, so we can use 1 here.
|
117 |
+
lo, hi = nchypergeom_fisher.support(M, n, N, 1)
|
118 |
+
|
119 |
+
# Check if x is at one of the extremes of the support. If so, we know
|
120 |
+
# the odds ratio is either 0 or inf.
|
121 |
+
if x == lo:
|
122 |
+
# x is at the low end of the support.
|
123 |
+
return 0
|
124 |
+
if x == hi:
|
125 |
+
# x is at the high end of the support.
|
126 |
+
return np.inf
|
127 |
+
|
128 |
+
nc = _nc_hypergeom_mean_inverse(x, M, n, N)
|
129 |
+
return nc
|
130 |
+
|
131 |
+
|
132 |
+
def _conditional_oddsratio_ci(table, confidence_level=0.95,
|
133 |
+
alternative='two-sided'):
|
134 |
+
"""
|
135 |
+
Conditional exact confidence interval for the odds ratio.
|
136 |
+
"""
|
137 |
+
if alternative == 'two-sided':
|
138 |
+
alpha = 0.5*(1 - confidence_level)
|
139 |
+
lower = _ci_lower(table, alpha)
|
140 |
+
upper = _ci_upper(table, alpha)
|
141 |
+
elif alternative == 'less':
|
142 |
+
lower = 0.0
|
143 |
+
upper = _ci_upper(table, 1 - confidence_level)
|
144 |
+
else:
|
145 |
+
# alternative == 'greater'
|
146 |
+
lower = _ci_lower(table, 1 - confidence_level)
|
147 |
+
upper = np.inf
|
148 |
+
|
149 |
+
return lower, upper
|
150 |
+
|
151 |
+
|
152 |
+
def _sample_odds_ratio_ci(table, confidence_level=0.95,
|
153 |
+
alternative='two-sided'):
|
154 |
+
oddsratio = _sample_odds_ratio(table)
|
155 |
+
log_or = np.log(oddsratio)
|
156 |
+
se = np.sqrt((1/table).sum())
|
157 |
+
if alternative == 'less':
|
158 |
+
z = ndtri(confidence_level)
|
159 |
+
loglow = -np.inf
|
160 |
+
loghigh = log_or + z*se
|
161 |
+
elif alternative == 'greater':
|
162 |
+
z = ndtri(confidence_level)
|
163 |
+
loglow = log_or - z*se
|
164 |
+
loghigh = np.inf
|
165 |
+
else:
|
166 |
+
# alternative is 'two-sided'
|
167 |
+
z = ndtri(0.5*confidence_level + 0.5)
|
168 |
+
loglow = log_or - z*se
|
169 |
+
loghigh = log_or + z*se
|
170 |
+
|
171 |
+
return np.exp(loglow), np.exp(loghigh)
|
172 |
+
|
173 |
+
|
174 |
+
class OddsRatioResult:
|
175 |
+
"""
|
176 |
+
Result of `scipy.stats.contingency.odds_ratio`. See the
|
177 |
+
docstring for `odds_ratio` for more details.
|
178 |
+
|
179 |
+
Attributes
|
180 |
+
----------
|
181 |
+
statistic : float
|
182 |
+
The computed odds ratio.
|
183 |
+
|
184 |
+
* If `kind` is ``'sample'``, this is sample (or unconditional)
|
185 |
+
estimate, given by
|
186 |
+
``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
|
187 |
+
* If `kind` is ``'conditional'``, this is the conditional
|
188 |
+
maximum likelihood estimate for the odds ratio. It is
|
189 |
+
the noncentrality parameter of Fisher's noncentral
|
190 |
+
hypergeometric distribution with the same hypergeometric
|
191 |
+
parameters as `table` and whose mean is ``table[0, 0]``.
|
192 |
+
|
193 |
+
Methods
|
194 |
+
-------
|
195 |
+
confidence_interval :
|
196 |
+
Confidence interval for the odds ratio.
|
197 |
+
"""
|
198 |
+
|
199 |
+
def __init__(self, _table, _kind, statistic):
|
200 |
+
# for now, no need to make _table and _kind public, since this sort of
|
201 |
+
# information is returned in very few `scipy.stats` results
|
202 |
+
self._table = _table
|
203 |
+
self._kind = _kind
|
204 |
+
self.statistic = statistic
|
205 |
+
|
206 |
+
def __repr__(self):
|
207 |
+
return f"OddsRatioResult(statistic={self.statistic})"
|
208 |
+
|
209 |
+
def confidence_interval(self, confidence_level=0.95,
|
210 |
+
alternative='two-sided'):
|
211 |
+
"""
|
212 |
+
Confidence interval for the odds ratio.
|
213 |
+
|
214 |
+
Parameters
|
215 |
+
----------
|
216 |
+
confidence_level: float
|
217 |
+
Desired confidence level for the confidence interval.
|
218 |
+
The value must be given as a fraction between 0 and 1.
|
219 |
+
Default is 0.95 (meaning 95%).
|
220 |
+
|
221 |
+
alternative : {'two-sided', 'less', 'greater'}, optional
|
222 |
+
The alternative hypothesis of the hypothesis test to which the
|
223 |
+
confidence interval corresponds. That is, suppose the null
|
224 |
+
hypothesis is that the true odds ratio equals ``OR`` and the
|
225 |
+
confidence interval is ``(low, high)``. Then the following options
|
226 |
+
for `alternative` are available (default is 'two-sided'):
|
227 |
+
|
228 |
+
* 'two-sided': the true odds ratio is not equal to ``OR``. There
|
229 |
+
is evidence against the null hypothesis at the chosen
|
230 |
+
`confidence_level` if ``high < OR`` or ``low > OR``.
|
231 |
+
* 'less': the true odds ratio is less than ``OR``. The ``low`` end
|
232 |
+
of the confidence interval is 0, and there is evidence against
|
233 |
+
the null hypothesis at the chosen `confidence_level` if
|
234 |
+
``high < OR``.
|
235 |
+
* 'greater': the true odds ratio is greater than ``OR``. The
|
236 |
+
``high`` end of the confidence interval is ``np.inf``, and there
|
237 |
+
is evidence against the null hypothesis at the chosen
|
238 |
+
`confidence_level` if ``low > OR``.
|
239 |
+
|
240 |
+
Returns
|
241 |
+
-------
|
242 |
+
ci : ``ConfidenceInterval`` instance
|
243 |
+
The confidence interval, represented as an object with
|
244 |
+
attributes ``low`` and ``high``.
|
245 |
+
|
246 |
+
Notes
|
247 |
+
-----
|
248 |
+
When `kind` is ``'conditional'``, the limits of the confidence
|
249 |
+
interval are the conditional "exact confidence limits" as described
|
250 |
+
by Fisher [1]_. The conditional odds ratio and confidence interval are
|
251 |
+
also discussed in Section 4.1.2 of the text by Sahai and Khurshid [2]_.
|
252 |
+
|
253 |
+
When `kind` is ``'sample'``, the confidence interval is computed
|
254 |
+
under the assumption that the logarithm of the odds ratio is normally
|
255 |
+
distributed with standard error given by::
|
256 |
+
|
257 |
+
se = sqrt(1/a + 1/b + 1/c + 1/d)
|
258 |
+
|
259 |
+
where ``a``, ``b``, ``c`` and ``d`` are the elements of the
|
260 |
+
contingency table. (See, for example, [2]_, section 3.1.3.2,
|
261 |
+
or [3]_, section 2.3.3).
|
262 |
+
|
263 |
+
References
|
264 |
+
----------
|
265 |
+
.. [1] R. A. Fisher (1935), The logic of inductive inference,
|
266 |
+
Journal of the Royal Statistical Society, Vol. 98, No. 1,
|
267 |
+
pp. 39-82.
|
268 |
+
.. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
|
269 |
+
Methods, Techniques, and Applications, CRC Press LLC, Boca
|
270 |
+
Raton, Florida.
|
271 |
+
.. [3] Alan Agresti, An Introduction to Categorical Data Analysis
|
272 |
+
(second edition), Wiley, Hoboken, NJ, USA (2007).
|
273 |
+
"""
|
274 |
+
if alternative not in ['two-sided', 'less', 'greater']:
|
275 |
+
raise ValueError("`alternative` must be 'two-sided', 'less' or "
|
276 |
+
"'greater'.")
|
277 |
+
|
278 |
+
if confidence_level < 0 or confidence_level > 1:
|
279 |
+
raise ValueError('confidence_level must be between 0 and 1')
|
280 |
+
|
281 |
+
if self._kind == 'conditional':
|
282 |
+
ci = self._conditional_odds_ratio_ci(confidence_level, alternative)
|
283 |
+
else:
|
284 |
+
ci = self._sample_odds_ratio_ci(confidence_level, alternative)
|
285 |
+
return ci
|
286 |
+
|
287 |
+
def _conditional_odds_ratio_ci(self, confidence_level=0.95,
|
288 |
+
alternative='two-sided'):
|
289 |
+
"""
|
290 |
+
Confidence interval for the conditional odds ratio.
|
291 |
+
"""
|
292 |
+
|
293 |
+
table = self._table
|
294 |
+
if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
|
295 |
+
# If both values in a row or column are zero, the p-value is 1,
|
296 |
+
# the odds ratio is NaN and the confidence interval is (0, inf).
|
297 |
+
ci = (0, np.inf)
|
298 |
+
else:
|
299 |
+
ci = _conditional_oddsratio_ci(table,
|
300 |
+
confidence_level=confidence_level,
|
301 |
+
alternative=alternative)
|
302 |
+
return ConfidenceInterval(low=ci[0], high=ci[1])
|
303 |
+
|
304 |
+
def _sample_odds_ratio_ci(self, confidence_level=0.95,
|
305 |
+
alternative='two-sided'):
|
306 |
+
"""
|
307 |
+
Confidence interval for the sample odds ratio.
|
308 |
+
"""
|
309 |
+
if confidence_level < 0 or confidence_level > 1:
|
310 |
+
raise ValueError('confidence_level must be between 0 and 1')
|
311 |
+
|
312 |
+
table = self._table
|
313 |
+
if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
|
314 |
+
# If both values in a row or column are zero, the p-value is 1,
|
315 |
+
# the odds ratio is NaN and the confidence interval is (0, inf).
|
316 |
+
ci = (0, np.inf)
|
317 |
+
else:
|
318 |
+
ci = _sample_odds_ratio_ci(table,
|
319 |
+
confidence_level=confidence_level,
|
320 |
+
alternative=alternative)
|
321 |
+
return ConfidenceInterval(low=ci[0], high=ci[1])
|
322 |
+
|
323 |
+
|
324 |
+
def odds_ratio(table, *, kind='conditional'):
|
325 |
+
r"""
|
326 |
+
Compute the odds ratio for a 2x2 contingency table.
|
327 |
+
|
328 |
+
Parameters
|
329 |
+
----------
|
330 |
+
table : array_like of ints
|
331 |
+
A 2x2 contingency table. Elements must be non-negative integers.
|
332 |
+
kind : str, optional
|
333 |
+
Which kind of odds ratio to compute, either the sample
|
334 |
+
odds ratio (``kind='sample'``) or the conditional odds ratio
|
335 |
+
(``kind='conditional'``). Default is ``'conditional'``.
|
336 |
+
|
337 |
+
Returns
|
338 |
+
-------
|
339 |
+
result : `~scipy.stats._result_classes.OddsRatioResult` instance
|
340 |
+
The returned object has two computed attributes:
|
341 |
+
|
342 |
+
statistic : float
|
343 |
+
* If `kind` is ``'sample'``, this is sample (or unconditional)
|
344 |
+
estimate, given by
|
345 |
+
``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
|
346 |
+
* If `kind` is ``'conditional'``, this is the conditional
|
347 |
+
maximum likelihood estimate for the odds ratio. It is
|
348 |
+
the noncentrality parameter of Fisher's noncentral
|
349 |
+
hypergeometric distribution with the same hypergeometric
|
350 |
+
parameters as `table` and whose mean is ``table[0, 0]``.
|
351 |
+
|
352 |
+
The object has the method `confidence_interval` that computes
|
353 |
+
the confidence interval of the odds ratio.
|
354 |
+
|
355 |
+
See Also
|
356 |
+
--------
|
357 |
+
scipy.stats.fisher_exact
|
358 |
+
relative_risk
|
359 |
+
|
360 |
+
Notes
|
361 |
+
-----
|
362 |
+
The conditional odds ratio was discussed by Fisher (see "Example 1"
|
363 |
+
of [1]_). Texts that cover the odds ratio include [2]_ and [3]_.
|
364 |
+
|
365 |
+
.. versionadded:: 1.10.0
|
366 |
+
|
367 |
+
References
|
368 |
+
----------
|
369 |
+
.. [1] R. A. Fisher (1935), The logic of inductive inference,
|
370 |
+
Journal of the Royal Statistical Society, Vol. 98, No. 1,
|
371 |
+
pp. 39-82.
|
372 |
+
.. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research.
|
373 |
+
Volume I - The analysis of case-control studies. IARC Sci Publ.
|
374 |
+
(32):5-338. PMID: 7216345. (See section 4.2.)
|
375 |
+
.. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
|
376 |
+
Methods, Techniques, and Applications, CRC Press LLC, Boca
|
377 |
+
Raton, Florida.
|
378 |
+
.. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of
|
379 |
+
Cardiovascular Events in Women and Men: A Sex-Specific
|
380 |
+
Meta-analysis of Randomized Controlled Trials."
|
381 |
+
JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006.
|
382 |
+
|
383 |
+
Examples
|
384 |
+
--------
|
385 |
+
In epidemiology, individuals are classified as "exposed" or
|
386 |
+
"unexposed" to some factor or treatment. If the occurrence of some
|
387 |
+
illness is under study, those who have the illness are often
|
388 |
+
classified as "cases", and those without it are "noncases". The
|
389 |
+
counts of the occurrences of these classes gives a contingency
|
390 |
+
table::
|
391 |
+
|
392 |
+
exposed unexposed
|
393 |
+
cases a b
|
394 |
+
noncases c d
|
395 |
+
|
396 |
+
The sample odds ratio may be written ``(a/c) / (b/d)``. ``a/c`` can
|
397 |
+
be interpreted as the odds of a case occurring in the exposed group,
|
398 |
+
and ``b/d`` as the odds of a case occurring in the unexposed group.
|
399 |
+
The sample odds ratio is the ratio of these odds. If the odds ratio
|
400 |
+
is greater than 1, it suggests that there is a positive association
|
401 |
+
between being exposed and being a case.
|
402 |
+
|
403 |
+
Interchanging the rows or columns of the contingency table inverts
|
404 |
+
the odds ratio, so it is import to understand the meaning of labels
|
405 |
+
given to the rows and columns of the table when interpreting the
|
406 |
+
odds ratio.
|
407 |
+
|
408 |
+
In [4]_, the use of aspirin to prevent cardiovascular events in women
|
409 |
+
and men was investigated. The study notably concluded:
|
410 |
+
|
411 |
+
...aspirin therapy reduced the risk of a composite of
|
412 |
+
cardiovascular events due to its effect on reducing the risk of
|
413 |
+
ischemic stroke in women [...]
|
414 |
+
|
415 |
+
The article lists studies of various cardiovascular events. Let's
|
416 |
+
focus on the ischemic stoke in women.
|
417 |
+
|
418 |
+
The following table summarizes the results of the experiment in which
|
419 |
+
participants took aspirin or a placebo on a regular basis for several
|
420 |
+
years. Cases of ischemic stroke were recorded::
|
421 |
+
|
422 |
+
Aspirin Control/Placebo
|
423 |
+
Ischemic stroke 176 230
|
424 |
+
No stroke 21035 21018
|
425 |
+
|
426 |
+
The question we ask is "Is there evidence that the aspirin reduces the
|
427 |
+
risk of ischemic stroke?"
|
428 |
+
|
429 |
+
Compute the odds ratio:
|
430 |
+
|
431 |
+
>>> from scipy.stats.contingency import odds_ratio
|
432 |
+
>>> res = odds_ratio([[176, 230], [21035, 21018]])
|
433 |
+
>>> res.statistic
|
434 |
+
0.7646037659999126
|
435 |
+
|
436 |
+
For this sample, the odds of getting an ischemic stroke for those who have
|
437 |
+
been taking aspirin are 0.76 times that of those
|
438 |
+
who have received the placebo.
|
439 |
+
|
440 |
+
To make statistical inferences about the population under study,
|
441 |
+
we can compute the 95% confidence interval for the odds ratio:
|
442 |
+
|
443 |
+
>>> res.confidence_interval(confidence_level=0.95)
|
444 |
+
ConfidenceInterval(low=0.6241234078749812, high=0.9354102892100372)
|
445 |
+
|
446 |
+
The 95% confidence interval for the conditional odds ratio is
|
447 |
+
approximately (0.62, 0.94).
|
448 |
+
|
449 |
+
The fact that the entire 95% confidence interval falls below 1 supports
|
450 |
+
the authors' conclusion that the aspirin was associated with a
|
451 |
+
statistically significant reduction in ischemic stroke.
|
452 |
+
"""
|
453 |
+
if kind not in ['conditional', 'sample']:
|
454 |
+
raise ValueError("`kind` must be 'conditional' or 'sample'.")
|
455 |
+
|
456 |
+
c = np.asarray(table)
|
457 |
+
|
458 |
+
if c.shape != (2, 2):
|
459 |
+
raise ValueError(f"Invalid shape {c.shape}. The input `table` must be "
|
460 |
+
"of shape (2, 2).")
|
461 |
+
|
462 |
+
if not np.issubdtype(c.dtype, np.integer):
|
463 |
+
raise ValueError("`table` must be an array of integers, but got "
|
464 |
+
f"type {c.dtype}")
|
465 |
+
c = c.astype(np.int64)
|
466 |
+
|
467 |
+
if np.any(c < 0):
|
468 |
+
raise ValueError("All values in `table` must be nonnegative.")
|
469 |
+
|
470 |
+
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
|
471 |
+
# If both values in a row or column are zero, the p-value is NaN and
|
472 |
+
# the odds ratio is NaN.
|
473 |
+
result = OddsRatioResult(_table=c, _kind=kind, statistic=np.nan)
|
474 |
+
return result
|
475 |
+
|
476 |
+
if kind == 'sample':
|
477 |
+
oddsratio = _sample_odds_ratio(c)
|
478 |
+
else: # kind is 'conditional'
|
479 |
+
oddsratio = _conditional_oddsratio(c)
|
480 |
+
|
481 |
+
result = OddsRatioResult(_table=c, _kind=kind, statistic=oddsratio)
|
482 |
+
return result
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/_rcont/__init__.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
from .rcont import rvs_rcont1, rvs_rcont2
|
3 |
+
|
4 |
+
__all__ = ["rvs_rcont1", "rvs_rcont2"]
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/_rcont/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (281 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (299 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py
ADDED
@@ -0,0 +1,237 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import warnings
|
2 |
+
import numpy as np
|
3 |
+
|
4 |
+
from scipy import stats
|
5 |
+
from ._stats_py import _get_pvalue, _rankdata
|
6 |
+
from . import _morestats
|
7 |
+
from ._axis_nan_policy import _broadcast_arrays
|
8 |
+
from ._hypotests import _get_wilcoxon_distr
|
9 |
+
from scipy._lib._util import _lazywhere, _get_nan
|
10 |
+
|
11 |
+
|
12 |
+
class WilcoxonDistribution:
    """Exact null distribution of the Wilcoxon signed-rank statistic.

    Parameters
    ----------
    n : array_like of int
        Number of nonzero paired differences per sample; one exact
        distribution is tabulated per distinct value.
    """

    def __init__(self, n):
        n = np.asarray(n).astype(int, copy=False)
        self.n = n
        # One exact PMF per distinct sample size, indexed by the
        # statistic value 0..n*(n+1)/2; computed once and reused.
        self._dists = {ni: _get_wilcoxon_distr(ni) for ni in np.unique(n)}

    def _cdf1(self, k, n):
        # Scalar CDF: P(T <= k) for sample size `n`.
        pmfs = self._dists[n]
        return pmfs[:k + 1].sum()

    def _cdf(self, k, n):
        # Elementwise CDF over broadcastable `k` and `n`.
        return np.vectorize(self._cdf1, otypes=[float])(k, n)

    def _sf1(self, k, n):
        # Scalar SF (inclusive): P(T >= k) for sample size `n`.
        pmfs = self._dists[n]
        return pmfs[k:].sum()

    def _sf(self, k, n):
        # Elementwise inclusive SF over broadcastable `k` and `n`.
        return np.vectorize(self._sf1, otypes=[float])(k, n)

    def mean(self):
        """Mean of the null distribution, n*(n+1)/4."""
        return self.n * (self.n + 1) / 4

    def _prep(self, k):
        # Normalize `k` to an integer array and pair it with the mean.
        # (Previously also allocated an unused float64 output buffer;
        # that dead allocation has been removed.)
        k = np.asarray(k).astype(int, copy=False)
        return k, self.mean()

    def cdf(self, k):
        """P(T <= k); sums whichever tail is shorter for accuracy."""
        k, mn = self._prep(k)
        return _lazywhere(k <= mn, (k, self.n), self._cdf,
                          f2=lambda k, n: 1 - self._sf(k+1, n))[()]

    def sf(self, k):
        """P(T >= k), inclusive; sums whichever tail is shorter."""
        k, mn = self._prep(k)
        return _lazywhere(k <= mn, (k, self.n), self._sf,
                          f2=lambda k, n: 1 - self._cdf(k-1, n))[()]
51 |
+
|
52 |
+
|
53 |
+
def _wilcoxon_iv(x, y, zero_method, correction, alternative, method, axis):
|
54 |
+
|
55 |
+
axis = np.asarray(axis)[()]
|
56 |
+
message = "`axis` must be an integer."
|
57 |
+
if not np.issubdtype(axis.dtype, np.integer) or axis.ndim != 0:
|
58 |
+
raise ValueError(message)
|
59 |
+
|
60 |
+
message = '`axis` must be compatible with the shape(s) of `x` (and `y`)'
|
61 |
+
try:
|
62 |
+
if y is None:
|
63 |
+
x = np.asarray(x)
|
64 |
+
d = x
|
65 |
+
else:
|
66 |
+
x, y = _broadcast_arrays((x, y), axis=axis)
|
67 |
+
d = x - y
|
68 |
+
d = np.moveaxis(d, axis, -1)
|
69 |
+
except np.AxisError as e:
|
70 |
+
raise ValueError(message) from e
|
71 |
+
|
72 |
+
message = "`x` and `y` must have the same length along `axis`."
|
73 |
+
if y is not None and x.shape[axis] != y.shape[axis]:
|
74 |
+
raise ValueError(message)
|
75 |
+
|
76 |
+
message = "`x` (and `y`, if provided) must be an array of real numbers."
|
77 |
+
if np.issubdtype(d.dtype, np.integer):
|
78 |
+
d = d.astype(np.float64)
|
79 |
+
if not np.issubdtype(d.dtype, np.floating):
|
80 |
+
raise ValueError(message)
|
81 |
+
|
82 |
+
zero_method = str(zero_method).lower()
|
83 |
+
zero_methods = {"wilcox", "pratt", "zsplit"}
|
84 |
+
message = f"`zero_method` must be one of {zero_methods}."
|
85 |
+
if zero_method not in zero_methods:
|
86 |
+
raise ValueError(message)
|
87 |
+
|
88 |
+
corrections = {True, False}
|
89 |
+
message = f"`correction` must be one of {corrections}."
|
90 |
+
if correction not in corrections:
|
91 |
+
raise ValueError(message)
|
92 |
+
|
93 |
+
alternative = str(alternative).lower()
|
94 |
+
alternatives = {"two-sided", "less", "greater"}
|
95 |
+
message = f"`alternative` must be one of {alternatives}."
|
96 |
+
if alternative not in alternatives:
|
97 |
+
raise ValueError(message)
|
98 |
+
|
99 |
+
if not isinstance(method, stats.PermutationMethod):
|
100 |
+
methods = {"auto", "approx", "exact"}
|
101 |
+
message = (f"`method` must be one of {methods} or "
|
102 |
+
"an instance of `stats.PermutationMethod`.")
|
103 |
+
if method not in methods:
|
104 |
+
raise ValueError(message)
|
105 |
+
|
106 |
+
# logic unchanged here for backward compatibility
|
107 |
+
n_zero = np.sum(d == 0, axis=-1)
|
108 |
+
has_zeros = np.any(n_zero > 0)
|
109 |
+
if method == "auto":
|
110 |
+
if d.shape[-1] <= 50 and not has_zeros:
|
111 |
+
method = "exact"
|
112 |
+
else:
|
113 |
+
method = "approx"
|
114 |
+
|
115 |
+
n_zero = np.sum(d == 0)
|
116 |
+
if n_zero > 0 and method == "exact":
|
117 |
+
method = "approx"
|
118 |
+
warnings.warn("Exact p-value calculation does not work if there are "
|
119 |
+
"zeros. Switching to normal approximation.",
|
120 |
+
stacklevel=2)
|
121 |
+
|
122 |
+
if (method == "approx" and zero_method in ["wilcox", "pratt"]
|
123 |
+
and n_zero == d.size and d.size > 0 and d.ndim == 1):
|
124 |
+
raise ValueError("zero_method 'wilcox' and 'pratt' do not "
|
125 |
+
"work if x - y is zero for all elements.")
|
126 |
+
|
127 |
+
if 0 < d.shape[-1] < 10 and method == "approx":
|
128 |
+
warnings.warn("Sample size too small for normal approximation.", stacklevel=2)
|
129 |
+
|
130 |
+
return d, zero_method, correction, alternative, method, axis
|
131 |
+
|
132 |
+
|
133 |
+
def _wilcoxon_statistic(d, zero_method='wilcox'):
    """Compute the Wilcoxon signed-rank statistic along the last axis.

    Parameters
    ----------
    d : ndarray of float
        Paired differences; NaNs (and, for ``zero_method='wilcox'``,
        zeros) are excluded from the rank sums.
    zero_method : {'wilcox', 'pratt', 'zsplit'}
        How zero differences contribute to the statistic.

    Returns
    -------
    r_plus, r_minus : positive/negative rank sums
    se : tie-corrected standard error of ``r_plus`` under the null
    z : normal-approximation z-statistic of ``r_plus``
    count : number of differences actually ranked per sample
    """
    i_zeros = (d == 0)

    if zero_method == 'wilcox':
        # Wilcoxon's method for treating zeros was to remove them from
        # the calculation. We do this by replacing 0s with NaNs, which
        # are ignored anyway.
        if not d.flags['WRITEABLE']:
            # input may be a read-only view; copy before mutating
            d = d.copy()
        d[i_zeros] = np.nan

    i_nan = np.isnan(d)
    n_nan = np.sum(i_nan, axis=-1)
    count = d.shape[-1] - n_nan

    # ranks of |d| with ties averaged; `t` carries the tie counts
    # needed for the variance correction below
    r, t = _rankdata(abs(d), 'average', return_ties=True)

    r_plus = np.sum((d > 0) * r, axis=-1)
    r_minus = np.sum((d < 0) * r, axis=-1)

    if zero_method == "zsplit":
        # The "zero-split" method for treating zeros is to add half their contribution
        # to r_plus and half to r_minus.
        # See gh-2263 for the origin of this method.
        r_zero_2 = np.sum(i_zeros * r, axis=-1) / 2
        r_plus += r_zero_2
        r_minus += r_zero_2

    # null mean of r_plus, and (pre-scaled) variance term; `se` is
    # finalized as sqrt(se/24) after tie correction below
    mn = count * (count + 1.) * 0.25
    se = count * (count + 1.) * (2. * count + 1.)

    if zero_method == "pratt":
        # Pratt's method for treating zeros was just to modify the z-statistic.

        # normal approximation needs to be adjusted, see Cureton (1967)
        n_zero = i_zeros.sum(axis=-1)
        mn -= n_zero * (n_zero + 1.) * 0.25
        se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)

        # zeros are not to be included in tie-correction.
        # any tie counts corresponding with zeros are in the 0th column
        t[i_zeros.any(axis=-1), 0] = 0

    tie_correct = (t**3 - t).sum(axis=-1)
    se -= tie_correct/2
    se = np.sqrt(se / 24)

    z = (r_plus - mn) / se

    return r_plus, r_minus, se, z, count
184 |
+
|
185 |
+
|
186 |
+
def _correction_sign(z, alternative):
|
187 |
+
if alternative == 'greater':
|
188 |
+
return 1
|
189 |
+
elif alternative == 'less':
|
190 |
+
return -1
|
191 |
+
else:
|
192 |
+
return np.sign(z)
|
193 |
+
|
194 |
+
|
195 |
+
def _wilcoxon_nd(x, y=None, zero_method='wilcox', correction=True,
                 alternative='two-sided', method='auto', axis=0):
    """N-dimensional Wilcoxon signed-rank test backend.

    Validates inputs, computes the statistic along `axis`, and evaluates
    the p-value with the normal approximation, the exact distribution,
    or a permutation test, depending on `method`.
    """
    # validation also resolves method='auto' to 'exact'/'approx'
    temp = _wilcoxon_iv(x, y, zero_method, correction, alternative, method, axis)
    d, zero_method, correction, alternative, method, axis = temp

    if d.size == 0:
        # empty input: NaN result of the appropriate dtype
        NaN = _get_nan(d)
        res = _morestats.WilcoxonResult(statistic=NaN, pvalue=NaN)
        if method == 'approx':
            res.zstatistic = NaN
        return res

    r_plus, r_minus, se, z, count = _wilcoxon_statistic(d, zero_method)

    if method == 'approx':
        if correction:
            # continuity correction of 0.5 toward the alternative
            sign = _correction_sign(z, alternative)
            z -= sign * 0.5 / se
        p = _get_pvalue(z, stats.norm, alternative)
    elif method == 'exact':
        dist = WilcoxonDistribution(count)
        if alternative == 'less':
            p = dist.cdf(r_plus)
        elif alternative == 'greater':
            p = dist.sf(r_plus)
        else:
            # two-sided: double the smaller tail, clipped to [0, 1]
            p = 2 * np.minimum(dist.sf(r_plus), dist.cdf(r_plus))
            p = np.clip(p, 0, 1)
    else:  # `PermutationMethod` instance (already validated)
        p = stats.permutation_test(
            (d,), lambda d: _wilcoxon_statistic(d, zero_method)[0],
            permutation_type='samples', **method._asdict(),
            alternative=alternative, axis=-1).pvalue

    # for backward compatibility...
    statistic = np.minimum(r_plus, r_minus) if alternative=='two-sided' else r_plus
    z = -np.abs(z) if (alternative == 'two-sided' and method == 'approx') else z

    res = _morestats.WilcoxonResult(statistic=statistic, pvalue=p[()])
    if method == 'approx':
        # z-statistic is only defined for the normal approximation
        res.zstatistic = z[()]
    return res
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (190 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_axis_nan_policy.cpython-310.pyc
ADDED
Binary file (34 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc
ADDED
Binary file (16.6 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc
ADDED
Binary file (1.73 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_censored_data.cpython-310.pyc
ADDED
Binary file (6.12 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_contingency.cpython-310.pyc
ADDED
Binary file (5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_continuous_basic.cpython-310.pyc
ADDED
Binary file (27.5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_continuous_fit_censored.cpython-310.pyc
ADDED
Binary file (22.4 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc
ADDED
Binary file (3.87 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_discrete_basic.cpython-310.pyc
ADDED
Binary file (14 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_entropy.cpython-310.pyc
ADDED
Binary file (10.2 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc
ADDED
Binary file (12.2 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fit.cpython-310.pyc
ADDED
Binary file (30.5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_hypotests.cpython-310.pyc
ADDED
Binary file (55.5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_kdeoth.cpython-310.pyc
ADDED
Binary file (16.5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc
ADDED
Binary file (99.8 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_basic.cpython-310.pyc
ADDED
Binary file (68.8 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc
ADDED
Binary file (5.57 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc
ADDED
Binary file (112 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_qmc.cpython-310.pyc
ADDED
Binary file (44.4 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_rank.cpython-310.pyc
ADDED
Binary file (12.9 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_resampling.cpython-310.pyc
ADDED
Binary file (52.1 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_sampling.cpython-310.pyc
ADDED
Binary file (46.8 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_sensitivity_analysis.cpython-310.pyc
ADDED
Binary file (8.37 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc
ADDED
Binary file (265 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_survival.cpython-310.pyc
ADDED
Binary file (13.9 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_tukeylambda_stats.cpython-310.pyc
ADDED
Binary file (2.23 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_variation.cpython-310.pyc
ADDED
Binary file (6.98 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/common_tests.py
ADDED
@@ -0,0 +1,351 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pickle
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
import numpy.testing as npt
|
5 |
+
from numpy.testing import assert_allclose, assert_equal
|
6 |
+
from pytest import raises as assert_raises
|
7 |
+
|
8 |
+
import numpy.ma.testutils as ma_npt
|
9 |
+
|
10 |
+
from scipy._lib._util import (
|
11 |
+
getfullargspec_no_self as _getfullargspec, np_long
|
12 |
+
)
|
13 |
+
from scipy import stats
|
14 |
+
|
15 |
+
|
16 |
+
def check_named_results(res, attributes, ma=False):
    """Check that positional and named fields of a result object agree."""
    # masked arrays need the ma-aware comparison helpers
    assert_fn = ma_npt.assert_equal if ma else npt.assert_equal
    for position, name in enumerate(attributes):
        assert_fn(res[position], getattr(res, name))
22 |
+
|
23 |
+
|
24 |
+
def check_normalization(distfn, args, distname):
    """Verify the distribution integrates to one (moment, expect, cdf)."""
    npt.assert_allclose(distfn.moment(0, *args), 1.0)

    # rv_histogram instances are piecewise approximations, so allow a
    # looser absolute tolerance for the numerical integral.
    if distname == "rv_histogram_instance":
        atol, rtol = 1e-5, 0
    else:
        atol, rtol = 1e-7, 1e-7

    npt.assert_allclose(distfn.expect(lambda x: 1, args=args), 1.0,
                        atol=atol, rtol=rtol, err_msg=distname,
                        verbose=True)

    _lower, upper = distfn.support(*args)
    npt.assert_allclose(distfn.cdf(upper, *args), 1.0)
40 |
+
|
41 |
+
|
42 |
+
def check_moment(distfn, arg, m, v, msg):
    """Compare the first two raw moments with the expected mean/variance."""
    first = distfn.moment(1, *arg)
    second = distfn.moment(2, *arg)

    if np.isinf(m):
        # or np.isnan(first)
        npt.assert_(np.isinf(first),
                    msg + ' - 1st moment -infinite, m1=%s' % str(first))
    else:
        npt.assert_almost_equal(first, m, decimal=10,
                                err_msg=msg + ' - 1st moment')

    if np.isinf(v):
        # or np.isnan(second)
        npt.assert_(np.isinf(second),
                    msg + f' - 2nd moment -infinite, m2={second!r}')
    else:
        npt.assert_almost_equal(second - first * first, v, decimal=10,
                                err_msg=msg + ' - 2ndt moment')
57 |
+
|
58 |
+
|
59 |
+
def check_mean_expect(distfn, arg, m, msg):
    """Check that expect(identity) reproduces the mean (finite case only)."""
    if not np.isfinite(m):
        return
    first = distfn.expect(lambda x: x, arg)
    npt.assert_almost_equal(first, m, decimal=5,
                            err_msg=msg + ' - 1st moment (expect)')
64 |
+
|
65 |
+
|
66 |
+
def check_var_expect(distfn, arg, m, v, msg):
    """Check E[X^2] == v + m**2 via expect (finite variance only)."""
    # these distributions need a looser relative tolerance
    loose = {"rv_histogram_instance", "ksone"}
    kwargs = {'rtol': 5e-6} if msg in loose else {}
    if np.isfinite(v):
        second = distfn.expect(lambda x: x*x, arg)
        npt.assert_allclose(second, v + m*m, **kwargs)
72 |
+
|
73 |
+
|
74 |
+
def check_skew_expect(distfn, arg, m, v, s, msg):
    """Check the third central moment against skewness * v**1.5."""
    if not np.isfinite(s):
        # infinite/undefined skewness must be reported as NaN
        npt.assert_(np.isnan(s))
        return
    third = distfn.expect(lambda x: np.power(x-m, 3), arg)
    npt.assert_almost_equal(third, s * np.power(v, 1.5),
                            decimal=5, err_msg=msg + ' - skew')
81 |
+
|
82 |
+
|
83 |
+
def check_kurt_expect(distfn, arg, m, v, k, msg):
    """Check the fourth central moment against (k + 3) * v**2."""
    if np.isfinite(k):
        fourth = distfn.expect(lambda x: np.power(x-m, 4), arg)
        npt.assert_allclose(fourth, (k + 3.) * np.power(v, 2),
                            atol=1e-5, rtol=1e-5,
                            err_msg=msg + ' - kurtosis')
    elif not np.isposinf(k):
        # +inf kurtosis is acceptable; anything else non-finite must be NaN
        npt.assert_(np.isnan(k))
91 |
+
|
92 |
+
|
93 |
+
def check_munp_expect(dist, args, msg):
    """Spot-check a higher moment when the distribution overrides _munp."""
    # If _munp is overridden, test a higher moment. (Before gh-18634, some
    # distributions had issues with moments 5 and higher.)
    if dist._munp.__func__ == stats.rv_continuous._munp:
        return
    res = dist.moment(5, *args)  # shouldn't raise an error
    ref = dist.expect(lambda x: x ** 5, args, lb=-np.inf, ub=np.inf)
    if not np.isfinite(res):  # could be valid; automated test can't know
        return
    # loose tolerance, mostly to see whether _munp returns *something*
    assert_allclose(res, ref, atol=1e-10, rtol=1e-4,
                    err_msg=msg + ' - higher moment / _munp')
104 |
+
|
105 |
+
|
106 |
+
def check_entropy(distfn, arg, msg):
    """Smoke-test that the differential entropy is not NaN."""
    value = distfn.entropy(*arg)
    npt.assert_(not np.isnan(value), msg + 'test Entropy is nan')
109 |
+
|
110 |
+
|
111 |
+
def check_private_entropy(distfn, args, superclass):
    """Distribution-specific _entropy must match the generic implementation."""
    specific = distfn._entropy(*args)
    generic = superclass._entropy(distfn, *args)
    npt.assert_allclose(specific, generic)
115 |
+
|
116 |
+
|
117 |
+
def check_entropy_vect_scale(distfn, arg):
    """Entropy must broadcast over `scale` and match elementwise calls."""
    # first a 2-d scale array, then a 1-d list containing an invalid
    # (negative) scale to check NaN propagation and casting
    for sc in (np.asarray([[1, 2], [3, 4]]), [1, 2, -3]):
        vectorized = distfn.entropy(*arg, scale=sc)
        elementwise = [distfn.entropy(*arg, scale=s) for s in np.ravel(sc)]
        elementwise = np.asarray(elementwise).reshape(vectorized.shape)
        assert_allclose(vectorized, elementwise, atol=1e-14)
131 |
+
|
132 |
+
|
133 |
+
def check_edge_support(distfn, args):
    """Make sure x = a and x = b (the support edges) are handled correctly."""
    lo, hi = distfn.support(*args)
    if isinstance(distfn, stats.rv_discrete):
        # for discrete distributions the cdf is 0 strictly below a
        lo = lo - 1
    edges = (lo, hi)

    npt.assert_equal(distfn.cdf(edges, *args), [0.0, 1.0])
    npt.assert_equal(distfn.sf(edges, *args), [1.0, 0.0])

    if distfn.name not in ('skellam', 'dlaplace'):
        # with a = -inf, log(0) generates warnings
        npt.assert_equal(distfn.logcdf(edges, *args), [-np.inf, 0.0])
        npt.assert_equal(distfn.logsf(edges, *args), [0.0, -np.inf])

    npt.assert_equal(distfn.ppf([0.0, 1.0], *args), edges)
    npt.assert_equal(distfn.isf([0.0, 1.0], *args), edges[::-1])

    # out-of-bounds for isf & ppf
    npt.assert_(np.isnan(distfn.isf([-1, 2], *args)).all())
    npt.assert_(np.isnan(distfn.ppf([-1, 2], *args)).all())
153 |
+
|
154 |
+
|
155 |
+
def check_named_args(distfn, x, shape_args, defaults, meths):
    """Check that the distribution's methods accept shape parameters
    passed as keyword arguments, and that the advertised shapes agree
    with the `_parse_args` signature."""
    ## Check calling w/ named arguments.

    # check consistency of shapes, numargs and _parse signature
    signature = _getfullargspec(distfn._parse_args)
    npt.assert_(signature.varargs is None)
    npt.assert_(signature.varkw is None)
    npt.assert_(not signature.kwonlyargs)
    npt.assert_(list(signature.defaults) == list(defaults))

    shape_argnames = signature.args[:-len(defaults)]  # a, b, loc=0, scale=1
    if distfn.shapes:
        shapes_ = distfn.shapes.replace(',', ' ').split()
    else:
        shapes_ = ''
    npt.assert_(len(shapes_) == distfn.numargs)
    npt.assert_(len(shapes_) == len(shape_argnames))

    # check calling w/ named arguments
    shape_args = list(shape_args)

    # baseline: all shapes positional; every value must be finite
    vals = [meth(x, *shape_args) for meth in meths]
    npt.assert_(np.all(np.isfinite(vals)))

    # convert the trailing positional args to keyword args one at a
    # time and re-evaluate; results must not change
    names, a, k = shape_argnames[:], shape_args[:], {}
    while names:
        k.update({names.pop(): a.pop()})
        v = [meth(x, *a, **k) for meth in meths]
        npt.assert_array_equal(vals, v)
        if 'n' not in k.keys():
            # `n` is first parameter of moment(), so can't be used as named arg
            npt.assert_equal(distfn.moment(1, *a, **k),
                             distfn.moment(1, *shape_args))

    # unknown arguments should not go through:
    k.update({'kaboom': 42})
    assert_raises(TypeError, distfn.cdf, x, **k)
192 |
+
|
193 |
+
|
194 |
+
def check_random_state_property(distfn, args):
    """Check the `random_state` attribute of a distribution *instance*:
    seeding via None/int/RandomState/Generator must all work, and a
    per-call `random_state` must not disturb the instance-level one."""
    # check the random_state attribute of a distribution *instance*

    # This test fiddles with distfn.random_state. This breaks other tests,
    # hence need to save it and then restore.
    rndm = distfn.random_state

    # baseline: this relies on the global state
    np.random.seed(1234)
    distfn.random_state = None
    r0 = distfn.rvs(*args, size=8)

    # use an explicit instance-level random_state
    distfn.random_state = 1234
    r1 = distfn.rvs(*args, size=8)
    npt.assert_equal(r0, r1)

    # an equivalently-seeded RandomState must give the same stream
    distfn.random_state = np.random.RandomState(1234)
    r2 = distfn.rvs(*args, size=8)
    npt.assert_equal(r0, r2)

    # check that np.random.Generator can be used (numpy >= 1.17)
    if hasattr(np.random, 'default_rng'):
        # obtain a np.random.Generator object
        rng = np.random.default_rng(1234)
        distfn.rvs(*args, size=1, random_state=rng)

    # can override the instance-level random_state for an individual .rvs call
    distfn.random_state = 2
    orig_state = distfn.random_state.get_state()

    r3 = distfn.rvs(*args, size=8, random_state=np.random.RandomState(1234))
    npt.assert_equal(r0, r3)

    # ... and that does not alter the instance-level random_state!
    npt.assert_equal(distfn.random_state.get_state(), orig_state)

    # finally, restore the random_state
    distfn.random_state = rndm
233 |
+
|
234 |
+
|
235 |
+
def check_meth_dtype(distfn, arg, meths):
|
236 |
+
q0 = [0.25, 0.5, 0.75]
|
237 |
+
x0 = distfn.ppf(q0, *arg)
|
238 |
+
x_cast = [x0.astype(tp) for tp in (np_long, np.float16, np.float32,
|
239 |
+
np.float64)]
|
240 |
+
|
241 |
+
for x in x_cast:
|
242 |
+
# casting may have clipped the values, exclude those
|
243 |
+
distfn._argcheck(*arg)
|
244 |
+
x = x[(distfn.a < x) & (x < distfn.b)]
|
245 |
+
for meth in meths:
|
246 |
+
val = meth(x, *arg)
|
247 |
+
npt.assert_(val.dtype == np.float64)
|
248 |
+
|
249 |
+
|
250 |
+
def check_ppf_dtype(distfn, arg):
    """ppf/isf must return float64 for any float input dtype."""
    q0 = np.asarray([0.25, 0.5, 0.75])
    for tp in (np.float16, np.float32, np.float64):
        q = q0.astype(tp)
        for meth in (distfn.ppf, distfn.isf):
            npt.assert_(meth(q, *arg).dtype == np.float64)
257 |
+
|
258 |
+
|
259 |
+
def check_cmplx_deriv(distfn, arg):
    """Check pdf/cdf/sf consistency via complex-step differentiation:
    since distributions accept complex arguments, d/dx f(x) can be
    approximated by Im(f(x + ih)) / h with no subtractive cancellation."""
    # Distributions allow complex arguments.
    def deriv(f, x, *arg):
        # complex-step derivative of f at x
        x = np.asarray(x)
        h = 1e-10
        return (f(x + h*1j, *arg)/h).imag

    x0 = distfn.ppf([0.25, 0.51, 0.75], *arg)
    x_cast = [x0.astype(tp) for tp in (np_long, np.float16, np.float32,
                                       np.float64)]

    for x in x_cast:
        # casting may have clipped the values, exclude those
        distfn._argcheck(*arg)
        x = x[(distfn.a < x) & (x < distfn.b)]

        # d/dx cdf = pdf, d/dx sf = -pdf, and the log-variants divide
        # through by the function value (chain rule)
        pdf, cdf, sf = distfn.pdf(x, *arg), distfn.cdf(x, *arg), distfn.sf(x, *arg)
        assert_allclose(deriv(distfn.cdf, x, *arg), pdf, rtol=1e-5)
        assert_allclose(deriv(distfn.logcdf, x, *arg), pdf/cdf, rtol=1e-5)

        assert_allclose(deriv(distfn.sf, x, *arg), -pdf, rtol=1e-5)
        assert_allclose(deriv(distfn.logsf, x, *arg), -pdf/sf, rtol=1e-5)

        assert_allclose(deriv(distfn.logpdf, x, *arg),
                        deriv(distfn.pdf, x, *arg) / distfn.pdf(x, *arg),
                        rtol=1e-5)
285 |
+
|
286 |
+
|
287 |
+
def check_pickling(distfn, args):
    """Check that a distribution instance (frozen and unfrozen) pickles
    and unpickles, reproducing the same random stream and quantiles."""
    # check that a distribution instance pickles and unpickles
    # pay special attention to the random_state property

    # save the random_state (restore later)
    rndm = distfn.random_state

    # check unfrozen
    distfn.random_state = 1234
    distfn.rvs(*args, size=8)
    s = pickle.dumps(distfn)
    r0 = distfn.rvs(*args, size=8)

    # the unpickled copy must continue the stream from the pickled state
    unpickled = pickle.loads(s)
    r1 = unpickled.rvs(*args, size=8)
    npt.assert_equal(r0, r1)

    # also smoke test some methods
    medians = [distfn.ppf(0.5, *args), unpickled.ppf(0.5, *args)]
    npt.assert_equal(medians[0], medians[1])
    npt.assert_equal(distfn.cdf(medians[0], *args),
                     unpickled.cdf(medians[1], *args))

    # check frozen pickling/unpickling with rvs
    frozen_dist = distfn(*args)
    pkl = pickle.dumps(frozen_dist)
    unpickled = pickle.loads(pkl)

    r0 = frozen_dist.rvs(size=8)
    r1 = unpickled.rvs(size=8)
    npt.assert_equal(r0, r1)

    # check pickling/unpickling of .fit method
    if hasattr(distfn, "fit"):
        fit_function = distfn.fit
        pickled_fit_function = pickle.dumps(fit_function)
        unpickled_fit_function = pickle.loads(pickled_fit_function)
        assert fit_function.__name__ == unpickled_fit_function.__name__ == "fit"

    # restore the random_state
    distfn.random_state = rndm
328 |
+
|
329 |
+
|
330 |
+
def check_freezing(distfn, args):
    """Freezing with loc/scale must not fail (regression test, gh-11089)."""
    # regression test for gh-11089: freezing a distribution fails
    # if loc and/or scale are specified
    if isinstance(distfn, stats.rv_continuous):
        locscale = {'loc': 1, 'scale': 2}
    else:
        # discrete distributions take no `scale`
        locscale = {'loc': 1}

    frozen = distfn(*args, **locscale)
    reference = distfn(*args)
    assert frozen.a == reference.a
    assert frozen.b == reference.b
341 |
+
|
342 |
+
|
343 |
+
def check_rvs_broadcast(distfunc, distname, allargs, shape, shape_only, otype):
    """rvs must broadcast its arguments; optionally compare values too."""
    np.random.seed(123)
    sample = distfunc.rvs(*allargs)
    assert_equal(sample.shape, shape, "%s: rvs failed to broadcast" % distname)
    if shape_only:
        return
    # re-draw elementwise with the same seed; the broadcast draw must
    # match the per-element draws
    rvs = np.vectorize(lambda *allargs: distfunc.rvs(*allargs), otypes=otype)
    np.random.seed(123)
    expected = rvs(*allargs)
    assert_allclose(sample, expected, rtol=1e-13)
llmeval-env/lib/python3.10/site-packages/scipy/stats/tests/data/__pycache__/_mvt.cpython-310.pyc
ADDED
Binary file (4.05 kB). View file
|
|