diff --git a/ckpts/universal/global_step40/zero/10.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/10.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..919d342e4984b7e4919d9787592c2858c92deba7 --- /dev/null +++ b/ckpts/universal/global_step40/zero/10.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2499dee742d6c4650f7702856b9e26785b15c99ae5059300b818076b64439310 +size 9372 diff --git a/ckpts/universal/global_step40/zero/10.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/10.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..34bb673ec2830747255b6fe535ff8d900e238baf --- /dev/null +++ b/ckpts/universal/global_step40/zero/10.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6e85e9f611cc866b81ea8aa8bcb7296f7d76c074e5f67845124bbe9d5c2ab6f +size 9387 diff --git a/ckpts/universal/global_step40/zero/24.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/24.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..2d0bd609759be7faef99d812c992c8231cd0341f --- /dev/null +++ b/ckpts/universal/global_step40/zero/24.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e7e6fa8ccb9cc938eef23aaabb6fa7fa27a87928118f22a4289a5cdb895c8e1 +size 16778317 diff --git a/ckpts/universal/global_step40/zero/8.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/8.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..5683b9e2e461f37b8240ddd78720110f4bbc7861 --- /dev/null +++ b/ckpts/universal/global_step40/zero/8.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17cea6275bb3db06ea0a6bea04b88be865cf24394828603fe67243c1af874849 +size 9372 diff --git a/ckpts/universal/global_step40/zero/8.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/8.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..b46750264efea3e5de284bb62ed4ae93756bcfd2 --- /dev/null +++ b/ckpts/universal/global_step40/zero/8.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d73f7b716570ed0cfc63d123110f9d5873e53868510d5511d8b75b2bf3cc7e0 +size 9387 diff --git a/ckpts/universal/global_step40/zero/8.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/8.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..26ec95f541e2a186dfde4c31c5414e46210e4e37 --- /dev/null +++ b/ckpts/universal/global_step40/zero/8.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:277b57f89c06ff0b973db615ac858f9a0783bf79da84adb04b4d997e05764998 +size 9293 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ade6ef216cee77d3768603236d06c75d12cf0854 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_constants.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_entropy.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_entropy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88685c8c786de3d300e6acc45c41d26986cec524 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_entropy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_extras.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_extras.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20e95db63dfce498532be68e06712a704ffa8518 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_extras.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_odds_ratio.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_odds_ratio.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b9cb5f0878414be47fe47905edab767f49a512f Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_odds_ratio.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bcaf4a5415ea8b11193e27320b7c91311b11af8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/morestats.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/morestats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28548626ccc669d3b646fdcd082918bceca05ed8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/morestats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/stats.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/stats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5851448e0544e1c765af7a1331911835604a088 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/stats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/__init__.py b/venv/lib/python3.10/site-packages/scipy/stats/_boost/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..681694192be623c0920955fd1bf8573e516060ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_boost/__init__.py @@ -0,0 +1,53 @@ +from scipy.stats._boost.beta_ufunc import ( + _beta_pdf, _beta_cdf, _beta_sf, _beta_ppf, + _beta_isf, _beta_mean, _beta_variance, + _beta_skewness, _beta_kurtosis_excess, +) + +from scipy.stats._boost.binom_ufunc import ( + _binom_pdf, _binom_cdf, _binom_sf, _binom_ppf, + _binom_isf, _binom_mean, _binom_variance, + _binom_skewness, _binom_kurtosis_excess, +) + +from scipy.stats._boost.nbinom_ufunc import ( + _nbinom_pdf, _nbinom_cdf, _nbinom_sf, _nbinom_ppf, + _nbinom_isf, _nbinom_mean, _nbinom_variance, + _nbinom_skewness, _nbinom_kurtosis_excess, +) + +from scipy.stats._boost.hypergeom_ufunc import ( + _hypergeom_pdf, _hypergeom_cdf, _hypergeom_sf, _hypergeom_ppf, + _hypergeom_isf, _hypergeom_mean, 
_hypergeom_variance, + _hypergeom_skewness, _hypergeom_kurtosis_excess, +) + +from scipy.stats._boost.ncf_ufunc import ( + _ncf_pdf, _ncf_cdf, _ncf_sf, _ncf_ppf, + _ncf_isf, _ncf_mean, _ncf_variance, + _ncf_skewness, _ncf_kurtosis_excess, +) + +from scipy.stats._boost.ncx2_ufunc import ( + _ncx2_pdf, _ncx2_cdf, _ncx2_sf, _ncx2_ppf, + _ncx2_isf, _ncx2_mean, _ncx2_variance, + _ncx2_skewness, _ncx2_kurtosis_excess, +) + +from scipy.stats._boost.nct_ufunc import ( + _nct_pdf, _nct_cdf, _nct_sf, _nct_ppf, + _nct_isf, _nct_mean, _nct_variance, + _nct_skewness, _nct_kurtosis_excess, +) + +from scipy.stats._boost.skewnorm_ufunc import ( + _skewnorm_pdf, _skewnorm_cdf, _skewnorm_sf, _skewnorm_ppf, + _skewnorm_isf, _skewnorm_mean, _skewnorm_variance, + _skewnorm_skewness, _skewnorm_kurtosis_excess, +) + +from scipy.stats._boost.invgauss_ufunc import ( + _invgauss_pdf, _invgauss_cdf, _invgauss_sf, _invgauss_ppf, + _invgauss_isf, _invgauss_mean, _invgauss_variance, + _invgauss_skewness, _invgauss_kurtosis_excess, +) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/_boost/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56cc7ebf44b4d0746838c920e1094602270fb910 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_boost/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/beta_ufunc.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_boost/beta_ufunc.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c6fc8abdce1ed48fbaeee945246207ad81718f4d Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_boost/beta_ufunc.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/binom_ufunc.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_boost/binom_ufunc.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d0829c2805ac1d180b68bc5f404e6397c46ef499 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_boost/binom_ufunc.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/hypergeom_ufunc.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_boost/hypergeom_ufunc.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..bec1f11d81efbd225fedf58f0c65b4fea6580e0b Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_boost/hypergeom_ufunc.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/invgauss_ufunc.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_boost/invgauss_ufunc.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1598e2cfd2412211079be6328b5544d4ca17d63f Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_boost/invgauss_ufunc.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/nbinom_ufunc.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_boost/nbinom_ufunc.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 
0000000000000000000000000000000000000000..5e08881cfbdf9a548185c791e3bc057902e8fd83
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_boost/nbinom_ufunc.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/ncf_ufunc.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_boost/ncf_ufunc.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..97edc4cc3a5742b6bd3661d41c9146562dce7e4c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_boost/ncf_ufunc.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/nct_ufunc.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_boost/nct_ufunc.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..d0dabbae8983dad265c6cec30ee543d0698dc0d4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_boost/nct_ufunc.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/ncx2_ufunc.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_boost/ncx2_ufunc.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b6b23eef7d8139e4f34b600e5a9d6c4aa23f7b9a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_boost/ncx2_ufunc.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_boost/skewnorm_ufunc.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_boost/skewnorm_ufunc.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..7f44e7c4141468c10758e95f6de1b33f62fcda13
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_boost/skewnorm_ufunc.cpython-310-x86_64-linux-gnu.so differ
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_levy_stable/__init__.py b/venv/lib/python3.10/site-packages/scipy/stats/_levy_stable/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..13d143e3dd562e76d75c3517d2d999fcf5640c8f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/_levy_stable/__init__.py
@@ -0,0 +1,1224 @@
+#
+
+import warnings
+from functools import partial
+
+import numpy as np
+
+from scipy import optimize
+from scipy import integrate
+from scipy.integrate._quadrature import _builtincoeffs
+from scipy import interpolate
+from scipy.interpolate import RectBivariateSpline
+import scipy.special as sc
+from scipy._lib._util import _lazywhere
+from .._distn_infrastructure import rv_continuous, _ShapeInfo
+from .._continuous_distns import uniform, expon, _norm_pdf, _norm_cdf
+from .levyst import Nolan
+from scipy._lib.doccer import inherit_docstring_from
+
+
+__all__ = ["levy_stable", "levy_stable_gen", "pdf_from_cf_with_fft"]
+
+# Stable distributions are known for various parameterisations,
+# some being advantageous for numerical considerations and others
+# useful due to their location/scale awareness.
+#
+# Here we follow [NO] convention (see the references in the docstring
+# for levy_stable_gen below).
+#
+# S0 / Z0 / x0 (aka Zolotarev's M)
+# S1 / Z1 / x1
+#
+# Where S* denotes parameterisation, Z* denotes standardized
+# version where gamma = 1, delta = 0 and x* denotes variable.
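[Editor's note] The S0/S1 distinction above is easiest to sanity-check through the characteristic function. A minimal standalone sketch, not part of the diff; `cf_s1` is a hypothetical helper mirroring the `_cf`/`_Phi_Z1` pair defined below, and the two assertions exercise the closed-form limits:

```python
import numpy as np

def cf_s1(t, alpha, beta):
    # S1 characteristic function of a standardized stable law
    # (gamma = 1, delta = 0), mirroring _cf/_Phi_Z1 in the diff.
    t = np.asarray(t, dtype=float)
    if alpha != 1:
        phi = np.tan(np.pi * alpha / 2)
    else:
        phi = -2.0 * np.log(np.abs(t)) / np.pi
    return np.exp(-np.abs(t) ** alpha
                  * (1 - 1j * beta * np.sign(t) * phi))

t = np.array([-2.0, -0.5, 0.5, 2.0])  # avoid t = 0 (log singularity)
# alpha = 2 collapses to a normal law: exp(-t**2), i.e. N(0, sqrt(2))
assert np.allclose(cf_s1(t, 2.0, 0.0), np.exp(-t ** 2))
# alpha = 1, beta = 0 is the standard Cauchy: exp(-|t|)
assert np.allclose(cf_s1(t, 1.0, 0.0), np.exp(-np.abs(t)))
```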
+#
+# Scipy's original Stable was a random variate generator. It
+# uses S1 and unfortunately is not location/scale aware.
+
+
+# default numerical integration tolerance
+# used for epsrel in piecewise and both epsrel and epsabs in dni
+# (epsabs needed in dni since weighted quad requires epsabs > 0)
+_QUAD_EPS = 1.2e-14
+
+
+def _Phi_Z0(alpha, t):
+    return (
+        -np.tan(np.pi * alpha / 2) * (np.abs(t) ** (1 - alpha) - 1)
+        if alpha != 1
+        else -2.0 * np.log(np.abs(t)) / np.pi
+    )
+
+
+def _Phi_Z1(alpha, t):
+    return (
+        np.tan(np.pi * alpha / 2)
+        if alpha != 1
+        else -2.0 * np.log(np.abs(t)) / np.pi
+    )
+
+
+def _cf(Phi, t, alpha, beta):
+    """Characteristic function."""
+    return np.exp(
+        -(np.abs(t) ** alpha) * (1 - 1j * beta * np.sign(t) * Phi(alpha, t))
+    )
+
+
+_cf_Z0 = partial(_cf, _Phi_Z0)
+_cf_Z1 = partial(_cf, _Phi_Z1)
+
+
+def _pdf_single_value_cf_integrate(Phi, x, alpha, beta, **kwds):
+    """To improve DNI accuracy, convert the characteristic function into a
+    real-valued integral using Euler's formula, then exploit cosine symmetry
+    to change the limits to [0, inf). Finally, use the cosine addition
+    formula to split it into two parts that can be handled by QUADPACK's
+    weighted integration routines.
+    """
+    quad_eps = kwds.get("quad_eps", _QUAD_EPS)
+
+    def integrand1(t):
+        if t == 0:
+            return 0
+        return np.exp(-(t ** alpha)) * (
+            np.cos(beta * (t ** alpha) * Phi(alpha, t))
+        )
+
+    def integrand2(t):
+        if t == 0:
+            return 0
+        return np.exp(-(t ** alpha)) * (
+            np.sin(beta * (t ** alpha) * Phi(alpha, t))
+        )
+
+    with np.errstate(invalid="ignore"):
+        int1, *ret1 = integrate.quad(
+            integrand1,
+            0,
+            np.inf,
+            weight="cos",
+            wvar=x,
+            limit=1000,
+            epsabs=quad_eps,
+            epsrel=quad_eps,
+            full_output=1,
+        )
+
+        int2, *ret2 = integrate.quad(
+            integrand2,
+            0,
+            np.inf,
+            weight="sin",
+            wvar=x,
+            limit=1000,
+            epsabs=quad_eps,
+            epsrel=quad_eps,
+            full_output=1,
+        )
+
+    return (int1 + int2) / np.pi
+
+
+_pdf_single_value_cf_integrate_Z0 = partial(
+    _pdf_single_value_cf_integrate, _Phi_Z0
+)
+_pdf_single_value_cf_integrate_Z1 = partial(
+    _pdf_single_value_cf_integrate, _Phi_Z1
+)
+
+
+def _nolan_round_x_near_zeta(x0, alpha, zeta, x_tol_near_zeta):
+    """Round x close to zeta for Nolan's method in [NO]."""
+    # "8. When |x0-beta*tan(pi*alpha/2)| is small, the
+    # computations of the density and cumulative have numerical problems.
+    # The program works around this by setting
+    # z = beta*tan(pi*alpha/2) when
+    # |z-beta*tan(pi*alpha/2)| < tol(5)*alpha**(1/alpha).
+    # (The bound on the right is ad hoc, to get reasonable behavior
+    # when alpha is small)."
+    # where tol(5) = 0.5e-2 by default.
+    #
+    # We seem to have partially addressed this through re-expression of
+    # g(theta) here, but it still needs to be used in some extreme cases.
+    # Perhaps tol(5) = 0.5e-2 could be reduced for our implementation.
+    if np.abs(x0 - zeta) < x_tol_near_zeta * alpha ** (1 / alpha):
+        x0 = zeta
+    return x0
+
+
+def _nolan_round_difficult_input(
+    x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one
+):
+    """Round difficult input values for Nolan's method in [NO]."""
+
+    # following Nolan's STABLE,
+    # "1. When 0 < |alpha-1| < 0.005, the program has numerical problems
+    # evaluating the pdf and cdf. The current version of the program sets
+    # alpha=1 in these cases. This approximation is not bad in the S0
+    # parameterization."
+    if np.abs(alpha - 1) < alpha_tol_near_one:
+        alpha = 1.0
+
+    # "2. When alpha=1 and |beta| < 0.005, the program has numerical
+    # problems. The current version sets beta=0."
+ # We seem to have addressed this through re-expression of g(theta) here + + x0 = _nolan_round_x_near_zeta(x0, alpha, zeta, x_tol_near_zeta) + return x0, alpha, beta + + +def _pdf_single_value_piecewise_Z1(x, alpha, beta, **kwds): + # convert from Nolan's S_1 (aka S) to S_0 (aka Zolaterev M) + # parameterization + + zeta = -beta * np.tan(np.pi * alpha / 2.0) + x0 = x + zeta if alpha != 1 else x + + return _pdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds) + + +def _pdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds): + + quad_eps = kwds.get("quad_eps", _QUAD_EPS) + x_tol_near_zeta = kwds.get("piecewise_x_tol_near_zeta", 0.005) + alpha_tol_near_one = kwds.get("piecewise_alpha_tol_near_one", 0.005) + + zeta = -beta * np.tan(np.pi * alpha / 2.0) + x0, alpha, beta = _nolan_round_difficult_input( + x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one + ) + + # some other known distribution pdfs / analytical cases + # TODO: add more where possible with test coverage, + # eg https://en.wikipedia.org/wiki/Stable_distribution#Other_analytic_cases + if alpha == 2.0: + # normal + return _norm_pdf(x0 / np.sqrt(2)) / np.sqrt(2) + elif alpha == 0.5 and beta == 1.0: + # levy + # since S(1/2, 1, gamma, delta; ) == + # S(1/2, 1, gamma, gamma + delta; ). + _x = x0 + 1 + if _x <= 0: + return 0 + + return 1 / np.sqrt(2 * np.pi * _x) / _x * np.exp(-1 / (2 * _x)) + elif alpha == 0.5 and beta == 0.0 and x0 != 0: + # analytical solution [HO] + S, C = sc.fresnel([1 / np.sqrt(2 * np.pi * np.abs(x0))]) + arg = 1 / (4 * np.abs(x0)) + return ( + np.sin(arg) * (0.5 - S[0]) + np.cos(arg) * (0.5 - C[0]) + ) / np.sqrt(2 * np.pi * np.abs(x0) ** 3) + elif alpha == 1.0 and beta == 0.0: + # cauchy + return 1 / (1 + x0 ** 2) / np.pi + + return _pdf_single_value_piecewise_post_rounding_Z0( + x0, alpha, beta, quad_eps, x_tol_near_zeta + ) + + +def _pdf_single_value_piecewise_post_rounding_Z0(x0, alpha, beta, quad_eps, + x_tol_near_zeta): + """Calculate pdf using Nolan's methods as detailed in [NO].""" + + _nolan = Nolan(alpha, beta, x0) + zeta = _nolan.zeta + xi = _nolan.xi + c2 = _nolan.c2 + g = _nolan.g + + # round x0 to zeta again if needed. zeta was recomputed and may have + # changed due to floating point differences. 
+ # See https://github.com/scipy/scipy/pull/18133 + x0 = _nolan_round_x_near_zeta(x0, alpha, zeta, x_tol_near_zeta) + # handle Nolan's initial case logic + if x0 == zeta: + return ( + sc.gamma(1 + 1 / alpha) + * np.cos(xi) + / np.pi + / ((1 + zeta ** 2) ** (1 / alpha / 2)) + ) + elif x0 < zeta: + return _pdf_single_value_piecewise_post_rounding_Z0( + -x0, alpha, -beta, quad_eps, x_tol_near_zeta + ) + + # following Nolan, we may now assume + # x0 > zeta when alpha != 1 + # beta != 0 when alpha == 1 + + # spare calculating integral on null set + # use isclose as macos has fp differences + if np.isclose(-xi, np.pi / 2, rtol=1e-014, atol=1e-014): + return 0.0 + + def integrand(theta): + # limit any numerical issues leading to g_1 < 0 near theta limits + g_1 = g(theta) + if not np.isfinite(g_1) or g_1 < 0: + g_1 = 0 + return g_1 * np.exp(-g_1) + + with np.errstate(all="ignore"): + peak = optimize.bisect( + lambda t: g(t) - 1, -xi, np.pi / 2, xtol=quad_eps + ) + + # this integrand can be very peaked, so we need to force + # QUADPACK to evaluate the function inside its support + # + + # lastly, we add additional samples at + # ~exp(-100), ~exp(-10), ~exp(-5), ~exp(-1) + # to improve QUADPACK's detection of rapidly descending tail behavior + # (this choice is fairly ad hoc) + tail_points = [ + optimize.bisect(lambda t: g(t) - exp_height, -xi, np.pi / 2) + for exp_height in [100, 10, 5] + # exp_height = 1 is handled by peak + ] + intg_points = [0, peak] + tail_points + intg, *ret = integrate.quad( + integrand, + -xi, + np.pi / 2, + points=intg_points, + limit=100, + epsrel=quad_eps, + epsabs=0, + full_output=1, + ) + + return c2 * intg + + +def _cdf_single_value_piecewise_Z1(x, alpha, beta, **kwds): + # convert from Nolan's S_1 (aka S) to S_0 (aka Zolaterev M) + # parameterization + + zeta = -beta * np.tan(np.pi * alpha / 2.0) + x0 = x + zeta if alpha != 1 else x + + return _cdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds) + + +def _cdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds): + + quad_eps = kwds.get("quad_eps", _QUAD_EPS) + x_tol_near_zeta = kwds.get("piecewise_x_tol_near_zeta", 0.005) + alpha_tol_near_one = kwds.get("piecewise_alpha_tol_near_one", 0.005) + + zeta = -beta * np.tan(np.pi * alpha / 2.0) + x0, alpha, beta = _nolan_round_difficult_input( + x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one + ) + + # some other known distribution cdfs / analytical cases + # TODO: add more where possible with test coverage, + # eg https://en.wikipedia.org/wiki/Stable_distribution#Other_analytic_cases + if alpha == 2.0: + # normal + return _norm_cdf(x0 / np.sqrt(2)) + elif alpha == 0.5 and beta == 1.0: + # levy + # since S(1/2, 1, gamma, delta; ) == + # S(1/2, 1, gamma, gamma + delta; ). + _x = x0 + 1 + if _x <= 0: + return 0 + + return sc.erfc(np.sqrt(0.5 / _x)) + elif alpha == 1.0 and beta == 0.0: + # cauchy + return 0.5 + np.arctan(x0) / np.pi + + return _cdf_single_value_piecewise_post_rounding_Z0( + x0, alpha, beta, quad_eps, x_tol_near_zeta + ) + + +def _cdf_single_value_piecewise_post_rounding_Z0(x0, alpha, beta, quad_eps, + x_tol_near_zeta): + """Calculate cdf using Nolan's methods as detailed in [NO].""" + _nolan = Nolan(alpha, beta, x0) + zeta = _nolan.zeta + xi = _nolan.xi + c1 = _nolan.c1 + # c2 = _nolan.c2 + c3 = _nolan.c3 + g = _nolan.g + # round x0 to zeta again if needed. zeta was recomputed and may have + # changed due to floating point differences. 
+ # See https://github.com/scipy/scipy/pull/18133 + x0 = _nolan_round_x_near_zeta(x0, alpha, zeta, x_tol_near_zeta) + # handle Nolan's initial case logic + if (alpha == 1 and beta < 0) or x0 < zeta: + # NOTE: Nolan's paper has a typo here! + # He states F(x) = 1 - F(x, alpha, -beta), but this is clearly + # incorrect since F(-infty) would be 1.0 in this case + # Indeed, the alpha != 1, x0 < zeta case is correct here. + return 1 - _cdf_single_value_piecewise_post_rounding_Z0( + -x0, alpha, -beta, quad_eps, x_tol_near_zeta + ) + elif x0 == zeta: + return 0.5 - xi / np.pi + + # following Nolan, we may now assume + # x0 > zeta when alpha != 1 + # beta > 0 when alpha == 1 + + # spare calculating integral on null set + # use isclose as macos has fp differences + if np.isclose(-xi, np.pi / 2, rtol=1e-014, atol=1e-014): + return c1 + + def integrand(theta): + g_1 = g(theta) + return np.exp(-g_1) + + with np.errstate(all="ignore"): + # shrink supports where required + left_support = -xi + right_support = np.pi / 2 + if alpha > 1: + # integrand(t) monotonic 0 to 1 + if integrand(-xi) != 0.0: + res = optimize.minimize( + integrand, + (-xi,), + method="L-BFGS-B", + bounds=[(-xi, np.pi / 2)], + ) + left_support = res.x[0] + else: + # integrand(t) monotonic 1 to 0 + if integrand(np.pi / 2) != 0.0: + res = optimize.minimize( + integrand, + (np.pi / 2,), + method="L-BFGS-B", + bounds=[(-xi, np.pi / 2)], + ) + right_support = res.x[0] + + intg, *ret = integrate.quad( + integrand, + left_support, + right_support, + points=[left_support, right_support], + limit=100, + epsrel=quad_eps, + epsabs=0, + full_output=1, + ) + + return c1 + c3 * intg + + +def _rvs_Z1(alpha, beta, size=None, random_state=None): + """Simulate random variables using Nolan's methods as detailed in [NO]. + """ + + def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W): + return ( + 2 + / np.pi + * ( + (np.pi / 2 + bTH) * tanTH + - beta * np.log((np.pi / 2 * W * cosTH) / (np.pi / 2 + bTH)) + ) + ) + + def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W): + return ( + W + / (cosTH / np.tan(aTH) + np.sin(TH)) + * ((np.cos(aTH) + np.sin(aTH) * tanTH) / W) ** (1.0 / alpha) + ) + + def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W): + # alpha is not 1 and beta is not 0 + val0 = beta * np.tan(np.pi * alpha / 2) + th0 = np.arctan(val0) / alpha + val3 = W / (cosTH / np.tan(alpha * (th0 + TH)) + np.sin(TH)) + res3 = val3 * ( + ( + np.cos(aTH) + + np.sin(aTH) * tanTH + - val0 * (np.sin(aTH) - np.cos(aTH) * tanTH) + ) + / W + ) ** (1.0 / alpha) + return res3 + + def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W): + res = _lazywhere( + beta == 0, + (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W), + beta0func, + f2=otherwise, + ) + return res + + alpha = np.broadcast_to(alpha, size) + beta = np.broadcast_to(beta, size) + TH = uniform.rvs( + loc=-np.pi / 2.0, scale=np.pi, size=size, random_state=random_state + ) + W = expon.rvs(size=size, random_state=random_state) + aTH = alpha * TH + bTH = beta * TH + cosTH = np.cos(TH) + tanTH = np.tan(TH) + res = _lazywhere( + alpha == 1, + (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W), + alpha1func, + f2=alphanot1func, + ) + return res + + +def _fitstart_S0(data): + alpha, beta, delta1, gamma = _fitstart_S1(data) + + # Formulas for mapping parameters in S1 parameterization to + # those in S0 parameterization can be found in [NO]. Note that + # only delta changes. 
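[Editor's note] A quick sanity check of the simulation route in `_rvs_Z1` above: for `alpha = 1`, `beta = 0` the Chambers-Mallows-Stuck style recipe reduces `alpha1func` to `tan(TH)`, a standard Cauchy draw. A standalone sketch, not part of the diff:

```python
import numpy as np

# alpha1func with beta = 0 gives (2/pi) * ((pi/2) * tanTH) = tanTH,
# so the variates should look standard Cauchy: median 0, quartiles +-1.
rng = np.random.default_rng(0)
TH = rng.uniform(-np.pi / 2, np.pi / 2, size=200_000)
samples = np.tan(TH)
q25, q50, q75 = np.percentile(samples, [25, 50, 75])
assert abs(q50) < 0.02            # median near 0
assert abs((q75 - q25) - 2.0) < 0.05  # IQR near 2
```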
+ if alpha != 1: + delta0 = delta1 + beta * gamma * np.tan(np.pi * alpha / 2.0) + else: + delta0 = delta1 + 2 * beta * gamma * np.log(gamma) / np.pi + + return alpha, beta, delta0, gamma + + +def _fitstart_S1(data): + # We follow McCullock 1986 method - Simple Consistent Estimators + # of Stable Distribution Parameters + + # fmt: off + # Table III and IV + nu_alpha_range = [2.439, 2.5, 2.6, 2.7, 2.8, 3, 3.2, 3.5, 4, + 5, 6, 8, 10, 15, 25] + nu_beta_range = [0, 0.1, 0.2, 0.3, 0.5, 0.7, 1] + + # table III - alpha = psi_1(nu_alpha, nu_beta) + alpha_table = np.array([ + [2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000], + [1.916, 1.924, 1.924, 1.924, 1.924, 1.924, 1.924], + [1.808, 1.813, 1.829, 1.829, 1.829, 1.829, 1.829], + [1.729, 1.730, 1.737, 1.745, 1.745, 1.745, 1.745], + [1.664, 1.663, 1.663, 1.668, 1.676, 1.676, 1.676], + [1.563, 1.560, 1.553, 1.548, 1.547, 1.547, 1.547], + [1.484, 1.480, 1.471, 1.460, 1.448, 1.438, 1.438], + [1.391, 1.386, 1.378, 1.364, 1.337, 1.318, 1.318], + [1.279, 1.273, 1.266, 1.250, 1.210, 1.184, 1.150], + [1.128, 1.121, 1.114, 1.101, 1.067, 1.027, 0.973], + [1.029, 1.021, 1.014, 1.004, 0.974, 0.935, 0.874], + [0.896, 0.892, 0.884, 0.883, 0.855, 0.823, 0.769], + [0.818, 0.812, 0.806, 0.801, 0.780, 0.756, 0.691], + [0.698, 0.695, 0.692, 0.689, 0.676, 0.656, 0.597], + [0.593, 0.590, 0.588, 0.586, 0.579, 0.563, 0.513]]).T + # transpose because interpolation with `RectBivariateSpline` is with + # `nu_beta` as `x` and `nu_alpha` as `y` + + # table IV - beta = psi_2(nu_alpha, nu_beta) + beta_table = np.array([ + [0, 2.160, 1.000, 1.000, 1.000, 1.000, 1.000], + [0, 1.592, 3.390, 1.000, 1.000, 1.000, 1.000], + [0, 0.759, 1.800, 1.000, 1.000, 1.000, 1.000], + [0, 0.482, 1.048, 1.694, 1.000, 1.000, 1.000], + [0, 0.360, 0.760, 1.232, 2.229, 1.000, 1.000], + [0, 0.253, 0.518, 0.823, 1.575, 1.000, 1.000], + [0, 0.203, 0.410, 0.632, 1.244, 1.906, 1.000], + [0, 0.165, 0.332, 0.499, 0.943, 1.560, 1.000], + [0, 0.136, 0.271, 0.404, 0.689, 1.230, 2.195], + [0, 0.109, 0.216, 0.323, 0.539, 0.827, 1.917], + [0, 0.096, 0.190, 0.284, 0.472, 0.693, 1.759], + [0, 0.082, 0.163, 0.243, 0.412, 0.601, 1.596], + [0, 0.074, 0.147, 0.220, 0.377, 0.546, 1.482], + [0, 0.064, 0.128, 0.191, 0.330, 0.478, 1.362], + [0, 0.056, 0.112, 0.167, 0.285, 0.428, 1.274]]).T + + # Table V and VII + # These are ordered with decreasing `alpha_range`; so we will need to + # reverse them as required by RectBivariateSpline. 
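[Editor's note] For orientation, the quantile statistics that index Tables III/IV above, evaluated on a Gaussian sample. A standalone sketch, not part of the diff; 2.439 is both the normal-law value of `nu_alpha` and the first grid point of `nu_alpha_range`:

```python
import numpy as np

# McCulloch's tail and skewness statistics on a symmetric, light-tailed
# sample: nu_alpha ~ (2 * 1.6449) / (2 * 0.6745) ~ 2.439, nu_beta ~ 0.
rng = np.random.default_rng(1)
data = rng.standard_normal(200_000)
p05, p25, p50, p75, p95 = np.percentile(data, [5, 25, 50, 75, 95])
nu_alpha = (p95 - p05) / (p75 - p25)
nu_beta = (p95 + p05 - 2 * p50) / (p95 - p05)
assert abs(nu_alpha - 2.439) < 0.05
assert abs(nu_beta) < 0.02
```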
+ alpha_range = [2, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, + 1, 0.9, 0.8, 0.7, 0.6, 0.5][::-1] + beta_range = [0, 0.25, 0.5, 0.75, 1] + + # Table V - nu_c = psi_3(alpha, beta) + nu_c_table = np.array([ + [1.908, 1.908, 1.908, 1.908, 1.908], + [1.914, 1.915, 1.916, 1.918, 1.921], + [1.921, 1.922, 1.927, 1.936, 1.947], + [1.927, 1.930, 1.943, 1.961, 1.987], + [1.933, 1.940, 1.962, 1.997, 2.043], + [1.939, 1.952, 1.988, 2.045, 2.116], + [1.946, 1.967, 2.022, 2.106, 2.211], + [1.955, 1.984, 2.067, 2.188, 2.333], + [1.965, 2.007, 2.125, 2.294, 2.491], + [1.980, 2.040, 2.205, 2.435, 2.696], + [2.000, 2.085, 2.311, 2.624, 2.973], + [2.040, 2.149, 2.461, 2.886, 3.356], + [2.098, 2.244, 2.676, 3.265, 3.912], + [2.189, 2.392, 3.004, 3.844, 4.775], + [2.337, 2.634, 3.542, 4.808, 6.247], + [2.588, 3.073, 4.534, 6.636, 9.144]])[::-1].T + # transpose because interpolation with `RectBivariateSpline` is with + # `beta` as `x` and `alpha` as `y` + + # Table VII - nu_zeta = psi_5(alpha, beta) + nu_zeta_table = np.array([ + [0, 0.000, 0.000, 0.000, 0.000], + [0, -0.017, -0.032, -0.049, -0.064], + [0, -0.030, -0.061, -0.092, -0.123], + [0, -0.043, -0.088, -0.132, -0.179], + [0, -0.056, -0.111, -0.170, -0.232], + [0, -0.066, -0.134, -0.206, -0.283], + [0, -0.075, -0.154, -0.241, -0.335], + [0, -0.084, -0.173, -0.276, -0.390], + [0, -0.090, -0.192, -0.310, -0.447], + [0, -0.095, -0.208, -0.346, -0.508], + [0, -0.098, -0.223, -0.380, -0.576], + [0, -0.099, -0.237, -0.424, -0.652], + [0, -0.096, -0.250, -0.469, -0.742], + [0, -0.089, -0.262, -0.520, -0.853], + [0, -0.078, -0.272, -0.581, -0.997], + [0, -0.061, -0.279, -0.659, -1.198]])[::-1].T + # fmt: on + + psi_1 = RectBivariateSpline(nu_beta_range, nu_alpha_range, + alpha_table, kx=1, ky=1, s=0) + + def psi_1_1(nu_beta, nu_alpha): + return psi_1(nu_beta, nu_alpha) \ + if nu_beta > 0 else psi_1(-nu_beta, nu_alpha) + + psi_2 = RectBivariateSpline(nu_beta_range, nu_alpha_range, + beta_table, kx=1, ky=1, s=0) + + def psi_2_1(nu_beta, nu_alpha): + return psi_2(nu_beta, nu_alpha) \ + if nu_beta > 0 else -psi_2(-nu_beta, nu_alpha) + + phi_3 = RectBivariateSpline(beta_range, alpha_range, nu_c_table, + kx=1, ky=1, s=0) + + def phi_3_1(beta, alpha): + return phi_3(beta, alpha) if beta > 0 else phi_3(-beta, alpha) + + phi_5 = RectBivariateSpline(beta_range, alpha_range, nu_zeta_table, + kx=1, ky=1, s=0) + + def phi_5_1(beta, alpha): + return phi_5(beta, alpha) if beta > 0 else -phi_5(-beta, alpha) + + # quantiles + p05 = np.percentile(data, 5) + p50 = np.percentile(data, 50) + p95 = np.percentile(data, 95) + p25 = np.percentile(data, 25) + p75 = np.percentile(data, 75) + + nu_alpha = (p95 - p05) / (p75 - p25) + nu_beta = (p95 + p05 - 2 * p50) / (p95 - p05) + + if nu_alpha >= 2.439: + eps = np.finfo(float).eps + alpha = np.clip(psi_1_1(nu_beta, nu_alpha)[0, 0], eps, 2.) + beta = np.clip(psi_2_1(nu_beta, nu_alpha)[0, 0], -1.0, 1.0) + else: + alpha = 2.0 + beta = np.sign(nu_beta) + c = (p75 - p25) / phi_3_1(beta, alpha)[0, 0] + zeta = p50 + c * phi_5_1(beta, alpha)[0, 0] + delta = zeta-beta*c*np.tan(np.pi*alpha/2.) if alpha != 1. else zeta + + return (alpha, beta, delta, c) + + +class levy_stable_gen(rv_continuous): + r"""A Levy-stable continuous random variable. + + %(before_notes)s + + See Also + -------- + levy, levy_l, cauchy, norm + + Notes + ----- + The distribution for `levy_stable` has characteristic function: + + .. 
math::
+
+        \varphi(t, \alpha, \beta, c, \mu) =
+        e^{it\mu -|ct|^{\alpha}(1-i\beta\operatorname{sign}(t)\Phi(\alpha, t))}
+
+    where two different parameterizations are supported. The first
+    :math:`S_1`:
+
+    .. math::
+
+        \Phi = \begin{cases}
+                \tan \left({\frac {\pi \alpha }{2}}\right)&\alpha \neq 1\\
+                -{\frac {2}{\pi }}\log |t|&\alpha =1
+               \end{cases}
+
+    The second :math:`S_0`:
+
+    .. math::
+
+        \Phi = \begin{cases}
+                -\tan \left({\frac {\pi \alpha }{2}}\right)(|ct|^{1-\alpha}-1)
+                &\alpha \neq 1\\
+                -{\frac {2}{\pi }}\log |ct|&\alpha =1
+               \end{cases}
+
+
+    The probability density function for `levy_stable` is:
+
+    .. math::
+
+        f(x) = \frac{1}{2\pi}\int_{-\infty}^\infty \varphi(t)e^{-ixt}\,dt
+
+    where :math:`-\infty < t < \infty`. This integral does not have a known
+    closed form.
+
+    `levy_stable` generalizes several distributions. Where possible, they
+    should be used instead. Specifically, when the shape parameters
+    assume the values in the table below, the corresponding equivalent
+    distribution should be used.
+
+    =========  ========  ===========
+    ``alpha``  ``beta``  Equivalent
+    =========  ========  ===========
+     1/2       -1        `levy_l`
+     1/2       1         `levy`
+     1         0         `cauchy`
+     2         any       `norm` (with ``scale=sqrt(2)``)
+    =========  ========  ===========
+
+    Evaluation of the pdf uses Nolan's piecewise integration approach with
+    the Zolotarev :math:`M` parameterization by default. There is also the
+    option to use direct numerical integration of the standard
+    parameterization of the characteristic function, or to evaluate by
+    taking the FFT of the characteristic function.
+
+    The default method can be changed by setting the class variable
+    ``levy_stable.pdf_default_method`` to one of 'piecewise' for Nolan's
+    approach, 'dni' for direct numerical integration, or 'fft-simpson' for
+    the FFT based approach. For the sake of backwards compatibility, the
+    methods 'best' and 'zolotarev' are equivalent to 'piecewise' and the
+    method 'quadrature' is equivalent to 'dni'.
+
+    The parameterization can be changed by setting the class variable
+    ``levy_stable.parameterization`` to either 'S0' or 'S1'.
+    The default is 'S1'.
+
+    To improve performance of piecewise and direct numerical integration one
+    can specify ``levy_stable.quad_eps`` (defaults to 1.2e-14). This is used
+    as both the absolute and relative quadrature tolerance for direct
+    numerical integration and as the relative quadrature tolerance for the
+    piecewise method. One can also specify
+    ``levy_stable.piecewise_x_tol_near_zeta`` (defaults to 0.005) for how
+    close x is to zeta before it is considered the same as zeta [NO]. The
+    exact check is
+    ``abs(x0 - zeta) < piecewise_x_tol_near_zeta*alpha**(1/alpha)``. One can
+    also specify ``levy_stable.piecewise_alpha_tol_near_one`` (defaults to
+    0.005) for how close alpha is to 1 before being considered equal to 1.
+
+    To increase accuracy of FFT calculation one can specify
+    ``levy_stable.pdf_fft_grid_spacing`` (defaults to 0.001) and
+    ``pdf_fft_n_points_two_power`` (defaults to None, which means a value is
+    calculated that sufficiently covers the input range).
+
+    Further control over FFT calculation is available by setting
+    ``pdf_fft_interpolation_degree`` (defaults to 3) for spline order and
+    ``pdf_fft_interpolation_level`` for determining the number of points to
+    use in the Newton-Cotes formula when approximating the characteristic
+    function (considered experimental).
+
+    Evaluation of the cdf uses Nolan's piecewise integration approach with
+    the Zolotarev :math:`S_0` parameterization by default.
There is also the option
+    to evaluate through integration of an interpolated spline of the pdf
+    calculated by means of the FFT method. The settings affecting FFT
+    calculation are the same as for pdf calculation. The default cdf method
+    can be changed by setting ``levy_stable.cdf_default_method`` to either
+    'piecewise' or 'fft-simpson'. For cdf calculations the Zolotarev method
+    is superior in accuracy, so FFT is disabled by default.
+
+    Fitting uses the quantile estimation method in [MC]. MLE estimation of
+    parameters in the fit method uses this quantile estimate initially. Note
+    that MLE doesn't always converge if using FFT for pdf calculations; this
+    will be the case if alpha <= 1, where the FFT approach doesn't give good
+    approximations.
+
+    Any non-missing value for the attribute
+    ``levy_stable.pdf_fft_min_points_threshold`` will set
+    ``levy_stable.pdf_default_method`` to 'fft-simpson' if a valid
+    default method is not otherwise set.
+
+    .. warning::
+
+        For pdf calculations the FFT calculation is considered experimental.
+
+        For cdf calculations the FFT calculation is considered experimental.
+        Use Zolotarev's method instead (default).
+
+    The probability density above is defined in the "standardized" form. To
+    shift and/or scale the distribution use the ``loc`` and ``scale``
+    parameters.
+    Generally ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
+    equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
+    ``y = (x - loc) / scale``, except in the ``S1`` parameterization if
+    ``alpha == 1``. In that case ``%(name)s.pdf(x, %(shapes)s, loc, scale)``
+    is identically equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
+    ``y = (x - loc - 2 * beta * scale * np.log(scale) / np.pi) / scale``.
+    See [NO2]_ Definition 1.8 for more information.
+    Note that shifting the location of a distribution
+    does not make it a "noncentral" distribution.
+
+    References
+    ----------
+    .. [MC] McCulloch, J., 1986. Simple consistent estimators of stable
+        distribution parameters. Communications in Statistics - Simulation
+        and Computation 15, 1109-1136.
+    .. [WZ] Wang, Li and Zhang, Ji-Hong, 2008. Simpson's rule based FFT
+        method to compute densities of stable distribution.
+    .. [NO] Nolan, J., 1997. Numerical Calculation of Stable Densities and
+        Distribution Functions.
+    .. [NO2] Nolan, J., 2018. Stable Distributions: Models for Heavy Tailed
+        Data.
+    .. [HO] Hopcraft, K. I., Jakeman, E., Tanner, R. M. J., 1999. Lévy random
+        walks with fluctuating step number and multiscale behavior.
+
+    %(example)s
+
+    """
+    # Configurable options as class variables
+    # (accessible from self by attribute lookup).
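[Editor's note] The docstring's equivalence table can be checked numerically through the public API. A standalone sketch, not part of the diff; it assumes a scipy build with `levy_stable` available in its default 'S1' parameterization:

```python
import numpy as np
from scipy import stats

x = np.linspace(-2.0, 2.0, 9)
# alpha = 2 matches a normal law with scale sqrt(2)
assert np.allclose(stats.levy_stable.pdf(x, 2.0, 0.0),
                   stats.norm.pdf(x, scale=np.sqrt(2)))
# alpha = 1, beta = 0 matches the standard Cauchy
assert np.allclose(stats.levy_stable.pdf(x, 1.0, 0.0),
                   stats.cauchy.pdf(x))
# alpha = 1/2, beta = 1 matches levy (evaluate on its support x > 0)
xp = np.linspace(0.5, 3.0, 6)
assert np.allclose(stats.levy_stable.pdf(xp, 0.5, 1.0),
                   stats.levy.pdf(xp))
```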
parameterization = "S1"
+    pdf_default_method = "piecewise"
+    cdf_default_method = "piecewise"
+    quad_eps = _QUAD_EPS
+    piecewise_x_tol_near_zeta = 0.005
+    piecewise_alpha_tol_near_one = 0.005
+    pdf_fft_min_points_threshold = None
+    pdf_fft_grid_spacing = 0.001
+    pdf_fft_n_points_two_power = None
+    pdf_fft_interpolation_level = 3
+    pdf_fft_interpolation_degree = 3
+
+    def _argcheck(self, alpha, beta):
+        return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
+
+    def _shape_info(self):
+        ialpha = _ShapeInfo("alpha", False, (0, 2), (False, True))
+        ibeta = _ShapeInfo("beta", False, (-1, 1), (True, True))
+        return [ialpha, ibeta]
+
+    def _parameterization(self):
+        allowed = ("S0", "S1")
+        pz = self.parameterization
+        if pz not in allowed:
+            raise RuntimeError(
+                f"Parameterization '{pz}' not in supported list: {allowed}"
+            )
+        return pz
+
+    @inherit_docstring_from(rv_continuous)
+    def rvs(self, *args, **kwds):
+        X1 = super().rvs(*args, **kwds)
+
+        kwds.pop("discrete", None)
+        kwds.pop("random_state", None)
+        (alpha, beta), delta, gamma, size = self._parse_args_rvs(*args, **kwds)
+
+        # shift location for this parameterisation (S1)
+        X1 = np.where(
+            alpha == 1.0, X1 + 2 * beta * gamma * np.log(gamma) / np.pi, X1
+        )
+
+        if self._parameterization() == "S0":
+            return np.where(
+                alpha == 1.0,
+                X1 - (beta * 2 * gamma * np.log(gamma) / np.pi),
+                X1 - gamma * beta * np.tan(np.pi * alpha / 2.0),
+            )
+        elif self._parameterization() == "S1":
+            return X1
+
+    def _rvs(self, alpha, beta, size=None, random_state=None):
+        return _rvs_Z1(alpha, beta, size, random_state)
+
+    @inherit_docstring_from(rv_continuous)
+    def pdf(self, x, *args, **kwds):
+        # override base class version to correct
+        # location for S1 parameterization
+        if self._parameterization() == "S0":
+            return super().pdf(x, *args, **kwds)
+        elif self._parameterization() == "S1":
+            (alpha, beta), delta, gamma = self._parse_args(*args, **kwds)
+            if np.all(np.reshape(alpha, (1, -1))[0, :] != 1):
+                return super().pdf(x, *args, **kwds)
+            else:
+                # correct location for this parameterisation
+                x = np.reshape(x, (1, -1))[0, :]
+                x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
+
+                data_in = np.dstack((x, alpha, beta))[0]
+                data_out = np.empty(shape=(len(data_in), 1))
+                # group data in unique arrays of alpha, beta pairs
+                uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
+                for pair in uniq_param_pairs:
+                    _alpha, _beta = pair
+                    _delta = (
+                        delta + 2 * _beta * gamma * np.log(gamma) / np.pi
+                        if _alpha == 1.0
+                        else delta
+                    )
+                    data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
+                    _x = data_in[data_mask, 0]
+                    data_out[data_mask] = (
+                        super()
+                        .pdf(_x, _alpha, _beta, loc=_delta, scale=gamma)
+                        .reshape(len(_x), 1)
+                    )
+                output = data_out.T[0]
+                if output.shape == (1,):
+                    return output[0]
+                return output
+
+    def _pdf(self, x, alpha, beta):
+        if self._parameterization() == "S0":
+            _pdf_single_value_piecewise = _pdf_single_value_piecewise_Z0
+            _pdf_single_value_cf_integrate = _pdf_single_value_cf_integrate_Z0
+            _cf = _cf_Z0
+        elif self._parameterization() == "S1":
+            _pdf_single_value_piecewise = _pdf_single_value_piecewise_Z1
+            _pdf_single_value_cf_integrate = _pdf_single_value_cf_integrate_Z1
+            _cf = _cf_Z1
+
+        x = np.asarray(x).reshape(1, -1)[0, :]
+
+        x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
+
+        data_in = np.dstack((x, alpha, beta))[0]
+        data_out = np.empty(shape=(len(data_in), 1))
+
+        pdf_default_method_name = self.pdf_default_method
+        if pdf_default_method_name in ("piecewise", "best", "zolotarev"):
+            pdf_single_value_method =
_pdf_single_value_piecewise + elif pdf_default_method_name in ("dni", "quadrature"): + pdf_single_value_method = _pdf_single_value_cf_integrate + elif ( + pdf_default_method_name == "fft-simpson" + or self.pdf_fft_min_points_threshold is not None + ): + pdf_single_value_method = None + + pdf_single_value_kwds = { + "quad_eps": self.quad_eps, + "piecewise_x_tol_near_zeta": self.piecewise_x_tol_near_zeta, + "piecewise_alpha_tol_near_one": self.piecewise_alpha_tol_near_one, + } + + fft_grid_spacing = self.pdf_fft_grid_spacing + fft_n_points_two_power = self.pdf_fft_n_points_two_power + fft_interpolation_level = self.pdf_fft_interpolation_level + fft_interpolation_degree = self.pdf_fft_interpolation_degree + + # group data in unique arrays of alpha, beta pairs + uniq_param_pairs = np.unique(data_in[:, 1:], axis=0) + for pair in uniq_param_pairs: + data_mask = np.all(data_in[:, 1:] == pair, axis=-1) + data_subset = data_in[data_mask] + if pdf_single_value_method is not None: + data_out[data_mask] = np.array( + [ + pdf_single_value_method( + _x, _alpha, _beta, **pdf_single_value_kwds + ) + for _x, _alpha, _beta in data_subset + ] + ).reshape(len(data_subset), 1) + else: + warnings.warn( + "Density calculations experimental for FFT method." + + " Use combination of piecewise and dni methods instead.", + RuntimeWarning, stacklevel=3, + ) + _alpha, _beta = pair + _x = data_subset[:, (0,)] + + if _alpha < 1.0: + raise RuntimeError( + "FFT method does not work well for alpha less than 1." + ) + + # need enough points to "cover" _x for interpolation + if fft_grid_spacing is None and fft_n_points_two_power is None: + raise ValueError( + "One of fft_grid_spacing or fft_n_points_two_power " + + "needs to be set." + ) + max_abs_x = np.max(np.abs(_x)) + h = ( + 2 ** (3 - fft_n_points_two_power) * max_abs_x + if fft_grid_spacing is None + else fft_grid_spacing + ) + q = ( + np.ceil(np.log(2 * max_abs_x / h) / np.log(2)) + 2 + if fft_n_points_two_power is None + else int(fft_n_points_two_power) + ) + + # for some parameters, the range of x can be quite + # large, let's choose an arbitrary cut off (8GB) to save on + # computer memory. 
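[Editor's note] A worked instance of the grid-sizing arithmetic above, before the `MAX_Q` cap applied just below. A standalone sketch with hypothetical values, not part of the diff:

```python
import numpy as np

# With spacing h, pick q so the FFT grid [-2**q * h / 2, 2**q * h / 2)
# covers the largest requested |x|, plus two factor-of-2 margins.
h = 0.001
max_abs_x = 25.0
q = int(np.ceil(np.log(2 * max_abs_x / h) / np.log(2)) + 2)
assert q == 18                         # ceil(log2(50000)) + 2
assert 2 ** q * h / 2 >= max_abs_x     # 131.072 >= 25
```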
+ MAX_Q = 30 + if q > MAX_Q: + raise RuntimeError( + "fft_n_points_two_power has a maximum " + + f"value of {MAX_Q}" + ) + + density_x, density = pdf_from_cf_with_fft( + lambda t: _cf(t, _alpha, _beta), + h=h, + q=q, + level=fft_interpolation_level, + ) + f = interpolate.InterpolatedUnivariateSpline( + density_x, np.real(density), k=fft_interpolation_degree + ) # patch FFT to use cubic + data_out[data_mask] = f(_x) + + return data_out.T[0] + + @inherit_docstring_from(rv_continuous) + def cdf(self, x, *args, **kwds): + # override base class version to correct + # location for S1 parameterization + # NOTE: this is near identical to pdf() above + if self._parameterization() == "S0": + return super().cdf(x, *args, **kwds) + elif self._parameterization() == "S1": + (alpha, beta), delta, gamma = self._parse_args(*args, **kwds) + if np.all(np.reshape(alpha, (1, -1))[0, :] != 1): + return super().cdf(x, *args, **kwds) + else: + # correct location for this parameterisation + x = np.reshape(x, (1, -1))[0, :] + x, alpha, beta = np.broadcast_arrays(x, alpha, beta) + + data_in = np.dstack((x, alpha, beta))[0] + data_out = np.empty(shape=(len(data_in), 1)) + # group data in unique arrays of alpha, beta pairs + uniq_param_pairs = np.unique(data_in[:, 1:], axis=0) + for pair in uniq_param_pairs: + _alpha, _beta = pair + _delta = ( + delta + 2 * _beta * gamma * np.log(gamma) / np.pi + if _alpha == 1.0 + else delta + ) + data_mask = np.all(data_in[:, 1:] == pair, axis=-1) + _x = data_in[data_mask, 0] + data_out[data_mask] = ( + super() + .cdf(_x, _alpha, _beta, loc=_delta, scale=gamma) + .reshape(len(_x), 1) + ) + output = data_out.T[0] + if output.shape == (1,): + return output[0] + return output + + def _cdf(self, x, alpha, beta): + if self._parameterization() == "S0": + _cdf_single_value_piecewise = _cdf_single_value_piecewise_Z0 + _cf = _cf_Z0 + elif self._parameterization() == "S1": + _cdf_single_value_piecewise = _cdf_single_value_piecewise_Z1 + _cf = _cf_Z1 + + x = np.asarray(x).reshape(1, -1)[0, :] + + x, alpha, beta = np.broadcast_arrays(x, alpha, beta) + + data_in = np.dstack((x, alpha, beta))[0] + data_out = np.empty(shape=(len(data_in), 1)) + + cdf_default_method_name = self.cdf_default_method + if cdf_default_method_name == "piecewise": + cdf_single_value_method = _cdf_single_value_piecewise + elif cdf_default_method_name == "fft-simpson": + cdf_single_value_method = None + + cdf_single_value_kwds = { + "quad_eps": self.quad_eps, + "piecewise_x_tol_near_zeta": self.piecewise_x_tol_near_zeta, + "piecewise_alpha_tol_near_one": self.piecewise_alpha_tol_near_one, + } + + fft_grid_spacing = self.pdf_fft_grid_spacing + fft_n_points_two_power = self.pdf_fft_n_points_two_power + fft_interpolation_level = self.pdf_fft_interpolation_level + fft_interpolation_degree = self.pdf_fft_interpolation_degree + + # group data in unique arrays of alpha, beta pairs + uniq_param_pairs = np.unique(data_in[:, 1:], axis=0) + for pair in uniq_param_pairs: + data_mask = np.all(data_in[:, 1:] == pair, axis=-1) + data_subset = data_in[data_mask] + if cdf_single_value_method is not None: + data_out[data_mask] = np.array( + [ + cdf_single_value_method( + _x, _alpha, _beta, **cdf_single_value_kwds + ) + for _x, _alpha, _beta in data_subset + ] + ).reshape(len(data_subset), 1) + else: + warnings.warn( + "Cumulative density calculations experimental for FFT" + + " method. 
Use piecewise method instead.", + RuntimeWarning, stacklevel=3, + ) + _alpha, _beta = pair + _x = data_subset[:, (0,)] + + # need enough points to "cover" _x for interpolation + if fft_grid_spacing is None and fft_n_points_two_power is None: + raise ValueError( + "One of fft_grid_spacing or fft_n_points_two_power " + + "needs to be set." + ) + max_abs_x = np.max(np.abs(_x)) + h = ( + 2 ** (3 - fft_n_points_two_power) * max_abs_x + if fft_grid_spacing is None + else fft_grid_spacing + ) + q = ( + np.ceil(np.log(2 * max_abs_x / h) / np.log(2)) + 2 + if fft_n_points_two_power is None + else int(fft_n_points_two_power) + ) + + density_x, density = pdf_from_cf_with_fft( + lambda t: _cf(t, _alpha, _beta), + h=h, + q=q, + level=fft_interpolation_level, + ) + f = interpolate.InterpolatedUnivariateSpline( + density_x, np.real(density), k=fft_interpolation_degree + ) + data_out[data_mask] = np.array( + [f.integral(self.a, float(x_1.squeeze())) for x_1 in _x] + ).reshape(data_out[data_mask].shape) + + return data_out.T[0] + + def _fitstart(self, data): + if self._parameterization() == "S0": + _fitstart = _fitstart_S0 + elif self._parameterization() == "S1": + _fitstart = _fitstart_S1 + return _fitstart(data) + + def _stats(self, alpha, beta): + mu = 0 if alpha > 1 else np.nan + mu2 = 2 if alpha == 2 else np.inf + g1 = 0.0 if alpha == 2.0 else np.nan + g2 = 0.0 if alpha == 2.0 else np.nan + return mu, mu2, g1, g2 + + +# cotes numbers - see sequence from http://oeis.org/A100642 +Cotes_table = np.array( + [[], [1]] + [v[2] for v in _builtincoeffs.values()], dtype=object +) +Cotes = np.array( + [ + np.pad(r, (0, len(Cotes_table) - 1 - len(r)), mode='constant') + for r in Cotes_table + ] +) + + +def pdf_from_cf_with_fft(cf, h=0.01, q=9, level=3): + """Calculates pdf from characteristic function. + + Uses fast Fourier transform with Newton-Cotes integration following [WZ]. + Defaults to using Simpson's method (3-point Newton-Cotes integration). + + Parameters + ---------- + cf : callable + Single argument function from float -> complex expressing a + characteristic function for some distribution. + h : Optional[float] + Step size for Newton-Cotes integration. Default: 0.01 + q : Optional[int] + Use 2**q steps when performing Newton-Cotes integration. + The infinite integral in the inverse Fourier transform will then + be restricted to the interval [-2**q * h / 2, 2**q * h / 2]. Setting + the number of steps equal to a power of 2 allows the fft to be + calculated in O(n*log(n)) time rather than O(n**2). + Default: 9 + level : Optional[int] + Calculate integral using n-point Newton-Cotes integration for + n = level. The 3-point Newton-Cotes formula corresponds to Simpson's + rule. Default: 3 + + Returns + ------- + x_l : ndarray + Array of points x at which pdf is estimated. 2**q equally spaced + points from -pi/h up to but not including pi/h. + density : ndarray + Estimated values of pdf corresponding to cf at points in x_l. + + References + ---------- + .. [WZ] Wang, Li and Zhang, Ji-Hong, 2008. Simpson's rule based FFT method + to compute densities of stable distribution. 
+ """ + n = level + N = 2**q + steps = np.arange(0, N) + L = N * h / 2 + x_l = np.pi * (steps - N / 2) / L + if level > 1: + indices = np.arange(n).reshape(n, 1) + s1 = np.sum( + (-1) ** steps * Cotes[n, indices] * np.fft.fft( + (-1)**steps * cf(-L + h * steps + h * indices / (n - 1)) + ) * np.exp( + 1j * np.pi * indices / (n - 1) + - 2 * 1j * np.pi * indices * steps / + (N * (n - 1)) + ), + axis=0 + ) + else: + s1 = (-1) ** steps * Cotes[n, 0] * np.fft.fft( + (-1) ** steps * cf(-L + h * steps) + ) + density = h * s1 / (2 * np.pi * np.sum(Cotes[n])) + return (x_l, density) + + +levy_stable = levy_stable_gen(name="levy_stable") diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_levy_stable/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/_levy_stable/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3febd7c98c6575a431e35a5a6846d789c979f065 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_levy_stable/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_levy_stable/levyst.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_levy_stable/levyst.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c66cde2da5b812735d070885f9a3b0f0bb7c15cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_levy_stable/levyst.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_rcont/__init__.py b/venv/lib/python3.10/site-packages/scipy/stats/_rcont/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..25728ead5f7ea277e6e94c359d5a5603b99eeb38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_rcont/__init__.py @@ -0,0 +1,4 @@ +# +from .rcont import rvs_rcont1, rvs_rcont2 + +__all__ = ["rvs_rcont1", "rvs_rcont2"] diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_rcont/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/_rcont/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5a8ea05ec47996abb2d8cba20e661c3c0e6acd6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_rcont/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4c6a60542e13ecf190258c84953b8e2c9706ef52 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/common_tests.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/common_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..4260eb97f2a2c1d9a839e2f586bd562f28c1ac85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/common_tests.py @@ -0,0 +1,351 @@ +import pickle + +import numpy as np +import numpy.testing as npt +from numpy.testing 
import assert_allclose, assert_equal
+from pytest import raises as assert_raises
+
+import numpy.ma.testutils as ma_npt
+
+from scipy._lib._util import (
+    getfullargspec_no_self as _getfullargspec, np_long
+)
+from scipy import stats
+
+
+def check_named_results(res, attributes, ma=False):
+    for i, attr in enumerate(attributes):
+        if ma:
+            ma_npt.assert_equal(res[i], getattr(res, attr))
+        else:
+            npt.assert_equal(res[i], getattr(res, attr))
+
+
+def check_normalization(distfn, args, distname):
+    norm_moment = distfn.moment(0, *args)
+    npt.assert_allclose(norm_moment, 1.0)
+
+    if distname == "rv_histogram_instance":
+        atol, rtol = 1e-5, 0
+    else:
+        atol, rtol = 1e-7, 1e-7
+
+    normalization_expect = distfn.expect(lambda x: 1, args=args)
+    npt.assert_allclose(normalization_expect, 1.0, atol=atol, rtol=rtol,
+                        err_msg=distname, verbose=True)
+
+    _a, _b = distfn.support(*args)
+    normalization_cdf = distfn.cdf(_b, *args)
+    npt.assert_allclose(normalization_cdf, 1.0)
+
+
+def check_moment(distfn, arg, m, v, msg):
+    m1 = distfn.moment(1, *arg)
+    m2 = distfn.moment(2, *arg)
+    if not np.isinf(m):
+        npt.assert_almost_equal(m1, m, decimal=10,
+                                err_msg=msg + ' - 1st moment')
+    else:  # or np.isnan(m1),
+        npt.assert_(np.isinf(m1),
+                    msg + ' - 1st moment -infinite, m1=%s' % str(m1))
+
+    if not np.isinf(v):
+        npt.assert_almost_equal(m2 - m1 * m1, v, decimal=10,
+                                err_msg=msg + ' - 2nd moment')
+    else:  # or np.isnan(m2),
+        npt.assert_(np.isinf(m2), msg + f' - 2nd moment -infinite, {m2=}')
+
+
+def check_mean_expect(distfn, arg, m, msg):
+    if np.isfinite(m):
+        m1 = distfn.expect(lambda x: x, arg)
+        npt.assert_almost_equal(m1, m, decimal=5,
+                                err_msg=msg + ' - 1st moment (expect)')
+
+
+def check_var_expect(distfn, arg, m, v, msg):
+    dist_looser_tolerances = {"rv_histogram_instance", "ksone"}
+    kwargs = {'rtol': 5e-6} if msg in dist_looser_tolerances else {}
+    if np.isfinite(v):
+        m2 = distfn.expect(lambda x: x*x, arg)
+        npt.assert_allclose(m2, v + m*m, **kwargs)
+
+
+def check_skew_expect(distfn, arg, m, v, s, msg):
+    if np.isfinite(s):
+        m3e = distfn.expect(lambda x: np.power(x-m, 3), arg)
+        npt.assert_almost_equal(m3e, s * np.power(v, 1.5),
+                                decimal=5, err_msg=msg + ' - skew')
+    else:
+        npt.assert_(np.isnan(s))
+
+
+def check_kurt_expect(distfn, arg, m, v, k, msg):
+    if np.isfinite(k):
+        m4e = distfn.expect(lambda x: np.power(x-m, 4), arg)
+        npt.assert_allclose(m4e, (k + 3.) * np.power(v, 2),
+                            atol=1e-5, rtol=1e-5,
+                            err_msg=msg + ' - kurtosis')
+    elif not np.isposinf(k):
+        npt.assert_(np.isnan(k))
+
+
+def check_munp_expect(dist, args, msg):
+    # If _munp is overridden, test a higher moment. (Before gh-18634, some
+    # distributions had issues with moments 5 and higher.)
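[Editor's note] The identity driven by `check_moment`/`check_var_expect` above, shown on a concrete distribution. A standalone sketch, not part of the diff:

```python
import numpy as np
from scipy import stats

# var = m2 - m1**2, with m2 also recoverable via expect().
dist, args = stats.gamma, (2.5,)
m1 = dist.moment(1, *args)   # E[X] = a = 2.5
m2 = dist.moment(2, *args)   # E[X**2] = a * (a + 1) = 8.75
m, v = dist.stats(*args, moments='mv')
assert np.isclose(m1, m) and np.isclose(m2 - m1 ** 2, v)
assert np.isclose(dist.expect(lambda x: x * x, args), m2)
```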
+ if dist._munp.__func__ != stats.rv_continuous._munp: + res = dist.moment(5, *args) # shouldn't raise an error + ref = dist.expect(lambda x: x ** 5, args, lb=-np.inf, ub=np.inf) + if not np.isfinite(res): # could be valid; automated test can't know + return + # loose tolerance, mostly to see whether _munp returns *something* + assert_allclose(res, ref, atol=1e-10, rtol=1e-4, + err_msg=msg + ' - higher moment / _munp') + + +def check_entropy(distfn, arg, msg): + ent = distfn.entropy(*arg) + npt.assert_(not np.isnan(ent), msg + ' - entropy is nan') + + +def check_private_entropy(distfn, args, superclass): + # compare a generic _entropy with the distribution-specific implementation + npt.assert_allclose(distfn._entropy(*args), + superclass._entropy(distfn, *args)) + + +def check_entropy_vect_scale(distfn, arg): + # check 2-d + sc = np.asarray([[1, 2], [3, 4]]) + v_ent = distfn.entropy(*arg, scale=sc) + s_ent = [distfn.entropy(*arg, scale=s) for s in sc.ravel()] + s_ent = np.asarray(s_ent).reshape(v_ent.shape) + assert_allclose(v_ent, s_ent, atol=1e-14) + + # check an invalid (negative) scale and casting of a list input + sc = [1, 2, -3] + v_ent = distfn.entropy(*arg, scale=sc) + s_ent = [distfn.entropy(*arg, scale=s) for s in sc] + s_ent = np.asarray(s_ent).reshape(v_ent.shape) + assert_allclose(v_ent, s_ent, atol=1e-14) + + +def check_edge_support(distfn, args): + # Make sure that x=self.a and self.b are handled correctly. + x = distfn.support(*args) + if isinstance(distfn, stats.rv_discrete): + x = x[0]-1, x[1] + + npt.assert_equal(distfn.cdf(x, *args), [0.0, 1.0]) + npt.assert_equal(distfn.sf(x, *args), [1.0, 0.0]) + + if distfn.name not in ('skellam', 'dlaplace'): + # with a = -inf, log(0) generates warnings + npt.assert_equal(distfn.logcdf(x, *args), [-np.inf, 0.0]) + npt.assert_equal(distfn.logsf(x, *args), [0.0, -np.inf]) + + npt.assert_equal(distfn.ppf([0.0, 1.0], *args), x) + npt.assert_equal(distfn.isf([0.0, 1.0], *args), x[::-1]) + + # out-of-bounds for isf & ppf + npt.assert_(np.isnan(distfn.isf([-1, 2], *args)).all()) + npt.assert_(np.isnan(distfn.ppf([-1, 2], *args)).all()) + + +def check_named_args(distfn, x, shape_args, defaults, meths): + ## Check calling w/ named arguments.
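+ # e.g. for a distribution with one shape parameter `a`, such as gamma, + # meth(x, 2.5) must agree with meth(x, a=2.5) for every method under test.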
+ + # check consistency of shapes, numargs and _parse signature + signature = _getfullargspec(distfn._parse_args) + npt.assert_(signature.varargs is None) + npt.assert_(signature.varkw is None) + npt.assert_(not signature.kwonlyargs) + npt.assert_(list(signature.defaults) == list(defaults)) + + shape_argnames = signature.args[:-len(defaults)] # a, b, loc=0, scale=1 + if distfn.shapes: + shapes_ = distfn.shapes.replace(',', ' ').split() + else: + shapes_ = '' + npt.assert_(len(shapes_) == distfn.numargs) + npt.assert_(len(shapes_) == len(shape_argnames)) + + # check calling w/ named arguments + shape_args = list(shape_args) + + vals = [meth(x, *shape_args) for meth in meths] + npt.assert_(np.all(np.isfinite(vals))) + + names, a, k = shape_argnames[:], shape_args[:], {} + while names: + k.update({names.pop(): a.pop()}) + v = [meth(x, *a, **k) for meth in meths] + npt.assert_array_equal(vals, v) + if 'n' not in k.keys(): + # `n` is first parameter of moment(), so can't be used as named arg + npt.assert_equal(distfn.moment(1, *a, **k), + distfn.moment(1, *shape_args)) + + # unknown arguments should not go through: + k.update({'kaboom': 42}) + assert_raises(TypeError, distfn.cdf, x, **k) + + +def check_random_state_property(distfn, args): + # check the random_state attribute of a distribution *instance* + + # This test fiddles with distfn.random_state. This breaks other tests, + # hence need to save it and then restore. + rndm = distfn.random_state + + # baseline: this relies on the global state + np.random.seed(1234) + distfn.random_state = None + r0 = distfn.rvs(*args, size=8) + + # use an explicit instance-level random_state + distfn.random_state = 1234 + r1 = distfn.rvs(*args, size=8) + npt.assert_equal(r0, r1) + + distfn.random_state = np.random.RandomState(1234) + r2 = distfn.rvs(*args, size=8) + npt.assert_equal(r0, r2) + + # check that np.random.Generator can be used (numpy >= 1.17) + if hasattr(np.random, 'default_rng'): + # obtain a np.random.Generator object + rng = np.random.default_rng(1234) + distfn.rvs(*args, size=1, random_state=rng) + + # can override the instance-level random_state for an individual .rvs call + distfn.random_state = 2 + orig_state = distfn.random_state.get_state() + + r3 = distfn.rvs(*args, size=8, random_state=np.random.RandomState(1234)) + npt.assert_equal(r0, r3) + + # ... and that does not alter the instance-level random_state! + npt.assert_equal(distfn.random_state.get_state(), orig_state) + + # finally, restore the random_state + distfn.random_state = rndm + + +def check_meth_dtype(distfn, arg, meths): + q0 = [0.25, 0.5, 0.75] + x0 = distfn.ppf(q0, *arg) + x_cast = [x0.astype(tp) for tp in (np_long, np.float16, np.float32, + np.float64)] + + for x in x_cast: + # casting may have clipped the values, exclude those + distfn._argcheck(*arg) + x = x[(distfn.a < x) & (x < distfn.b)] + for meth in meths: + val = meth(x, *arg) + npt.assert_(val.dtype == np.float64) + + +def check_ppf_dtype(distfn, arg): + q0 = np.asarray([0.25, 0.5, 0.75]) + q_cast = [q0.astype(tp) for tp in (np.float16, np.float32, np.float64)] + for q in q_cast: + for meth in [distfn.ppf, distfn.isf]: + val = meth(q, *arg) + npt.assert_(val.dtype == np.float64) + + +def check_cmplx_deriv(distfn, arg): + # Distributions allow complex arguments. 
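+ # Complex-step differentiation: for an analytic f and a small step h, + # f'(x) ~= Im(f(x + 1j*h)) / h, with no subtractive cancellation, which is + # why the tiny step h = 1e-10 used below is safe.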
+ def deriv(f, x, *arg): + x = np.asarray(x) + h = 1e-10 + return (f(x + h*1j, *arg)/h).imag + + x0 = distfn.ppf([0.25, 0.51, 0.75], *arg) + x_cast = [x0.astype(tp) for tp in (np_long, np.float16, np.float32, + np.float64)] + + for x in x_cast: + # casting may have clipped the values, exclude those + distfn._argcheck(*arg) + x = x[(distfn.a < x) & (x < distfn.b)] + + pdf, cdf, sf = distfn.pdf(x, *arg), distfn.cdf(x, *arg), distfn.sf(x, *arg) + assert_allclose(deriv(distfn.cdf, x, *arg), pdf, rtol=1e-5) + assert_allclose(deriv(distfn.logcdf, x, *arg), pdf/cdf, rtol=1e-5) + + assert_allclose(deriv(distfn.sf, x, *arg), -pdf, rtol=1e-5) + assert_allclose(deriv(distfn.logsf, x, *arg), -pdf/sf, rtol=1e-5) + + assert_allclose(deriv(distfn.logpdf, x, *arg), + deriv(distfn.pdf, x, *arg) / distfn.pdf(x, *arg), + rtol=1e-5) + + +def check_pickling(distfn, args): + # check that a distribution instance pickles and unpickles + # pay special attention to the random_state property + + # save the random_state (restore later) + rndm = distfn.random_state + + # check unfrozen + distfn.random_state = 1234 + distfn.rvs(*args, size=8) + s = pickle.dumps(distfn) + r0 = distfn.rvs(*args, size=8) + + unpickled = pickle.loads(s) + r1 = unpickled.rvs(*args, size=8) + npt.assert_equal(r0, r1) + + # also smoke test some methods + medians = [distfn.ppf(0.5, *args), unpickled.ppf(0.5, *args)] + npt.assert_equal(medians[0], medians[1]) + npt.assert_equal(distfn.cdf(medians[0], *args), + unpickled.cdf(medians[1], *args)) + + # check frozen pickling/unpickling with rvs + frozen_dist = distfn(*args) + pkl = pickle.dumps(frozen_dist) + unpickled = pickle.loads(pkl) + + r0 = frozen_dist.rvs(size=8) + r1 = unpickled.rvs(size=8) + npt.assert_equal(r0, r1) + + # check pickling/unpickling of .fit method + if hasattr(distfn, "fit"): + fit_function = distfn.fit + pickled_fit_function = pickle.dumps(fit_function) + unpickled_fit_function = pickle.loads(pickled_fit_function) + assert fit_function.__name__ == unpickled_fit_function.__name__ == "fit" + + # restore the random_state + distfn.random_state = rndm + + +def check_freezing(distfn, args): + # regression test for gh-11089: freezing a distribution fails + # if loc and/or scale are specified + if isinstance(distfn, stats.rv_continuous): + locscale = {'loc': 1, 'scale': 2} + else: + locscale = {'loc': 1} + + rv = distfn(*args, **locscale) + assert rv.a == distfn(*args).a + assert rv.b == distfn(*args).b + + +def check_rvs_broadcast(distfunc, distname, allargs, shape, shape_only, otype): + np.random.seed(123) + sample = distfunc.rvs(*allargs) + assert_equal(sample.shape, shape, "%s: rvs failed to broadcast" % distname) + if not shape_only: + rvs = np.vectorize(lambda *allargs: distfunc.rvs(*allargs), otypes=otype) + np.random.seed(123) + expected = rvs(*allargs) + assert_allclose(sample, expected, rtol=1e-13) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/__pycache__/_mvt.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/__pycache__/_mvt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d0a2f625f7556c9e447763f0aa653cffa27cd05 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/__pycache__/_mvt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/__pycache__/fisher_exact_results_from_r.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/__pycache__/fisher_exact_results_from_r.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..7599add5b33c5dbac844e644ab00c48810eebf4f Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/__pycache__/fisher_exact_results_from_r.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/_mvt.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/_mvt.py new file mode 100644 index 0000000000000000000000000000000000000000..c346d0daded4b6e734718742cc8950b84ed333f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/_mvt.py @@ -0,0 +1,171 @@ +import math +import numpy as np +from scipy import special +from scipy.stats._qmc import primes_from_2_to + + +def _primes(n): + # Defined to facilitate comparison between translation and source + # In Matlab, primes(10.5) -> first four primes, primes(11.5) -> first five + return primes_from_2_to(math.ceil(n)) + + +def _gaminv(a, b): + # Defined to facilitate comparison between translation and source + # Matlab's `gaminv` is like `special.gammaincinv` but args are reversed + return special.gammaincinv(b, a) + + +def _qsimvtv(m, nu, sigma, a, b, rng): + """Estimates the multivariate t CDF using randomized QMC + + Parameters + ---------- + m : int + The number of points + nu : float + Degrees of freedom + sigma : ndarray + A 2D positive semidefinite covariance matrix + a : ndarray + Lower integration limits + b : ndarray + Upper integration limits. + rng : Generator + Pseudorandom number generator + + Returns + ------- + p : float + The estimated CDF. + e : float + An absolute error estimate. + + """ + # _qsimvtv is a Python translation of the Matlab function qsimvtv, + # semicolons and all. + # + # This function uses an algorithm given in the paper + # "Comparison of Methods for the Numerical Computation of + # Multivariate t Probabilities", in + # J. of Computational and Graphical Stat., 11(2002), pp. 950-971, by + # Alan Genz and Frank Bretz + # + # The primary references for the numerical integration are + # "On a Number-Theoretical Integration Method" + # H. Niederreiter, Aequationes Mathematicae, 8(1972), pp. 304-11. + # and + # "Randomization of Number Theoretic Methods for Multiple Integration" + # R. Cranley & T.N.L. Patterson, SIAM J Numer Anal, 13(1976), pp. 904-14. + # + # Alan Genz is the author of this function and following Matlab functions. + # Alan Genz, WSU Math, PO Box 643113, Pullman, WA 99164-3113 + # Email : alangenz@wsu.edu + # + # Copyright (C) 2013, Alan Genz, All rights reserved. + # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided the following conditions are met: + # 1. Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. + # 2. Redistributions in binary form must reproduce the above copyright + # notice, this list of conditions and the following disclaimer in + # the documentation and/or other materials provided with the + # distribution. + # 3. The contributor name(s) may not be used to endorse or promote + # products derived from this software without specific prior + # written permission. + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + # Initialization + sn = max(1, math.sqrt(nu)); ch, az, bz = _chlrps(sigma, a/sn, b/sn) + n = len(sigma); N = 10; P = math.ceil(m/N); on = np.ones(P); p = 0; e = 0 + ps = np.sqrt(_primes(5*n*math.log(n+4)/4)); q = ps[:, np.newaxis] # Richtmyer gens. + + # Randomization loop for ns samples + c = None; dc = None + for S in range(N): + vp = on.copy(); s = np.zeros((n, P)) + for i in range(n): + x = np.abs(2*np.mod(q[i]*np.arange(1, P+1) + rng.random(), 1)-1) # periodizing transform + if i == 0: + r = on + if nu > 0: + r = np.sqrt(2*_gaminv(x, nu/2)) + else: + y = _Phinv(c + x*dc) + s[i:] += ch[i:, i-1:i] * y + si = s[i, :]; c = on.copy(); ai = az[i]*r - si; d = on.copy(); bi = bz[i]*r - si + c[ai <= -9] = 0; tl = abs(ai) < 9; c[tl] = _Phi(ai[tl]) + d[bi <= -9] = 0; tl = abs(bi) < 9; d[tl] = _Phi(bi[tl]) + dc = d - c; vp = vp * dc + d = (np.mean(vp) - p)/(S + 1); p = p + d; e = (S - 1)*e/(S + 1) + d**2 + e = math.sqrt(e) # error estimate is 3 times std error with N samples. + return p, e + + +# Standard statistical normal distribution functions +def _Phi(z): + return special.ndtr(z) + + +def _Phinv(p): + return special.ndtri(p) + + +def _chlrps(R, a, b): + """ + Computes permuted and scaled lower Cholesky factor c for R which may be + singular, also permuting and scaling integration limit vectors a and b. 
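+ + The variables are reordered greedily (the Genz & Bretz prioritization): + at each step the remaining variable with the smallest conditional + probability mass Phi(b_i) - Phi(a_i) is pivoted to the front, which + tends to reduce the variance of the subsequent QMC integration.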
+ """ + ep = 1e-10 # singularity tolerance + eps = np.finfo(R.dtype).eps + + n = len(R); c = R.copy(); ap = a.copy(); bp = b.copy(); d = np.sqrt(np.maximum(np.diag(c), 0)) + for i in range(n): + if d[i] > 0: + c[:, i] /= d[i]; c[i, :] /= d[i] + ap[i] /= d[i]; bp[i] /= d[i] + y = np.zeros((n, 1)); sqtp = math.sqrt(2*math.pi) + + for k in range(n): + im = k; ckk = 0; dem = 1; s = 0 + for i in range(k, n): + if c[i, i] > eps: + cii = math.sqrt(max(c[i, i], 0)) + if i > 0: s = c[i, :k] @ y[:k] + ai = (ap[i]-s)/cii; bi = (bp[i]-s)/cii; de = _Phi(bi)-_Phi(ai) + if de <= dem: + ckk = cii; dem = de; am = ai; bm = bi; im = i + if im > k: + ap[[im, k]] = ap[[k, im]]; bp[[im, k]] = bp[[k, im]]; c[im, im] = c[k, k] + t = c[im, :k].copy(); c[im, :k] = c[k, :k]; c[k, :k] = t + t = c[im+1:, im].copy(); c[im+1:, im] = c[im+1:, k]; c[im+1:, k] = t + t = c[k+1:im, k].copy(); c[k+1:im, k] = c[im, k+1:im].T; c[im, k+1:im] = t.T + if ckk > ep*(k+1): + c[k, k] = ckk; c[k, k+1:] = 0 + for i in range(k+1, n): + c[i, k] = c[i, k]/ckk; c[i, k+1:i+1] = c[i, k+1:i+1] - c[i, k]*c[k+1:i+1, k].T + if abs(dem) > ep: + y[k] = (np.exp(-am**2/2) - np.exp(-bm**2/2)) / (sqtp*dem) + else: + y[k] = (am + bm) / 2 + if am < -10: + y[k] = bm + elif bm > 10: + y[k] = am + c[k, :k+1] /= ckk; ap[k] /= ckk; bp[k] /= ckk + else: + c[k:, k] = 0; y[k] = (ap[k] + bp[k])/2 + pass + return c, ap, bp diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/fisher_exact_results_from_r.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/fisher_exact_results_from_r.py new file mode 100644 index 0000000000000000000000000000000000000000..b7dd8936018eae2f74cc6f5966235a86fa821793 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/fisher_exact_results_from_r.py @@ -0,0 +1,607 @@ +# DO NOT EDIT THIS FILE! 
+# This file was generated by the R script +# generate_fisher_exact_results_from_r.R +# The script was run with R version 3.6.2 (2019-12-12) at 2020-11-09 06:16:09 + + +from collections import namedtuple +import numpy as np + + +Inf = np.inf + +Parameters = namedtuple('Parameters', + ['table', 'confidence_level', 'alternative']) +RResults = namedtuple('RResults', + ['pvalue', 'conditional_odds_ratio', + 'conditional_odds_ratio_ci']) +data = [ + (Parameters(table=[[100, 2], [1000, 5]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=0.1300759363430016, + conditional_odds_ratio=0.25055839934223, + conditional_odds_ratio_ci=(0.04035202926536294, + 2.662846672960251))), + (Parameters(table=[[2, 7], [8, 2]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=0.02301413756522116, + conditional_odds_ratio=0.0858623513573622, + conditional_odds_ratio_ci=(0.004668988338943325, + 0.895792956493601))), + (Parameters(table=[[5, 1], [10, 10]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=0.1973244147157191, + conditional_odds_ratio=4.725646047336587, + conditional_odds_ratio_ci=(0.4153910882532168, + 259.2593661129417))), + (Parameters(table=[[5, 15], [20, 20]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=0.09580440012477633, + conditional_odds_ratio=0.3394396617440851, + conditional_odds_ratio_ci=(0.08056337526385809, + 1.22704788545557))), + (Parameters(table=[[5, 16], [16, 25]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=0.2697004098849359, + conditional_odds_ratio=0.4937791394540491, + conditional_odds_ratio_ci=(0.1176691231650079, + 1.787463657995973))), + (Parameters(table=[[10, 5], [10, 1]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=0.1973244147157192, + conditional_odds_ratio=0.2116112781158479, + conditional_odds_ratio_ci=(0.003857141267422399, + 2.407369893767229))), + (Parameters(table=[[10, 5], [10, 0]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=0.06126482213438735, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 1.451643573543705))), + (Parameters(table=[[5, 0], [1, 4]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=0.04761904761904762, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(1.024822256141754, + Inf))), + (Parameters(table=[[0, 5], [1, 4]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=1, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 39.00054996869288))), + (Parameters(table=[[5, 1], [0, 4]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=0.04761904761904761, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(1.024822256141754, + Inf))), + (Parameters(table=[[0, 1], [3, 2]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=1, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 39.00054996869287))), + (Parameters(table=[[200, 7], [8, 300]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=2.005657880389071e-122, + conditional_odds_ratio=977.7866978606228, + conditional_odds_ratio_ci=(349.2595113327733, + 3630.382605689872))), + (Parameters(table=[[28, 21], [6, 1957]], + confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=5.728437460831947e-44, + conditional_odds_ratio=425.2403028434684, + conditional_odds_ratio_ci=(152.4166024390096, + 1425.700792178893))), + (Parameters(table=[[190, 800], [200, 900]], + 
confidence_level=0.95, + alternative='two.sided'), + RResults(pvalue=0.574111858126088, + conditional_odds_ratio=1.068697577856801, + conditional_odds_ratio_ci=(0.8520462587912048, + 1.340148950273938))), + (Parameters(table=[[100, 2], [1000, 5]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=0.1300759363430016, + conditional_odds_ratio=0.25055839934223, + conditional_odds_ratio_ci=(0.02502345007115455, + 6.304424772117853))), + (Parameters(table=[[2, 7], [8, 2]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=0.02301413756522116, + conditional_odds_ratio=0.0858623513573622, + conditional_odds_ratio_ci=(0.001923034001462487, + 1.53670836950172))), + (Parameters(table=[[5, 1], [10, 10]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=0.1973244147157191, + conditional_odds_ratio=4.725646047336587, + conditional_odds_ratio_ci=(0.2397970951413721, + 1291.342011095509))), + (Parameters(table=[[5, 15], [20, 20]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=0.09580440012477633, + conditional_odds_ratio=0.3394396617440851, + conditional_odds_ratio_ci=(0.05127576113762925, + 1.717176678806983))), + (Parameters(table=[[5, 16], [16, 25]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=0.2697004098849359, + conditional_odds_ratio=0.4937791394540491, + conditional_odds_ratio_ci=(0.07498546954483619, + 2.506969905199901))), + (Parameters(table=[[10, 5], [10, 1]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=0.1973244147157192, + conditional_odds_ratio=0.2116112781158479, + conditional_odds_ratio_ci=(0.0007743881879531337, + 4.170192301163831))), + (Parameters(table=[[10, 5], [10, 0]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=0.06126482213438735, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 2.642491011905582))), + (Parameters(table=[[5, 0], [1, 4]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=0.04761904761904762, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(0.496935393325443, + Inf))), + (Parameters(table=[[0, 5], [1, 4]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=1, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 198.019801980198))), + (Parameters(table=[[5, 1], [0, 4]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=0.04761904761904761, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(0.496935393325443, + Inf))), + (Parameters(table=[[0, 1], [3, 2]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=1, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 198.019801980198))), + (Parameters(table=[[200, 7], [8, 300]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=2.005657880389071e-122, + conditional_odds_ratio=977.7866978606228, + conditional_odds_ratio_ci=(270.0334165523604, + 5461.333333326708))), + (Parameters(table=[[28, 21], [6, 1957]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=5.728437460831947e-44, + conditional_odds_ratio=425.2403028434684, + conditional_odds_ratio_ci=(116.7944750275836, + 1931.995993191814))), + (Parameters(table=[[190, 800], [200, 900]], + confidence_level=0.99, + alternative='two.sided'), + RResults(pvalue=0.574111858126088, + conditional_odds_ratio=1.068697577856801, + conditional_odds_ratio_ci=(0.7949398282935892, + 1.436229679394333))), + (Parameters(table=[[100, 2], [1000, 5]], + 
confidence_level=0.95, + alternative='less'), + RResults(pvalue=0.1300759363430016, + conditional_odds_ratio=0.25055839934223, + conditional_odds_ratio_ci=(0, + 1.797867027270803))), + (Parameters(table=[[2, 7], [8, 2]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=0.0185217259520665, + conditional_odds_ratio=0.0858623513573622, + conditional_odds_ratio_ci=(0, + 0.6785254803404526))), + (Parameters(table=[[5, 1], [10, 10]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=0.9782608695652173, + conditional_odds_ratio=4.725646047336587, + conditional_odds_ratio_ci=(0, + 127.8497388102893))), + (Parameters(table=[[5, 15], [20, 20]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=0.05625775074399956, + conditional_odds_ratio=0.3394396617440851, + conditional_odds_ratio_ci=(0, + 1.032332939718425))), + (Parameters(table=[[5, 16], [16, 25]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=0.1808979350599346, + conditional_odds_ratio=0.4937791394540491, + conditional_odds_ratio_ci=(0, + 1.502407513296985))), + (Parameters(table=[[10, 5], [10, 1]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=0.1652173913043479, + conditional_odds_ratio=0.2116112781158479, + conditional_odds_ratio_ci=(0, + 1.820421051562392))), + (Parameters(table=[[10, 5], [10, 0]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=0.0565217391304348, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 1.06224603077045))), + (Parameters(table=[[5, 0], [1, 4]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=1, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(0, + Inf))), + (Parameters(table=[[0, 5], [1, 4]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=0.5, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 19.00192394479939))), + (Parameters(table=[[5, 1], [0, 4]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=1, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(0, + Inf))), + (Parameters(table=[[0, 1], [3, 2]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=0.4999999999999999, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 19.00192394479939))), + (Parameters(table=[[200, 7], [8, 300]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=1, + conditional_odds_ratio=977.7866978606228, + conditional_odds_ratio_ci=(0, + 3045.460216525746))), + (Parameters(table=[[28, 21], [6, 1957]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=1, + conditional_odds_ratio=425.2403028434684, + conditional_odds_ratio_ci=(0, + 1186.440170942579))), + (Parameters(table=[[190, 800], [200, 900]], + confidence_level=0.95, + alternative='less'), + RResults(pvalue=0.7416227010368963, + conditional_odds_ratio=1.068697577856801, + conditional_odds_ratio_ci=(0, + 1.293551891610822))), + (Parameters(table=[[100, 2], [1000, 5]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=0.1300759363430016, + conditional_odds_ratio=0.25055839934223, + conditional_odds_ratio_ci=(0, + 4.375946050832565))), + (Parameters(table=[[2, 7], [8, 2]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=0.0185217259520665, + conditional_odds_ratio=0.0858623513573622, + conditional_odds_ratio_ci=(0, + 1.235282118191202))), + (Parameters(table=[[5, 1], [10, 10]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=0.9782608695652173, + 
conditional_odds_ratio=4.725646047336587, + conditional_odds_ratio_ci=(0, + 657.2063583945989))), + (Parameters(table=[[5, 15], [20, 20]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=0.05625775074399956, + conditional_odds_ratio=0.3394396617440851, + conditional_odds_ratio_ci=(0, + 1.498867660683128))), + (Parameters(table=[[5, 16], [16, 25]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=0.1808979350599346, + conditional_odds_ratio=0.4937791394540491, + conditional_odds_ratio_ci=(0, + 2.186159386716762))), + (Parameters(table=[[10, 5], [10, 1]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=0.1652173913043479, + conditional_odds_ratio=0.2116112781158479, + conditional_odds_ratio_ci=(0, + 3.335351451901569))), + (Parameters(table=[[10, 5], [10, 0]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=0.0565217391304348, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 2.075407697450433))), + (Parameters(table=[[5, 0], [1, 4]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=1, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(0, + Inf))), + (Parameters(table=[[0, 5], [1, 4]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=0.5, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 99.00009507969122))), + (Parameters(table=[[5, 1], [0, 4]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=1, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(0, + Inf))), + (Parameters(table=[[0, 1], [3, 2]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=0.4999999999999999, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 99.00009507969123))), + (Parameters(table=[[200, 7], [8, 300]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=1, + conditional_odds_ratio=977.7866978606228, + conditional_odds_ratio_ci=(0, + 4503.078257659934))), + (Parameters(table=[[28, 21], [6, 1957]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=1, + conditional_odds_ratio=425.2403028434684, + conditional_odds_ratio_ci=(0, + 1811.766127544222))), + (Parameters(table=[[190, 800], [200, 900]], + confidence_level=0.99, + alternative='less'), + RResults(pvalue=0.7416227010368963, + conditional_odds_ratio=1.068697577856801, + conditional_odds_ratio_ci=(0, + 1.396522811516685))), + (Parameters(table=[[100, 2], [1000, 5]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=0.979790445314723, + conditional_odds_ratio=0.25055839934223, + conditional_odds_ratio_ci=(0.05119649909830196, + Inf))), + (Parameters(table=[[2, 7], [8, 2]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=0.9990149169715733, + conditional_odds_ratio=0.0858623513573622, + conditional_odds_ratio_ci=(0.007163749169069961, + Inf))), + (Parameters(table=[[5, 1], [10, 10]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=0.1652173913043478, + conditional_odds_ratio=4.725646047336587, + conditional_odds_ratio_ci=(0.5493234651081089, + Inf))), + (Parameters(table=[[5, 15], [20, 20]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=0.9849086665340765, + conditional_odds_ratio=0.3394396617440851, + conditional_odds_ratio_ci=(0.1003538933958604, + Inf))), + (Parameters(table=[[5, 16], [16, 25]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=0.9330176609214881, + conditional_odds_ratio=0.4937791394540491, + 
conditional_odds_ratio_ci=(0.146507416280863, + Inf))), + (Parameters(table=[[10, 5], [10, 1]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=0.9782608695652174, + conditional_odds_ratio=0.2116112781158479, + conditional_odds_ratio_ci=(0.007821681994077808, + Inf))), + (Parameters(table=[[10, 5], [10, 0]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=1, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + Inf))), + (Parameters(table=[[5, 0], [1, 4]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=0.02380952380952382, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(1.487678929918272, + Inf))), + (Parameters(table=[[0, 5], [1, 4]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=1, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + Inf))), + (Parameters(table=[[5, 1], [0, 4]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=0.0238095238095238, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(1.487678929918272, + Inf))), + (Parameters(table=[[0, 1], [3, 2]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=1, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + Inf))), + (Parameters(table=[[200, 7], [8, 300]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=2.005657880388915e-122, + conditional_odds_ratio=977.7866978606228, + conditional_odds_ratio_ci=(397.784359748113, + Inf))), + (Parameters(table=[[28, 21], [6, 1957]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=5.728437460831983e-44, + conditional_odds_ratio=425.2403028434684, + conditional_odds_ratio_ci=(174.7148056880929, + Inf))), + (Parameters(table=[[190, 800], [200, 900]], + confidence_level=0.95, + alternative='greater'), + RResults(pvalue=0.2959825901308897, + conditional_odds_ratio=1.068697577856801, + conditional_odds_ratio_ci=(0.8828406663967776, + Inf))), + (Parameters(table=[[100, 2], [1000, 5]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=0.979790445314723, + conditional_odds_ratio=0.25055839934223, + conditional_odds_ratio_ci=(0.03045407081240429, + Inf))), + (Parameters(table=[[2, 7], [8, 2]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=0.9990149169715733, + conditional_odds_ratio=0.0858623513573622, + conditional_odds_ratio_ci=(0.002768053063547901, + Inf))), + (Parameters(table=[[5, 1], [10, 10]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=0.1652173913043478, + conditional_odds_ratio=4.725646047336587, + conditional_odds_ratio_ci=(0.2998184792279909, + Inf))), + (Parameters(table=[[5, 15], [20, 20]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=0.9849086665340765, + conditional_odds_ratio=0.3394396617440851, + conditional_odds_ratio_ci=(0.06180414342643172, + Inf))), + (Parameters(table=[[5, 16], [16, 25]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=0.9330176609214881, + conditional_odds_ratio=0.4937791394540491, + conditional_odds_ratio_ci=(0.09037094010066403, + Inf))), + (Parameters(table=[[10, 5], [10, 1]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=0.9782608695652174, + conditional_odds_ratio=0.2116112781158479, + conditional_odds_ratio_ci=(0.001521592095430679, + Inf))), + (Parameters(table=[[10, 5], [10, 0]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=1, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + 
Inf))), + (Parameters(table=[[5, 0], [1, 4]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=0.02380952380952382, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(0.6661157890359722, + Inf))), + (Parameters(table=[[0, 5], [1, 4]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=1, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + Inf))), + (Parameters(table=[[5, 1], [0, 4]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=0.0238095238095238, + conditional_odds_ratio=Inf, + conditional_odds_ratio_ci=(0.6661157890359725, + Inf))), + (Parameters(table=[[0, 1], [3, 2]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=1, + conditional_odds_ratio=0, + conditional_odds_ratio_ci=(0, + Inf))), + (Parameters(table=[[200, 7], [8, 300]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=2.005657880388915e-122, + conditional_odds_ratio=977.7866978606228, + conditional_odds_ratio_ci=(297.9619252357688, + Inf))), + (Parameters(table=[[28, 21], [6, 1957]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=5.728437460831983e-44, + conditional_odds_ratio=425.2403028434684, + conditional_odds_ratio_ci=(130.3213490295859, + Inf))), + (Parameters(table=[[190, 800], [200, 900]], + confidence_level=0.99, + alternative='greater'), + RResults(pvalue=0.2959825901308897, + conditional_odds_ratio=1.068697577856801, + conditional_odds_ratio_ci=(0.8176272148267533, + Inf))), +] diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/AtmWtAg.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/AtmWtAg.dat new file mode 100644 index 0000000000000000000000000000000000000000..30537565fe8c47f74da0e63a39f4b46600f7768f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/AtmWtAg.dat @@ -0,0 +1,108 @@ +NIST/ITL StRD +Dataset Name: AtmWtAg (AtmWtAg.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 108) + + +Procedure: Analysis of Variance + + +Reference: Powell, L.J., Murphy, T.J. and Gramlich, J.W. (1982). + "The Absolute Isotopic Abundance & Atomic Weight + of a Reference Sample of Silver". + NBS Journal of Research, 87, pp. 9-19. 
+ + +Data: 1 Factor + 2 Treatments + 24 Replicates/Cell + 48 Observations + 7 Constant Leading Digits + Average Level of Difficulty + Observed Data + + +Model: 3 Parameters (mu, tau_1, tau_2) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + + +Between Instrument 1 3.63834187500000E-09 3.63834187500000E-09 1.59467335677930E+01 +Within Instrument 46 1.04951729166667E-08 2.28155932971014E-10 + + Certified R-Squared 2.57426544538321E-01 + + Certified Residual + Standard Deviation 1.51048314446410E-05 + + + + + + + + + + + +Data: Instrument AgWt + 1 107.8681568 + 1 107.8681465 + 1 107.8681572 + 1 107.8681785 + 1 107.8681446 + 1 107.8681903 + 1 107.8681526 + 1 107.8681494 + 1 107.8681616 + 1 107.8681587 + 1 107.8681519 + 1 107.8681486 + 1 107.8681419 + 1 107.8681569 + 1 107.8681508 + 1 107.8681672 + 1 107.8681385 + 1 107.8681518 + 1 107.8681662 + 1 107.8681424 + 1 107.8681360 + 1 107.8681333 + 1 107.8681610 + 1 107.8681477 + 2 107.8681079 + 2 107.8681344 + 2 107.8681513 + 2 107.8681197 + 2 107.8681604 + 2 107.8681385 + 2 107.8681642 + 2 107.8681365 + 2 107.8681151 + 2 107.8681082 + 2 107.8681517 + 2 107.8681448 + 2 107.8681198 + 2 107.8681482 + 2 107.8681334 + 2 107.8681609 + 2 107.8681101 + 2 107.8681512 + 2 107.8681469 + 2 107.8681360 + 2 107.8681254 + 2 107.8681261 + 2 107.8681450 + 2 107.8681368 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SiRstv.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SiRstv.dat new file mode 100644 index 0000000000000000000000000000000000000000..18ea8971fd7a4d67800dafe98ac5ea5acef53025 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SiRstv.dat @@ -0,0 +1,85 @@ +NIST/ITL StRD +Dataset Name: SiRstv (SiRstv.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 85) + + +Procedure: Analysis of Variance + + +Reference: Ehrstein, James and Croarkin, M. Carroll. + Unpublished NIST dataset. + + +Data: 1 Factor + 5 Treatments + 5 Replicates/Cell + 25 Observations + 3 Constant Leading Digits + Lower Level of Difficulty + Observed Data + + +Model: 6 Parameters (mu,tau_1, ... 
, tau_5) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Instrument 4 5.11462616000000E-02 1.27865654000000E-02 1.18046237440255E+00 +Within Instrument 20 2.16636560000000E-01 1.08318280000000E-02 + + Certified R-Squared 1.90999039051129E-01 + + Certified Residual + Standard Deviation 1.04076068334656E-01 + + + + + + + + + + + + +Data: Instrument Resistance + 1 196.3052 + 1 196.1240 + 1 196.1890 + 1 196.2569 + 1 196.3403 + 2 196.3042 + 2 196.3825 + 2 196.1669 + 2 196.3257 + 2 196.0422 + 3 196.1303 + 3 196.2005 + 3 196.2889 + 3 196.0343 + 3 196.1811 + 4 196.2795 + 4 196.1748 + 4 196.1494 + 4 196.1485 + 4 195.9885 + 5 196.2119 + 5 196.1051 + 5 196.1850 + 5 196.0052 + 5 196.2090 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs01.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs01.dat new file mode 100644 index 0000000000000000000000000000000000000000..945b24bf35422152a5faba73ed054ab78fda1bdf --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs01.dat @@ -0,0 +1,249 @@ +NIST/ITL StRD +Dataset Name: SmLs01 (SmLs01.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 249) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 21 Replicates/Cell + 189 Observations + 1 Constant Leading Digit + Lower Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... , tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.68000000000000E+00 2.10000000000000E-01 2.10000000000000E+01 +Within Treatment 180 1.80000000000000E+00 1.00000000000000E-02 + + Certified R-Squared 4.82758620689655E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1.4 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 2 1.3 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 3 1.5 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 4 1.3 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 5 1.5 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 6 1.3 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 7 1.5 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 8 1.3 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 9 1.5 
+ 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs02.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs02.dat new file mode 100644 index 0000000000000000000000000000000000000000..ee76633a660a48225064bbb86a25f6a2f36c6d9a --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs02.dat @@ -0,0 +1,1869 @@ +NIST/ITL StRD +Dataset Name: SmLs02 (SmLs02.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 1869) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 201 Replicates/Cell + 1809 Observations + 1 Constant Leading Digit + Lower Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... , tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.60800000000000E+01 2.01000000000000E+00 2.01000000000000E+02 +Within Treatment 1800 1.80000000000000E+01 1.00000000000000E-02 + + Certified R-Squared 4.71830985915493E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1.4 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 2 1.3 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 
+ 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 3 1.5 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 4 1.3 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 
4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 5 1.5 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 6 1.3 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 
1.4 [remainder of the preceding data listing elided: treatments 6-9, 2001 responses each, alternating 0.1 below/above each treatment mean] diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs03.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs03.dat new file mode 100644 index 0000000000000000000000000000000000000000..55dfa2313ffb152709c58b47c0058567b710d903 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs03.dat @@ -0,0 +1,18069 @@
+NIST/ITL StRD
+Dataset Name:   SmLs03 (SmLs03.dat)
+
+File Format:    ASCII
+                Certified Values (lines 41 to 47)
+                Data             (lines 61 to 18069)
+
+Procedure:      Analysis of Variance
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+Data:           1 Factor
+                9 Treatments
+                2001 Replicates/Cell
+                18009 Observations
+                1 Constant Leading Digit
+                Lower Level of Difficulty
+                Generated Data
+
+Model:          10 Parameters (mu, tau_1, ..., tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+Certified Values:
+
+Source of            df      Sums of               Mean
+Variation                    Squares               Squares               F Statistic
+
+Between Treatment     8      1.60080000000000E+02  2.00100000000000E+01  2.00100000000000E+03
+Within Treatment  18000      1.80000000000000E+02  1.00000000000000E-02
+
+Certified R-Squared          4.70712773465067E-01
+
+Certified Residual
+Standard Deviation           1.00000000000000E-01
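(Note: the header above fully specifies a check that is worth making explicit: a one-way ANOVA over the 9 treatments of 2001 replicates should reproduce the certified F statistic of 2.00100000000000E+03. A minimal sketch of that verification, assuming SmLs03.dat is materialized on disk at the path this diff adds; the 60 skipped header lines and the two-column "treatment response" layout are taken from the file header, and the script itself is illustrative, not part of the diff:

import numpy as np
from scipy.stats import f_oneway

# Path as added by this diff; assumes the file exists on disk (not an LFS pointer).
path = "venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs03.dat"

# Per the header, the data rows occupy lines 61-18069, so skip 60 header lines.
raw = np.loadtxt(path, skiprows=60)

# Split the response column (col 1) by treatment label (col 0), treatments 1..9.
groups = [raw[raw[:, 0] == i, 1] for i in range(1, 10)]

f_stat, p_value = f_oneway(*groups)
print(f"F = {f_stat:.10e}")  # certified value: 2.00100000000000E+03
)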
+Data: Treatment   Response
+[data lines 61-18069: 2001 responses per treatment; each treatment opens at its mean and then alternates 0.1 below/above it (treatment 1: 1.4, 1.3, 1.5, 1.3, 1.5, ...; treatment 2: 1.3, 1.2, 1.4, ...; treatment 3: 1.5, 1.4, 1.6, ...; treatment 4: 1.3, 1.2, 1.4, ...; treatment 5: 1.5, 1.4, 1.6, ...); the verbatim listing is elided through mid-treatment 5 and resumes below]
1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 
1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 
1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 
1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 6 1.3 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 
1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 
1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 
1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 
1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 
1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 7 1.5 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 
1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 
1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 
1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 
1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 8 1.3 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 
  [tail of the preceding nist_anova data file, omitted: treatment 8 rows alternating "+ 8 1.2" / "+ 8 1.4", then treatment 9 rows starting "+ 9 1.5" and alternating "+ 9 1.4" / "+ 9 1.6"]
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs04.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs04.dat
new file mode 100644
index 0000000000000000000000000000000000000000..6a2a9fc935a56989b166de9b23f3df3bc4f64879
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs04.dat
@@ -0,0 +1,249 @@
+NIST/ITL StRD
+Dataset Name:  SmLs04 (SmLs04.dat)
+
+File Format:   ASCII
+               Certified Values (lines 41 to 47)
+               Data (lines 61 to 249)
+
+Procedure:     Analysis of Variance
+
+Reference:     Simon, Stephen D. and Lesage, James P. (1989).
+               "Assessing the Accuracy of ANOVA Calculations in
+               Statistical Software".
+               Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+Data:          1 Factor
+               9 Treatments
+               21 Replicates/Cell
+               189 Observations
+               7 Constant Leading Digits
+               Average Level of Difficulty
+               Generated Data
+
+Model:         10 Parameters (mu, tau_1, ..., tau_9)
+               y_{ij} = mu + tau_i + epsilon_{ij}
+
+Certified Values:
+
+Source of                    Sums of                Mean
+Variation            df      Squares                Squares               F Statistic
+
+Between Treatment     8      1.68000000000000E+00   2.10000000000000E-01  2.10000000000000E+01
+Within Treatment    180      1.80000000000000E+00   1.00000000000000E-02
+
+   Certified R-Squared  4.82758620689655E-01
+
+   Certified Residual
+   Standard Deviation   1.00000000000000E-01
+
+Data:   Treatment   Response
  [189 data rows omitted: for each treatment 1-9, 21 responses; the cell mean appears first (1000000.4, 1000000.3, 1000000.5, 1000000.3, 1000000.5, 1000000.3, 1000000.5, 1000000.3, 1000000.5 for treatments 1-9), followed by ten alternating pairs 0.1 below and 0.1 above it]
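The certified values above are reference results that any correct one-way ANOVA routine must reproduce. As an editorial illustration (not part of the diff), here is a minimal sketch that rebuilds the SmLs04 groups from the pattern noted in the data placeholder and checks the certified F statistic with scipy.stats.f_oneway; the nine cell means are read off the data listing.

    import numpy as np
    from scipy.stats import f_oneway

    # Each SmLs04 cell: the cell mean once, then ten alternating
    # pairs 0.1 below / 0.1 above it (21 replicates per treatment).
    means = [1000000.4, 1000000.3, 1000000.5, 1000000.3, 1000000.5,
             1000000.3, 1000000.5, 1000000.3, 1000000.5]
    groups = [np.array([m] + [m - 0.1, m + 0.1] * 10) for m in means]

    f_stat, p_value = f_oneway(*groups)
    print(f_stat)  # should match the certified 2.10000000000000E+01
                   # up to floating-point error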
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs05.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs05.dat
new file mode 100644
index 0000000000000000000000000000000000000000..fe11c40b5f51aefc81d4d1501a74e627f2b2d992
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs05.dat
@@ -0,0 +1,1869 @@
+NIST/ITL StRD
+Dataset Name:  SmLs05 (SmLs05.dat)
+
+File Format:   ASCII
+               Certified Values (lines 41 to 47)
+               Data (lines 61 to 1869)
+
+Procedure:     Analysis of Variance
+
+Reference:     Simon, Stephen D. and Lesage, James P. (1989).
+               "Assessing the Accuracy of ANOVA Calculations in
+               Statistical Software".
+               Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+Data:          1 Factor
+               9 Treatments
+               201 Replicates/Cell
+               1809 Observations
+               7 Constant Leading Digits
+               Average Level of Difficulty
+               Generated Data
+
+Model:         10 Parameters (mu, tau_1, ..., tau_9)
+               y_{ij} = mu + tau_i + epsilon_{ij}
+
+Certified Values:
+
+Source of                    Sums of                Mean
+Variation            df      Squares                Squares               F Statistic
+
+Between Treatment     8      1.60800000000000E+01   2.01000000000000E+00  2.01000000000000E+02
+Within Treatment   1800      1.80000000000000E+01   1.00000000000000E-02
+
+   Certified R-Squared  4.71830985915493E-01
+
+   Certified Residual
+   Standard Deviation   1.00000000000000E-01
+
+Data:   Treatment   Response
  [1809 data rows omitted: the same nine cell means as SmLs04, each followed by one hundred alternating pairs 0.1 below and 0.1 above it (201 replicates per treatment)]
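All three SmLs0x files share the StRD layout: certified values at fixed line numbers and a two-column Treatment/Response listing starting at line 61. A small reader sketch (editorial; the helper name read_strd_anova is hypothetical, and the path in the usage comment is the one from the diff header above):

    from collections import defaultdict

    def read_strd_anova(path, first_data_line=61):
        """Collect the 'Treatment  Response' rows of an StRD ANOVA file."""
        groups = defaultdict(list)
        with open(path) as fh:
            for lineno, line in enumerate(fh, start=1):
                parts = line.split()
                # Data rows are exactly two whitespace-separated columns.
                if lineno >= first_data_line and len(parts) == 2:
                    treatment, response = parts
                    groups[int(treatment)].append(float(response))
        return groups

    # groups = read_strd_anova(
    #     "venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs05.dat")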
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs06.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs06.dat
new file mode 100644
index 0000000000000000000000000000000000000000..602e4fbdaa26bbb8d95ce78d1f48dbbfa883e7e9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs06.dat
@@ -0,0 +1,18069 @@
+NIST/ITL StRD
+Dataset Name:  SmLs06 (SmLs06.dat)
+
+File Format:   ASCII
+               Certified Values (lines 41 to 47)
+               Data (lines 61 to 18069)
+
+Procedure:     Analysis of Variance
+
+Reference:     Simon, Stephen D. and Lesage, James P. (1989).
+               "Assessing the Accuracy of ANOVA Calculations in
+               Statistical Software".
+               Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+Data:          1 Factor
+               9 Treatments
+               2001 Replicates/Cell
+               18009 Observations
+               7 Constant Leading Digits
+               Average Level of Difficulty
+               Generated Data
+
+Model:         10 Parameters (mu, tau_1, ..., tau_9)
+               y_{ij} = mu + tau_i + epsilon_{ij}
+
+Certified Values:
+
+Source of                    Sums of                Mean
+Variation            df      Squares                Squares               F Statistic
+
+Between Treatment     8      1.60080000000000E+02   2.00100000000000E+01  2.00100000000000E+03
+Within Treatment  18000      1.80000000000000E+02   1.00000000000000E-02
+
+   Certified R-Squared  4.70712773465067E-01
+
+   Certified Residual
+   Standard Deviation   1.00000000000000E-01
+
+Data:   Treatment   Response
  [18009 data rows: the same nine cell means, each followed by one thousand alternating pairs 0.1 below and 0.1 above it (2001 replicates per treatment); the listing continues beyond this excerpt]
1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 
+ 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 
1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 
1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 
+ 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 
1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 
1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 2 1000000.3 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 
+ 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 
1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 
2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 
+ 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 
1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 
2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 
+ 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 3 1000000.5 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 
1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 
+[elided: several thousand repeated two-column data rows from the added file, flattened here by extraction. The recoverable pattern: rows of the form "3 1000000.6" / "3 1000000.4" alternating, followed by "4 1000000.4" / "4 1000000.2" alternating, then "5 1000000.6" / "5 1000000.4" alternating — i.e. an integer index column and a second column oscillating narrowly around 1000000. The full verbatim run is omitted as it carries no further information.]
+ 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 
1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 
5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 
+ 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 6 1000000.3 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 
1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 
6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 
+ 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 
1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 
6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 
+ 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 
1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 
6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 7 1000000.5 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 
+ 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 
1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 
+ 7 1000000.4
+ 7 1000000.6
[... treatment group 7 continues, alternating 1000000.4 and 1000000.6 ...]
+ 8 1000000.3
+ 8 1000000.2
+ 8 1000000.4
[... treatment group 8 continues, alternating 1000000.2 and 1000000.4 ...]
+ 9 1000000.5
+ 9 1000000.4
+ 9 1000000.6
[... treatment group 9 continues, alternating 1000000.4 and 1000000.6 ...]
9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs07.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs07.dat new file mode 100644 index 0000000000000000000000000000000000000000..deeac955e65ffaf55838568baa54951efaf2662b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs07.dat @@ -0,0 +1,249 @@ +NIST/ITL StRD +Dataset Name: SmLs07 (SmLs07.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 249) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 21 Replicates/Cell + 189 Observations + 13 Constant Leading Digits + Higher Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... , tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.68000000000000E+00 2.10000000000000E-01 2.10000000000000E+01 +Within Treatment 180 1.80000000000000E+00 1.00000000000000E-02 + + Certified R-Squared 4.82758620689655E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1000000000000.4 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 2 1000000000000.3 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 3 1000000000000.5 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 4 1000000000000.3 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 5 1000000000000.5 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 6 1000000000000.3 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 7 1000000000000.5 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 8 1000000000000.3 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 9 1000000000000.5 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs08.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs08.dat new file mode 100644 index 0000000000000000000000000000000000000000..c5ee643fb8c6ef849ab8e34352bc60f15c715a45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs08.dat @@ -0,0 +1,1869 @@ +NIST/ITL StRD +Dataset Name: SmLs08 (SmLs08.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 1869) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 201 Replicates/Cell + 1809 Observations + 13 Constant Leading Digits + Higher Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... 
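The certified values are reproducible from the data's generating pattern. As a minimal cross-check (not part of the diff: BASE, smls07_group, and the offset table are read off the data block above), rebuilding the SmLs07 groups and running scipy.stats.f_oneway should agree closely with the certified F statistic:

import numpy as np
from scipy import stats

BASE = 1_000_000_000_000.0  # the 13 constant leading digits

def smls07_group(t, reps=21):
    # Offsets as read off the data block above: treatment 1 opens at .4 and
    # then alternates .3/.5; even treatments open at .3 and alternate .2/.4;
    # odd treatments >= 3 open at .5 and alternate .4/.6.
    if t == 1:
        lead, lo, hi = 0.4, 0.3, 0.5
    elif t % 2 == 0:
        lead, lo, hi = 0.3, 0.2, 0.4
    else:
        lead, lo, hi = 0.5, 0.4, 0.6
    return BASE + np.array([lead] + [lo, hi] * ((reps - 1) // 2))

F, _ = stats.f_oneway(*(smls07_group(t) for t in range(1, 10)))
print(F)  # expected: 2.10000000000000E+01, the certified F statistic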
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs08.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs08.dat
new file mode 100644
index 0000000000000000000000000000000000000000..c5ee643fb8c6ef849ab8e34352bc60f15c715a45
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs08.dat
@@ -0,0 +1,1869 @@
+NIST/ITL StRD
+Dataset Name:   SmLs08 (SmLs08.dat)
+
+
+File Format:    ASCII
+                Certified Values (lines 41 to 47)
+                Data (lines 61 to 1869)
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                201 Replicates/Cell
+                1809 Observations
+                13 Constant Leading Digits
+                Higher Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+Certified Values:
+
+Source of                 Sums of               Mean
+Variation           df    Squares               Squares               F Statistic
+
+Between Treatment     8   1.60800000000000E+01  2.01000000000000E+00  2.01000000000000E+02
+Within Treatment   1800   1.80000000000000E+01  1.00000000000000E-02
+
+   Certified R-Squared  4.71830985915493E-01
+
+   Certified Residual
+   Standard Deviation   1.00000000000000E-01
+
+
+Data:   Treatment   Response
+            1       1000000000000.4
+            1       1000000000000.3
+            1       1000000000000.5
[… 1806 further data lines (1809 total): 201 replicates per treatment, same opening values and alternating pattern as SmLs07 …]
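The 13 constant leading digits are what earns the "Higher Level of Difficulty" rating: the certified sums of squares (~1E+01) sit some 25 orders of magnitude below sum(y**2) (~1.8E+27), so a one-pass "calculator" formula for the total sum of squares loses every significant digit in float64, while a two-pass formula on centered data recovers them. A sketch of the failure mode (my illustration, not scipy code; the rebuild helper mirrors the one above), using SmLs08's 201 replicates per cell:

import numpy as np

def rebuild(reps):
    # Concatenate all 9 treatment groups of the SmLs generating pattern.
    groups = []
    for t in range(1, 10):
        if t == 1:
            lead, lo, hi = 0.4, 0.3, 0.5
        elif t % 2 == 0:
            lead, lo, hi = 0.3, 0.2, 0.4
        else:
            lead, lo, hi = 0.5, 0.4, 0.6
        groups.append(1e12 + np.array([lead] + [lo, hi] * ((reps - 1) // 2)))
    return np.concatenate(groups)

y = rebuild(201)                                           # SmLs08
sst_naive = float(np.sum(y * y) - y.size * y.mean() ** 2)  # one-pass formula
sst_twopass = float(np.sum((y - y.mean()) ** 2))           # centered data

print(sst_twopass)  # ~3.408E+01 = certified SSB (1.608E+01) + SSW (1.8E+01)
print(sst_naive)    # garbage: float64 spacing near sum(y*y) ~ 1.8E+27 is
                    # ~4E+11, which dwarfs the true answer of 34.08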
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs09.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs09.dat
new file mode 100644
index 0000000000000000000000000000000000000000..887905e355a2a13801f1b004187631f2301f7eef
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_anova/SmLs09.dat
@@ -0,0 +1,18069 @@
+NIST/ITL StRD
+Dataset Name:   SmLs09 (SmLs09.dat)
+
+
+File Format:    ASCII
+                Certified Values (lines 41 to 47)
+                Data (lines 61 to 18069)
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                2001 Replicates/Cell
+                18009 Observations
+                13 Constant Leading Digits
+                Higher Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+Certified Values:
+
+Source of                 Sums of               Mean
+Variation            df   Squares               Squares               F Statistic
+
+Between Treatment      8  1.60080000000000E+02  2.00100000000000E+01  2.00100000000000E+03
+Within Treatment   18000  1.80000000000000E+02  1.00000000000000E-02
+
+   Certified R-Squared  4.70712773465067E-01
+
+   Certified Residual
+   Standard Deviation   1.00000000000000E-01
+
+
+Data:   Treatment   Response
+            1       1000000000000.4
+            1       1000000000000.3
+            1       1000000000000.5
[… data block continues: 2001 replicates per treatment (18009 observations), same opening values and alternating pattern as SmLs07 and SmLs08 …]
1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 
1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 
1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 
1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 
1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 
1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 
1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 
1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 2 1000000000000.3 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 
1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 
1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 
[elided: many near-identical lines of repeated numeric expressions from an added data file — long runs alternating "1000000000000.2 + 2" / "1000000000000.4 + 2", then analogous runs of "1000000000000.4 + 3" / "1000000000000.6 + 3", then "1000000000000.2 + 4" / "1000000000000.4 + 4"; no other content in this span]
1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 
1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 
1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 
1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 
1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 
1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 
1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 
1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 5 1000000000000.5 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 
1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 
1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 
1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 
1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 
1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 
1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 
1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 8 1000000000000.3 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 
1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 
1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 
[... tail of the preceding data file truncated: several thousand repeated entries alternating "1000000000000.2 + 8" / "1000000000000.4 + 8", then "1000000000000.4 + 9" / "1000000000000.5 + 9" / "1000000000000.6 + 9" ...]
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_linregress/Norris.dat b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_linregress/Norris.dat
new file mode 100644
index 0000000000000000000000000000000000000000..4bf8ed911cae75824b27e5f5d5e444e17fa8eae8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/nist_linregress/Norris.dat
@@ -0,0 +1,97 @@
+NIST/ITL StRD
+Dataset Name:  Norris (Norris.dat)
+
+File Format:   ASCII
+               Certified Values  (lines 31 to 46)
+               Data              (lines 61 to 96)
+
+Procedure:     Linear Least Squares Regression
+
+Reference:     Norris, J., NIST.
+               Calibration of Ozone Monitors.
+
+Data:          1 Response Variable (y)
+               1 Predictor Variable (x)
+               36 Observations
+               Lower Level of Difficulty
+               Observed Data
+
+Model:         Linear Class
+               2 Parameters (B0,B1)
+
+               y = B0 + B1*x + e
+
+
+
+               Certified Regression Statistics
+
+                                          Standard Deviation
+     Parameter          Estimate             of Estimate
+
+        B0        -0.262323073774029     0.232818234301152
+        B1         1.00211681802045      0.429796848199937E-03
+
+     Residual
+     Standard Deviation   0.884796396144373
+
+     R-Squared            0.999993745883712
+
+
+               Certified Analysis of Variance Table
+
+Source of  Degrees of     Sums of              Mean
+Variation   Freedom       Squares             Squares           F Statistic
+
+Regression     1      4255954.13232369    4255954.13232369   5436385.54079785
+Residual      34      26.6173985294224    0.782864662630069
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Data:       y          x
+          0.1        0.2
+        338.8      337.4
+        118.1      118.2
+        888.0      884.6
+          9.2       10.1
+        228.1      226.5
+        668.5      666.3
+        998.5      996.3
+        449.1      448.6
+        778.9      777.0
+        559.2      558.2
+          0.3        0.4
+          0.1        0.6
+        778.1      775.5
+        668.8      666.9
+        339.3      338.0
+        448.9      447.5
+         10.8       11.6
+        557.7      556.0
+        228.3      228.1
+        998.0      995.8
+        888.8      887.6
+        119.6      120.2
+          0.3        0.3
+          0.6        0.3
+        557.6      556.8
+        339.3      339.1
+        888.0      887.2
+        998.5      999.0
+        778.9      779.0
+         10.2       11.1
+        117.6      118.3
+        228.9      229.2
+        668.4      669.1
+        449.2      448.9
+          0.2        0.5
+
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/studentized_range_mpmath_ref.json b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/studentized_range_mpmath_ref.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb971286cf85b28738a80bacececfb90c2566782
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/studentized_range_mpmath_ref.json
@@ -0,0 +1,1499 @@
+{
+  "COMMENT": "!!!!!! THIS FILE WAS AUTOGENERATED BY RUNNING `python studentized_range_mpmath_ref.py` !!!!!!",
+  "moment_data": [
+    {"src_case": {"m": 0, "k": 3, "v": 10, "expected_atol": 1e-09, "expected_rtol": 1e-09}, "mp_result": 1.0},
+    {"src_case": {"m": 1, "k": 3, "v": 10, "expected_atol": 1e-09, "expected_rtol": 1e-09}, "mp_result": 1.8342745127927962},
+    {"src_case": {"m": 2, "k": 3, "v": 10, "expected_atol": 1e-09, "expected_rtol": 1e-09}, "mp_result": 4.567483357831711},
+    {"src_case": {"m": 3, "k": 3, "v": 10, "expected_atol": 1e-09, "expected_rtol": 1e-09}, "mp_result": 14.412156886227011},
+    {"src_case": {"m": 4, "k": 3, "v": 10, "expected_atol": 1e-09, "expected_rtol": 1e-09}, "mp_result": 56.012250366720444}
+  ],
+  "cdf_data": [
+    {"src_case": {"q": 0.1, "k": 3, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.0027502772229359594},
+    {"src_case": {"q": 0.1, "k": 10, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 2.8544145010066327e-12},
+    {"src_case": {"q": 0.1, "k": 3, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.0027520560662338336},
+    {"src_case": {"q": 0.1, "k": 10, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 9.39089126131273e-13},
+    {"src_case": {"q": 0.1, "k": 3, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.002752437649536182},
+    {"src_case": {"q": 0.1, "k": 10, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 1.0862189999210748e-12},
+    {"src_case": {"q": 0.1, "k": 3, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.002752755744313648},
+    {"src_case": {"q": 0.1, "k": 3, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.0027527430186246545},
+    {"src_case": {"q": 0.1, "k": 3, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.002752666667812431},
+    {"src_case": {"q": 0.1, "k": 20, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 2.505275157135514e-24},
+    {"src_case": {"q": 0.1, "k": 20, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 3.8546698113384126e-25},
+    {"src_case": {"q": 0.1, "k": 10, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 1.7362668562706085e-11},
+    {"src_case": {"q": 0.1, "k": 20, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 5.571947730052616e-26},
+    {"src_case": {"q": 0.1, "k": 20, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 2.032619249089036e-27},
+    {"src_case": {"q": 0.1, "k": 20, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 9.539763646681808e-22},
+    {"src_case": {"q": 0.1, "k": 10, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 1.618313512511099e-12},
+    {"src_case": {"q": 0.1, "k": 20, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 4.919231733354114e-28},
+    {"src_case": {"q": 0.1, "k": 10, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 9.159348906295542e-13},
+    {"src_case": {"q": 1, "k": 3, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.22331624289542043},
+    {"src_case": {"q": 1, "k": 3, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.2395624637676257},
+    {"src_case": {"q": 1, "k": 3, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.23510918942128056},
+    {"src_case": {"q": 1, "k": 3, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.23786536230099864},
+    {"src_case": {"q": 1, "k": 10, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.000651656693149116},
+    {"src_case": {"q": 1, "k": 3, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.2401356460422021},
+    {"src_case": {"q": 1, "k": 10, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.003971273224673166},
+    {"src_case": {"q": 1, "k": 10, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.0008732969319364606},
+    {"src_case": {"q": 1, "k": 3, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.24023154593376422},
+    {"src_case": {"q": 1, "k": 10, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.001300816146573152},
+    {"src_case": {"q": 1, "k": 20, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 1.5682573722040226e-07},
+    {"src_case": {"q": 1, "k": 10, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.0005841098057517027},
+    {"src_case": {"q": 1, "k": 20, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 9.2267674885784e-05},
+    {"src_case": {"q": 1, "k": 10, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.0005731712496327297},
+    {"src_case": {"q": 1, "k": 20, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 2.746798012658064e-06},
+    {"src_case": {"q": 1, "k": 20, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 5.807700350854172e-07},
+    {"src_case": {"q": 1, "k": 20, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 9.147637957472628e-08},
+    {"src_case": {"q": 1, "k": 20, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 8.306675539750552e-08},
+    {"src_case": {"q": 4, "k": 3, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.8711786295203324},
+    {"src_case": {"q": 4, "k": 3, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9818862781476212},
+    {"src_case": {"q": 4, "k": 3, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9566506502400175},
+    {"src_case": {"q": 4, "k": 3, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9849546621386962},
+    {"src_case": {"q": 4, "k": 3, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9731488893573804},
+    {"src_case": {"q": 4, "k": 10, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.8450530667988544},
+    {"src_case": {"q": 4, "k": 10, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.6164875232404174},
+    {"src_case": {"q": 4, "k": 3, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9845292772767739},
+    {"src_case": {"q": 4, "k": 10, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.8079691517949077},
+    {"src_case": {"q": 4, "k": 10, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.7573606942645745},
+    {"src_case": {"q": 4, "k": 10, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.8587525248147736},
+    {"src_case": {"q": 4, "k": 10, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.8611036193280976},
+    {"src_case": {"q": 4, "k": 20, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.46523135355387657},
+    {"src_case": {"q": 4, "k": 20, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.6318042819232383},
+    {"src_case": {"q": 4, "k": 20, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.5574947140294286},
+    {"src_case": {"q": 4, "k": 20, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.5970517763141937},
+    {"src_case": {"q": 4, "k": 20, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.6493671527818267},
+    {"src_case": {"q": 4, "k": 20, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.6466699776044968},
+    {"src_case": {"q": 10, "k": 3, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9881335633712994},
+    {"src_case": {"q": 10, "k": 3, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9999999861266821},
+    {"src_case": {"q": 10, "k": 3, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.999908236635449},
+    {"src_case": {"q": 10, "k": 3, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9999978467928313},
+    {"src_case": {"q": 10, "k": 3, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9999999996690216},
+    {"src_case": {"q": 10, "k": 3, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9999999993640496},
+    {"src_case": {"q": 10, "k": 10, "v": 3, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9570401457077894},
+    {"src_case": {"q": 10, "k": 10, "v": 50, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9999997977351971},
+    {"src_case": {"q": 10, "k": 10, "v": 10, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9991738325963548},
+    {"src_case": {"q": 10, "k": 10, "v": 20, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9999730883609333},
+    {"src_case": {"q": 10, "k": 10, "v": 100, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result": 0.9999999905199205},
+    {"src_case": {"q": 10, "k": 10, "v": 120, "expected_atol": 1e-11, "expected_rtol": 1e-11}, "mp_result":
0.9999999950566264 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9312318042339768 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999991743904675 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9977643922032399 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999054426012515 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999999602948055 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999999792458618 + } + ], + "pdf_data": [ + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.05487847613526332 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.564099684606509e-10 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.05494947290360002 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 8.442593793786411e-11 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.054964710604860405 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.764441961563576e-11 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.05497690690332341 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.05497385731702228 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 4.758021225803992e-22 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.054977415200879516 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.8004731453548083e-19 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.5564176176604816e-09 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.342768070688728e-24 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.454372265306114e-10 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.9138464398429654e-25 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 5.266341131767418e-23 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 8.234556126446594e-11 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + 
"v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.32929780487562e-26 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.36083736990527154 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.4137959132282269 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.4080239698771056 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.398772020275752 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.4160873922094346 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.4157583991350054 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.005210720148451848 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.02575314059867804 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.009782573637596617 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.006818708302379005 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0047089182958790715 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.004627085294166373 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0010886280311369462 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.630674470916427e-06 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 4.121713278199428e-05 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.319506007252685e-06 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.5585754418789747e-06 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.4190335899441991e-06 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.07185383302009114 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.050268901219386576 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.03321056847176124 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.04044172384981084 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.030571365659999617 + }, + { + "src_case": { + "q": 4, + 
"k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.030120779149073032 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.17501664247670937 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.22374394725370736 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.23246597521020534 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.23239043677504484 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.23057775622748988 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.23012666145240815 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.2073676639537027 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.3245990542431859 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0033733228559870584 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 7.728665739003835e-05 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.38244500549096866 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.45434978340834464 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.43334135870667473 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.159522630228393e-09 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.45807877248528855 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.5303467191175695e-08 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.121281850105421e-06 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.1901591191700855e-09 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0006784051704217357 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.011845582636101885 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.844183552674918e-05 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.215093171597309e-08 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 5.125792577534542e-07 + }, + { 
+ "src_case": { + "q": 10, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.7759015355532446e-08 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0017957646258393628 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.018534407764819284 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.00013316083413164858 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.082489228991225e-06 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.3444226792257012e-07 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 7.446912854228521e-08 + } + ] +} \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_axis_nan_policy.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_axis_nan_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..b4a0b30f8374eea644f80c63aa2c3efadbf4695f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_axis_nan_policy.py @@ -0,0 +1,1188 @@ +# Many scipy.stats functions support `axis` and `nan_policy` parameters. +# When the two are combined, it can be tricky to get all the behavior just +# right. This file contains a suite of common tests for scipy.stats functions +# that support `axis` and `nan_policy` and additional tests for some associated +# functions in stats._util. + +from itertools import product, combinations_with_replacement, permutations +import re +import pickle +import pytest + +import numpy as np +from numpy.testing import assert_allclose, assert_equal, suppress_warnings +from scipy import stats +from scipy.stats import norm # type: ignore[attr-defined] +from scipy.stats._axis_nan_policy import _masked_arrays_2_sentinel_arrays +from scipy._lib._util import AxisError + + +def unpack_ttest_result(res): + low, high = res.confidence_interval() + return (res.statistic, res.pvalue, res.df, res._standard_error, + res._estimate, low, high) + + +def _get_ttest_ci(ttest): + # get a function that returns the CI bounds of provided `ttest` + def ttest_ci(*args, **kwargs): + res = ttest(*args, **kwargs) + return res.confidence_interval() + return ttest_ci + + +axis_nan_policy_cases = [ + # function, args, kwds, number of samples, number of outputs, + # ... 
paired, unpacker function + # args, kwds typically aren't needed; just showing that they work + (stats.kruskal, tuple(), dict(), 3, 2, False, None), # 4 samples is slow + (stats.ranksums, ('less',), dict(), 2, 2, False, None), + (stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, 2, False, None), + (stats.wilcoxon, ('pratt',), {'mode': 'auto'}, 2, 2, True, + lambda res: (res.statistic, res.pvalue)), + (stats.wilcoxon, tuple(), dict(), 1, 2, True, + lambda res: (res.statistic, res.pvalue)), + (stats.wilcoxon, tuple(), {'mode': 'approx'}, 1, 3, True, + lambda res: (res.statistic, res.pvalue, res.zstatistic)), + (stats.gmean, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.hmean, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.pmean, (1.42,), dict(), 1, 1, False, lambda x: (x,)), + (stats.sem, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.iqr, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.kurtosis, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.skew, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.kstat, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.kstatvar, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.moment, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.moment, tuple(), dict(order=[1, 2]), 1, 2, False, None), + (stats.jarque_bera, tuple(), dict(), 1, 2, False, None), + (stats.ttest_1samp, (np.array([0]),), dict(), 1, 7, False, + unpack_ttest_result), + (stats.ttest_rel, tuple(), dict(), 2, 7, True, unpack_ttest_result), + (stats.ttest_ind, tuple(), dict(), 2, 7, False, unpack_ttest_result), + (_get_ttest_ci(stats.ttest_1samp), (0,), dict(), 1, 2, False, None), + (_get_ttest_ci(stats.ttest_rel), tuple(), dict(), 2, 2, True, None), + (_get_ttest_ci(stats.ttest_ind), tuple(), dict(), 2, 2, False, None), + (stats.mode, tuple(), dict(), 1, 2, True, lambda x: (x.mode, x.count)), + (stats.differential_entropy, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.variation, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.friedmanchisquare, tuple(), dict(), 3, 2, True, None), + (stats.brunnermunzel, tuple(), dict(), 2, 2, False, None), + (stats.mood, tuple(), {}, 2, 2, False, None), + (stats.shapiro, tuple(), {}, 1, 2, False, None), + (stats.ks_1samp, (norm().cdf,), dict(), 1, 4, False, + lambda res: (*res, res.statistic_location, res.statistic_sign)), + (stats.ks_2samp, tuple(), dict(), 2, 4, False, + lambda res: (*res, res.statistic_location, res.statistic_sign)), + (stats.kstest, (norm().cdf,), dict(), 1, 4, False, + lambda res: (*res, res.statistic_location, res.statistic_sign)), + (stats.kstest, tuple(), dict(), 2, 4, False, + lambda res: (*res, res.statistic_location, res.statistic_sign)), + (stats.levene, tuple(), {}, 2, 2, False, None), + (stats.fligner, tuple(), {'center': 'trimmed', 'proportiontocut': 0.01}, + 2, 2, False, None), + (stats.ansari, tuple(), {}, 2, 2, False, None), + (stats.entropy, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.entropy, tuple(), dict(), 2, 1, True, lambda x: (x,)), + (stats.skewtest, tuple(), dict(), 1, 2, False, None), + (stats.kurtosistest, tuple(), dict(), 1, 2, False, None), + (stats.normaltest, tuple(), dict(), 1, 2, False, None), + (stats.cramervonmises, ("norm",), dict(), 1, 2, False, + lambda res: (res.statistic, res.pvalue)), + (stats.cramervonmises_2samp, tuple(), dict(), 2, 2, False, + lambda res: (res.statistic, res.pvalue)), + (stats.epps_singleton_2samp, tuple(), dict(), 2, 2, False, None), + (stats.bartlett, tuple(), {}, 2, 2, False, 
None), + (stats.tmean, tuple(), {}, 1, 1, False, lambda x: (x,)), + (stats.tvar, tuple(), {}, 1, 1, False, lambda x: (x,)), + (stats.tmin, tuple(), {}, 1, 1, False, lambda x: (x,)), + (stats.tmax, tuple(), {}, 1, 1, False, lambda x: (x,)), + (stats.tstd, tuple(), {}, 1, 1, False, lambda x: (x,)), + (stats.tsem, tuple(), {}, 1, 1, False, lambda x: (x,)), + (stats.circmean, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.circvar, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.circstd, tuple(), dict(), 1, 1, False, lambda x: (x,)), + (stats.f_oneway, tuple(), {}, 2, 2, False, None), + (stats.alexandergovern, tuple(), {}, 2, 2, False, + lambda res: (res.statistic, res.pvalue)), + (stats.combine_pvalues, tuple(), {}, 1, 2, False, None), +] + +# If the message is one of those expected, put nans in +# appropriate places of `statistics` and `pvalues` +too_small_messages = {"The input contains nan", # for nan_policy="raise" + "Degrees of freedom <= 0 for slice", + "x and y should have at least 5 elements", + "Data must be at least length 3", + "The sample must contain at least two", + "x and y must contain at least two", + "division by zero", + "Mean of empty slice", + "Data passed to ks_2samp must not be empty", + "Not enough test observations", + "Not enough other observations", + "Not enough observations.", + "At least one observation is required", + "zero-size array to reduction operation maximum", + "`x` and `y` must be of nonzero size.", + "The exact distribution of the Wilcoxon test", + "Data input must not be empty", + "Window length (0) must be positive and less", + "Window length (1) must be positive and less", + "Window length (2) must be positive and less", + "skewtest is not valid with less than", + "kurtosistest requires at least 5", + "attempt to get argmax of an empty sequence", + "No array values within given limits", + "Input sample size must be greater than one.",} + +# If the message is one of these, results of the function may be inaccurate, +# but NaNs are not to be placed +inaccuracy_messages = {"Precision loss occurred in moment calculation", + "Sample size too small for normal approximation."} + +# For some functions, nan_policy='propagate' should not just return NaNs +override_propagate_funcs = {stats.mode} + +# For some functions, empty arrays produce non-NaN results +empty_special_case_funcs = {stats.entropy} + +def _mixed_data_generator(n_samples, n_repetitions, axis, rng, + paired=False): + # generate random samples to check the response of hypothesis tests to + # samples with different (but broadcastable) shapes and various + # nan patterns (e.g. 
all nans, some nans, no nans) along axis-slices + + data = [] + for i in range(n_samples): + n_patterns = 6 # number of distinct nan patterns + n_obs = 20 if paired else 20 + i # observations per axis-slice + x = np.ones((n_repetitions, n_patterns, n_obs)) * np.nan + + for j in range(n_repetitions): + samples = x[j, :, :] + + # case 0: axis-slice with all nans (0 reals) + # cases 1-3: axis-slice with 1-3 reals (the rest nans) + # case 4: axis-slice with mostly (all but two) reals + # case 5: axis-slice with all reals + for k, n_reals in enumerate([0, 1, 2, 3, n_obs-2, n_obs]): + # for cases 1-3, need paired nans to be in the same place + indices = rng.permutation(n_obs)[:n_reals] + samples[k, indices] = rng.random(size=n_reals) + + # permute the axis-slices just to show that order doesn't matter + samples[:] = rng.permutation(samples, axis=0) + + # For multi-sample tests, we want to test broadcasting and check + # that nan policy works correctly for each nan pattern for each input. + # This takes care of both simultaneously. + new_shape = [n_repetitions] + [1]*n_samples + [n_obs] + new_shape[1 + i] = 6 + x = x.reshape(new_shape) + + x = np.moveaxis(x, -1, axis) + data.append(x) + return data + + +def _homogeneous_data_generator(n_samples, n_repetitions, axis, rng, + paired=False, all_nans=True): + # generate random samples to check the response of hypothesis tests to + # samples with different (but broadcastable) shapes and homogeneous + # data (all nans or all finite) + data = [] + for i in range(n_samples): + n_obs = 20 if paired else 20 + i # observations per axis-slice + shape = [n_repetitions] + [1]*n_samples + [n_obs] + shape[1 + i] = 2 + x = np.ones(shape) * np.nan if all_nans else rng.random(shape) + x = np.moveaxis(x, -1, axis) + data.append(x) + return data + + +def nan_policy_1d(hypotest, data1d, unpacker, *args, n_outputs=2, + nan_policy='raise', paired=False, _no_deco=True, **kwds): + # Reference implementation for how `nan_policy` should work for 1d samples + + if nan_policy == 'raise': + for sample in data1d: + if np.any(np.isnan(sample)): + raise ValueError("The input contains nan values") + + elif (nan_policy == 'propagate' + and hypotest not in override_propagate_funcs): + # For all hypothesis tests tested, returning nans is the right thing. + # But many hypothesis tests don't propagate correctly (e.g. they treat + # np.nan the same as np.inf, which doesn't make sense when ranks are + # involved) so override that behavior here.
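+ # (Illustration, not from the original suite: for a decorated function
+ # such as stats.skew, nan_policy='propagate' returns nan for a sample
+ # like [1., 2., np.nan], nan_policy='omit' evaluates the statistic on
+ # [1., 2.], and nan_policy='raise' raises ValueError; the branches of
+ # this reference implementation reproduce exactly that contract.)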
+ for sample in data1d: + if np.any(np.isnan(sample)): + return np.full(n_outputs, np.nan) + + elif nan_policy == 'omit': + # manually omit nans (or pairs in which at least one element is nan) + if not paired: + data1d = [sample[~np.isnan(sample)] for sample in data1d] + else: + nan_mask = np.isnan(data1d[0]) + for sample in data1d[1:]: + nan_mask = np.logical_or(nan_mask, np.isnan(sample)) + data1d = [sample[~nan_mask] for sample in data1d] + + return unpacker(hypotest(*data1d, *args, _no_deco=_no_deco, **kwds)) + + +@pytest.mark.filterwarnings('ignore::RuntimeWarning') +@pytest.mark.filterwarnings('ignore::UserWarning') +@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs", + "paired", "unpacker"), axis_nan_policy_cases) +@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise")) +@pytest.mark.parametrize(("axis"), (1,)) +@pytest.mark.parametrize(("data_generator"), ("mixed",)) +def test_axis_nan_policy_fast(hypotest, args, kwds, n_samples, n_outputs, + paired, unpacker, nan_policy, axis, + data_generator): + _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired, + unpacker, nan_policy, axis, data_generator) + + +@pytest.mark.slow +@pytest.mark.filterwarnings('ignore::RuntimeWarning') +@pytest.mark.filterwarnings('ignore::UserWarning') +@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs", + "paired", "unpacker"), axis_nan_policy_cases) +@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise")) +@pytest.mark.parametrize(("axis"), range(-3, 3)) +@pytest.mark.parametrize(("data_generator"), + ("all_nans", "all_finite", "mixed")) +def test_axis_nan_policy_full(hypotest, args, kwds, n_samples, n_outputs, + paired, unpacker, nan_policy, axis, + data_generator): + _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired, + unpacker, nan_policy, axis, data_generator) + + +def _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired, + unpacker, nan_policy, axis, data_generator): + # Tests the 1D and vectorized behavior of hypothesis tests against a + # reference implementation (nan_policy_1d with np.ndenumerate) + + # Some hypothesis tests return a non-iterable that needs an `unpacker` to + # extract the statistic and p-value. For those that don't: + if not unpacker: + def unpacker(res): + return res + + rng = np.random.default_rng(0) + + # Generate multi-dimensional test data with all important combinations + # of patterns of nans along `axis` + n_repetitions = 3 # number of repetitions of each pattern + data_gen_kwds = {'n_samples': n_samples, 'n_repetitions': n_repetitions, + 'axis': axis, 'rng': rng, 'paired': paired} + if data_generator == 'mixed': + inherent_size = 6 # number of distinct types of patterns + data = _mixed_data_generator(**data_gen_kwds) + elif data_generator == 'all_nans': + inherent_size = 2 # hard-coded in _homogeneous_data_generator + data_gen_kwds['all_nans'] = True + data = _homogeneous_data_generator(**data_gen_kwds) + elif data_generator == 'all_finite': + inherent_size = 2 # hard-coded in _homogeneous_data_generator + data_gen_kwds['all_nans'] = False + data = _homogeneous_data_generator(**data_gen_kwds) + + output_shape = [n_repetitions] + [inherent_size]*n_samples + + # To generate reference behavior to compare against, loop over the axis- + # slices in data. Make indexing easier by moving `axis` to the end and + # broadcasting all samples to the same shape. 
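+ # (Illustration, not from the original suite: np.moveaxis only relabels
+ # axes, e.g. np.moveaxis(np.ones((3, 6, 20)), -1, 0).shape == (20, 3, 6),
+ # and np.broadcast_to(np.ones((3, 1, 5)), (3, 6, 5)) returns a read-only
+ # view with shape (3, 6, 5); together these let every sample be indexed
+ # by the same leading index tuple in the loop below.)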
+ data_b = [np.moveaxis(sample, axis, -1) for sample in data] + data_b = [np.broadcast_to(sample, output_shape + [sample.shape[-1]]) + for sample in data_b] + statistics = np.zeros(output_shape) + pvalues = np.zeros(output_shape) + + for i, _ in np.ndenumerate(statistics): + data1d = [sample[i] for sample in data_b] + with np.errstate(divide='ignore', invalid='ignore'): + try: + res1d = nan_policy_1d(hypotest, data1d, unpacker, *args, + n_outputs=n_outputs, + nan_policy=nan_policy, + paired=paired, _no_deco=True, **kwds) + + # Eventually we'll check the results of a single, vectorized + # call of `hypotest` against the arrays `statistics` and + # `pvalues` populated using the reference `nan_policy_1d`. + # But while we're at it, check the results of a 1D call to + # `hypotest` against the reference `nan_policy_1d`. + res1db = unpacker(hypotest(*data1d, *args, + nan_policy=nan_policy, **kwds)) + assert_equal(res1db[0], res1d[0]) + if len(res1db) == 2: + assert_equal(res1db[1], res1d[1]) + + # When there is not enough data in 1D samples, many existing + # hypothesis tests raise errors instead of returning nans. + # For vectorized calls, we put nans in the corresponding elements + # of the output. + except (RuntimeWarning, UserWarning, ValueError, + ZeroDivisionError) as e: + + # whatever it is, make sure the same error is raised by both + # `nan_policy_1d` and `hypotest` + with pytest.raises(type(e), match=re.escape(str(e))): + nan_policy_1d(hypotest, data1d, unpacker, *args, + n_outputs=n_outputs, nan_policy=nan_policy, + paired=paired, _no_deco=True, **kwds) + with pytest.raises(type(e), match=re.escape(str(e))): + hypotest(*data1d, *args, nan_policy=nan_policy, **kwds) + + if any([str(e).startswith(message) + for message in too_small_messages]): + res1d = np.full(n_outputs, np.nan) + elif any([str(e).startswith(message) + for message in inaccuracy_messages]): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + sup.filter(UserWarning) + res1d = nan_policy_1d(hypotest, data1d, unpacker, + *args, n_outputs=n_outputs, + nan_policy=nan_policy, + paired=paired, _no_deco=True, + **kwds) + else: + raise e + statistics[i] = res1d[0] + if len(res1d) == 2: + pvalues[i] = res1d[1] + + # Perform a vectorized call to the hypothesis test. + # If `nan_policy == 'raise'`, check that it raises the appropriate error.
+ # If not, compare the output against `statistics` and `pvalues` + if nan_policy == 'raise' and not data_generator == "all_finite": + message = 'The input contains nan values' + with pytest.raises(ValueError, match=message): + hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds) + + else: + with suppress_warnings() as sup, \ + np.errstate(divide='ignore', invalid='ignore'): + sup.filter(RuntimeWarning, "Precision loss occurred in moment") + sup.filter(UserWarning, "Sample size too small for normal " + "approximation.") + res = unpacker(hypotest(*data, axis=axis, nan_policy=nan_policy, + *args, **kwds)) + assert_allclose(res[0], statistics, rtol=1e-15) + assert_equal(res[0].dtype, statistics.dtype) + + if len(res) == 2: + assert_allclose(res[1], pvalues, rtol=1e-15) + assert_equal(res[1].dtype, pvalues.dtype) + + +@pytest.mark.filterwarnings('ignore::RuntimeWarning') +@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs", + "paired", "unpacker"), axis_nan_policy_cases) +@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise")) +@pytest.mark.parametrize(("data_generator"), + ("all_nans", "all_finite", "mixed", "empty")) +def test_axis_nan_policy_axis_is_None(hypotest, args, kwds, n_samples, + n_outputs, paired, unpacker, nan_policy, + data_generator): + # check for correct behavior when `axis=None` + + if not unpacker: + def unpacker(res): + return res + + rng = np.random.default_rng(0) + + if data_generator == "empty": + data = [rng.random((2, 0)) for i in range(n_samples)] + else: + data = [rng.random((2, 20)) for i in range(n_samples)] + + if data_generator == "mixed": + masks = [rng.random((2, 20)) > 0.9 for i in range(n_samples)] + for sample, mask in zip(data, masks): + sample[mask] = np.nan + elif data_generator == "all_nans": + data = [sample * np.nan for sample in data] + + data_raveled = [sample.ravel() for sample in data] + + if nan_policy == 'raise' and data_generator not in {"all_finite", "empty"}: + message = 'The input contains nan values' + + # check for correct behavior whether or not data is 1d to begin with + with pytest.raises(ValueError, match=message): + hypotest(*data, axis=None, nan_policy=nan_policy, + *args, **kwds) + with pytest.raises(ValueError, match=message): + hypotest(*data_raveled, axis=None, nan_policy=nan_policy, + *args, **kwds) + + else: + # behavior of reference implementation with 1d input, hypotest with 1d + # input, and hypotest with Nd input should match, whether that means + # that outputs are equal or they raise the same exception + + ea_str, eb_str, ec_str = None, None, None + with np.errstate(divide='ignore', invalid='ignore'): + try: + res1da = nan_policy_1d(hypotest, data_raveled, unpacker, *args, + n_outputs=n_outputs, + nan_policy=nan_policy, paired=paired, + _no_deco=True, **kwds) + except (RuntimeWarning, ValueError, ZeroDivisionError) as ea: + ea_str = str(ea) + + try: + res1db = unpacker(hypotest(*data_raveled, *args, + nan_policy=nan_policy, **kwds)) + except (RuntimeWarning, ValueError, ZeroDivisionError) as eb: + eb_str = str(eb) + + try: + res1dc = unpacker(hypotest(*data, *args, axis=None, + nan_policy=nan_policy, **kwds)) + except (RuntimeWarning, ValueError, ZeroDivisionError) as ec: + ec_str = str(ec) + + if ea_str or eb_str or ec_str: + assert any([str(ea_str).startswith(message) + for message in too_small_messages]) + assert ea_str == eb_str == ec_str + else: + assert_equal(res1db, res1da) + assert_equal(res1dc, res1da) + for item in list(res1da) + list(res1db) + 
list(res1dc): + # Most functions naturally return NumPy numbers, which + # are drop-in replacements for the Python versions but with + # desirable attributes. Make sure this is consistent. + assert np.issubdtype(item.dtype, np.number) + +# Test keepdims for: +# - single-output and multi-output functions (gmean and mannwhitneyu) +# - Axis negative, positive, None, and tuple +# - 1D with no NaNs +# - 1D with NaN propagation +# - Zero-sized output +@pytest.mark.parametrize("nan_policy", ("omit", "propagate")) +@pytest.mark.parametrize( + ("hypotest", "args", "kwds", "n_samples", "unpacker"), + ((stats.gmean, tuple(), dict(), 1, lambda x: (x,)), + (stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, None)) +) +@pytest.mark.parametrize( + ("sample_shape", "axis_cases"), + (((2, 3, 3, 4), (None, 0, -1, (0, 2), (1, -1), (3, 1, 2, 0))), + ((10, ), (0, -1)), + ((20, 0), (0, 1))) +) +def test_keepdims(hypotest, args, kwds, n_samples, unpacker, + sample_shape, axis_cases, nan_policy): + # test if keepdims parameter works correctly + if not unpacker: + def unpacker(res): + return res + rng = np.random.default_rng(0) + data = [rng.random(sample_shape) for _ in range(n_samples)] + nan_data = [sample.copy() for sample in data] + nan_mask = [rng.random(sample_shape) < 0.2 for _ in range(n_samples)] + for sample, mask in zip(nan_data, nan_mask): + sample[mask] = np.nan + for axis in axis_cases: + expected_shape = list(sample_shape) + if axis is None: + expected_shape = np.ones(len(sample_shape)) + else: + if isinstance(axis, int): + expected_shape[axis] = 1 + else: + for ax in axis: + expected_shape[ax] = 1 + expected_shape = tuple(expected_shape) + res = unpacker(hypotest(*data, *args, axis=axis, keepdims=True, + **kwds)) + res_base = unpacker(hypotest(*data, *args, axis=axis, keepdims=False, + **kwds)) + nan_res = unpacker(hypotest(*nan_data, *args, axis=axis, + keepdims=True, nan_policy=nan_policy, + **kwds)) + nan_res_base = unpacker(hypotest(*nan_data, *args, axis=axis, + keepdims=False, + nan_policy=nan_policy, **kwds)) + for r, r_base, rn, rn_base in zip(res, res_base, nan_res, + nan_res_base): + assert r.shape == expected_shape + r = np.squeeze(r, axis=axis) + assert_equal(r, r_base) + assert rn.shape == expected_shape + rn = np.squeeze(rn, axis=axis) + assert_equal(rn, rn_base) + + +@pytest.mark.parametrize(("fun", "nsamp"), + [(stats.kstat, 1), + (stats.kstatvar, 1)]) +def test_hypotest_back_compat_no_axis(fun, nsamp): + m, n = 8, 9 + + rng = np.random.default_rng(0) + x = rng.random((nsamp, m, n)) + res = fun(*x) + res2 = fun(*x, _no_deco=True) + res3 = fun([xi.ravel() for xi in x]) + assert_equal(res, res2) + assert_equal(res, res3) + + +@pytest.mark.parametrize(("axis"), (0, 1, 2)) +def test_axis_nan_policy_decorated_positional_axis(axis): + # Test for correct behavior of function decorated with + # _axis_nan_policy_decorator whether `axis` is provided as positional or + # keyword argument + + shape = (8, 9, 10) + rng = np.random.default_rng(0) + x = rng.random(shape) + y = rng.random(shape) + res1 = stats.mannwhitneyu(x, y, True, 'two-sided', axis) + res2 = stats.mannwhitneyu(x, y, True, 'two-sided', axis=axis) + assert_equal(res1, res2) + + message = "mannwhitneyu() got multiple values for argument 'axis'" + with pytest.raises(TypeError, match=re.escape(message)): + stats.mannwhitneyu(x, y, True, 'two-sided', axis, axis=axis) + + +def test_axis_nan_policy_decorated_positional_args(): + # Test for correct behavior of function decorated with + # _axis_nan_policy_decorator when function 
accepts *args + + shape = (3, 8, 9, 10) + rng = np.random.default_rng(0) + x = rng.random(shape) + x[0, 0, 0, 0] = np.nan + stats.kruskal(*x) + + message = "kruskal() got an unexpected keyword argument 'samples'" + with pytest.raises(TypeError, match=re.escape(message)): + stats.kruskal(samples=x) + + with pytest.raises(TypeError, match=re.escape(message)): + stats.kruskal(*x, samples=x) + + +def test_axis_nan_policy_decorated_keyword_samples(): + # Test for correct behavior of function decorated with + # _axis_nan_policy_decorator whether samples are provided as positional or + # keyword arguments + + shape = (2, 8, 9, 10) + rng = np.random.default_rng(0) + x = rng.random(shape) + x[0, 0, 0, 0] = np.nan + res1 = stats.mannwhitneyu(*x) + res2 = stats.mannwhitneyu(x=x[0], y=x[1]) + assert_equal(res1, res2) + + message = "mannwhitneyu() got multiple values for argument" + with pytest.raises(TypeError, match=re.escape(message)): + stats.mannwhitneyu(*x, x=x[0], y=x[1]) + + +@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs", + "paired", "unpacker"), axis_nan_policy_cases) +def test_axis_nan_policy_decorated_pickled(hypotest, args, kwds, n_samples, + n_outputs, paired, unpacker): + if "ttest_ci" in hypotest.__name__: + pytest.skip("Can't pickle functions defined within functions.") + + rng = np.random.default_rng(0) + + # Some hypothesis tests return a non-iterable that needs an `unpacker` to + # extract the statistic and p-value. For those that don't: + if not unpacker: + def unpacker(res): + return res + + data = rng.uniform(size=(n_samples, 2, 30)) + pickled_hypotest = pickle.dumps(hypotest) + unpickled_hypotest = pickle.loads(pickled_hypotest) + res1 = unpacker(hypotest(*data, *args, axis=-1, **kwds)) + res2 = unpacker(unpickled_hypotest(*data, *args, axis=-1, **kwds)) + assert_allclose(res1, res2, rtol=1e-12) + + +def test_check_empty_inputs(): + # Test that _check_empty_inputs is doing its job, at least for single- + # sample inputs. (Multi-sample functionality is tested below.) + # If the input sample is not empty, it should return None. + # If the input sample is empty, it should return an array of NaNs or an + # empty array of appropriate shape. np.mean is used as a reference for the + # output because, like the statistics calculated by these functions, + # it works along and "consumes" `axis` but preserves the other axes. + for i in range(5): + for combo in combinations_with_replacement([0, 1, 2], i): + for axis in range(len(combo)): + samples = (np.zeros(combo),) + output = stats._axis_nan_policy._check_empty_inputs(samples, + axis) + if output is not None: + with np.testing.suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Mean of empty slice.") + sup.filter(RuntimeWarning, "invalid value encountered") + reference = samples[0].mean(axis=axis) + np.testing.assert_equal(output, reference) + + +def _check_arrays_broadcastable(arrays, axis): + # https://numpy.org/doc/stable/user/basics.broadcasting.html + # "When operating on two arrays, NumPy compares their shapes element-wise. + # It starts with the trailing (i.e. rightmost) dimensions and works its + # way left. + # Two dimensions are compatible when + # 1. they are equal, or + # 2. one of them is 1 + # ... + # Arrays do not need to have the same number of dimensions." + # (Clarification: if the arrays are compatible according to the criteria + # above and an array runs out of dimensions, it is still compatible.) 
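+ # (Illustration, not from the original suite: shapes (8, 1, 5) and (5,)
+ # are broadcastable, since the trailing dimensions are 5 == 5 and the
+ # second array then simply runs out of dimensions; shapes (2, 3) and
+ # (4,) are not, because 3 != 4 and neither is 1.)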
+ # Below, we follow the rules above except ignoring `axis` + + n_dims = max([arr.ndim for arr in arrays]) + if axis is not None: + # convert to negative axis + axis = (-n_dims + axis) if axis >= 0 else axis + + for dim in range(1, n_dims+1): # we'll index from -1 to -n_dims, inclusive + if -dim == axis: + continue # ignore lengths along `axis` + + dim_lengths = set() + for arr in arrays: + if dim <= arr.ndim and arr.shape[-dim] != 1: + dim_lengths.add(arr.shape[-dim]) + + if len(dim_lengths) > 1: + return False + return True + + +@pytest.mark.slow +@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs", + "paired", "unpacker"), axis_nan_policy_cases) +def test_empty(hypotest, args, kwds, n_samples, n_outputs, paired, unpacker): + # test for correct output shape when at least one input is empty + + if hypotest in override_propagate_funcs: + reason = "Doesn't follow the usual pattern. Tested separately." + pytest.skip(reason=reason) + + if unpacker is None: + unpacker = lambda res: (res[0], res[1]) # noqa: E731 + + def small_data_generator(n_samples, n_dims): + + def small_sample_generator(n_dims): + # return all possible "small" arrays in up to n_dim dimensions + for i in n_dims: + # "small" means with size along dimension either 0 or 1 + for combo in combinations_with_replacement([0, 1, 2], i): + yield np.zeros(combo) + + # yield all possible combinations of small samples + gens = [small_sample_generator(n_dims) for i in range(n_samples)] + yield from product(*gens) + + n_dims = [2, 3] + for samples in small_data_generator(n_samples, n_dims): + + # this test is only for arrays of zero size + if not any(sample.size == 0 for sample in samples): + continue + + max_axis = max(sample.ndim for sample in samples) + + # need to test for all valid values of `axis` parameter, too + for axis in range(-max_axis, max_axis): + + try: + # After broadcasting, all arrays are the same shape, so + # the shape of the output should be the same as a single- + # sample statistic. Use np.mean as a reference. + concat = stats._stats_py._broadcast_concatenate(samples, axis) + with np.testing.suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Mean of empty slice.") + sup.filter(RuntimeWarning, "invalid value encountered") + expected = np.mean(concat, axis=axis) * np.nan + + if hypotest in empty_special_case_funcs: + empty_val = hypotest(*([[]]*len(samples)), *args, **kwds) + mask = np.isnan(expected) + expected[mask] = empty_val + + with np.testing.suppress_warnings() as sup: + # generated by f_oneway for too_small inputs + sup.filter(stats.DegenerateDataWarning) + res = hypotest(*samples, *args, axis=axis, **kwds) + res = unpacker(res) + + for i in range(n_outputs): + assert_equal(res[i], expected) + + except ValueError: + # confirm that the arrays truly are not broadcastable + assert not _check_arrays_broadcastable(samples, + None if paired else axis) + + # confirm that _both_ `_broadcast_concatenate` and `hypotest` + # produce this information. + message = "Array shapes are incompatible for broadcasting." 
+ with pytest.raises(ValueError, match=message): + stats._stats_py._broadcast_concatenate(samples, axis, paired) + with pytest.raises(ValueError, match=message): + hypotest(*samples, *args, axis=axis, **kwds) + + +def test_masked_array_2_sentinel_array(): + # prepare arrays + np.random.seed(0) + A = np.random.rand(10, 11, 12) + B = np.random.rand(12) + mask = A < 0.5 + A = np.ma.masked_array(A, mask) + + # set arbitrary elements to special values + # (these values might have been considered for use as sentinel values) + max_float = np.finfo(np.float64).max + max_float2 = np.nextafter(max_float, -np.inf) + max_float3 = np.nextafter(max_float2, -np.inf) + A[3, 4, 1] = np.nan + A[4, 5, 2] = np.inf + A[5, 6, 3] = max_float + B[8] = np.nan + B[7] = np.inf + B[6] = max_float2 + + # convert masked A to array with sentinel value, don't modify B + out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([A, B]) + A_out, B_out = out_arrays + + # check that good sentinel value was chosen (according to intended logic) + assert (sentinel != max_float) and (sentinel != max_float2) + assert sentinel == max_float3 + + # check that output arrays are as intended + A_reference = A.data + A_reference[A.mask] = sentinel + np.testing.assert_array_equal(A_out, A_reference) + assert B_out is B + + +def test_masked_dtype(): + # When _masked_arrays_2_sentinel_arrays was first added, it always + # upcast the arrays to np.float64. After gh16662, check expected promotion + # and that the expected sentinel is found. + + # these are important because the max of the promoted dtype is the first + # candidate to be the sentinel value + max16 = np.iinfo(np.int16).max + max128c = np.finfo(np.complex128).max + + # a is a regular array, b has masked elements, and c has no masked elements + a = np.array([1, 2, max16], dtype=np.int16) + b = np.ma.array([1, 2, 1], dtype=np.int8, mask=[0, 1, 0]) + c = np.ma.array([1, 2, 1], dtype=np.complex128, mask=[0, 0, 0]) + + # check integer masked -> sentinel conversion + out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([a, b]) + a_out, b_out = out_arrays + assert sentinel == max16-1 # not max16 because max16 was in the data + assert b_out.dtype == np.int16 # check expected promotion + assert_allclose(b_out, [b[0], sentinel, b[-1]]) # check sentinel placement + assert a_out is a # not a masked array, so left untouched + assert not isinstance(b_out, np.ma.MaskedArray) # b became regular array + + # similarly with complex + out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([b, c]) + b_out, c_out = out_arrays + assert sentinel == max128c # max128c was not in the data + assert b_out.dtype == np.complex128 # b got promoted + assert_allclose(b_out, [b[0], sentinel, b[-1]]) # check sentinel placement + assert not isinstance(b_out, np.ma.MaskedArray) # b became regular array + assert not isinstance(c_out, np.ma.MaskedArray) # c became regular array + + # Also, check edge case when a sentinel value cannot be found in the data + min8, max8 = np.iinfo(np.int8).min, np.iinfo(np.int8).max + a = np.arange(min8, max8+1, dtype=np.int8) # use all possible values + mask1 = np.zeros_like(a, dtype=bool) + mask0 = np.zeros_like(a, dtype=bool) + + # a masked value can be used as the sentinel + mask1[1] = True + a1 = np.ma.array(a, mask=mask1) + out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([a1]) + assert sentinel == min8+1 + + # unless it's the smallest possible; skipped for simplicity (see code) + mask0[0] = True + a0 = np.ma.array(a, mask=mask0) + message = "This function replaces masked 
elements with sentinel..." + with pytest.raises(ValueError, match=message): + _masked_arrays_2_sentinel_arrays([a0]) + + # test that dtype is preserved in functions + a = np.ma.array([1, 2, 3], mask=[0, 1, 0], dtype=np.float32) + assert stats.gmean(a).dtype == np.float32 + + +def test_masked_stat_1d(): + # basic test of _axis_nan_policy_factory with 1D masked sample + males = [19, 22, 16, 29, 24] + females = [20, 11, 17, 12] + res = stats.mannwhitneyu(males, females) + + # same result when extra nan is omitted + females2 = [20, 11, 17, np.nan, 12] + res2 = stats.mannwhitneyu(males, females2, nan_policy='omit') + np.testing.assert_array_equal(res2, res) + + # same result when extra element is masked + females3 = [20, 11, 17, 1000, 12] + mask3 = [False, False, False, True, False] + females3 = np.ma.masked_array(females3, mask=mask3) + res3 = stats.mannwhitneyu(males, females3) + np.testing.assert_array_equal(res3, res) + + # same result when extra nan is omitted and additional element is masked + females4 = [20, 11, 17, np.nan, 1000, 12] + mask4 = [False, False, False, False, True, False] + females4 = np.ma.masked_array(females4, mask=mask4) + res4 = stats.mannwhitneyu(males, females4, nan_policy='omit') + np.testing.assert_array_equal(res4, res) + + # same result when extra elements, including nan, are masked + females5 = [20, 11, 17, np.nan, 1000, 12] + mask5 = [False, False, False, True, True, False] + females5 = np.ma.masked_array(females5, mask=mask5) + res5 = stats.mannwhitneyu(males, females5, nan_policy='propagate') + res6 = stats.mannwhitneyu(males, females5, nan_policy='raise') + np.testing.assert_array_equal(res5, res) + np.testing.assert_array_equal(res6, res) + + +@pytest.mark.parametrize(("axis"), range(-3, 3)) +def test_masked_stat_3d(axis): + # basic test of _axis_nan_policy_factory with 3D masked sample + np.random.seed(0) + a = np.random.rand(3, 4, 5) + b = np.random.rand(4, 5) + c = np.random.rand(4, 1) + + mask_a = a < 0.1 + mask_c = [False, False, False, True] + a_masked = np.ma.masked_array(a, mask=mask_a) + c_masked = np.ma.masked_array(c, mask=mask_c) + + a_nans = a.copy() + a_nans[mask_a] = np.nan + c_nans = c.copy() + c_nans[mask_c] = np.nan + + res = stats.kruskal(a_nans, b, c_nans, nan_policy='omit', axis=axis) + res2 = stats.kruskal(a_masked, b, c_masked, axis=axis) + np.testing.assert_array_equal(res, res2) + + +def test_mixed_mask_nan_1(): + # targeted test of _axis_nan_policy_factory with 2D masked sample: + # omitting samples with masks and nan_policy='omit' are equivalent + # also checks paired-sample sentinel value removal + m, n = 3, 20 + axis = -1 + + np.random.seed(0) + a = np.random.rand(m, n) + b = np.random.rand(m, n) + mask_a1 = np.random.rand(m, n) < 0.2 + mask_a2 = np.random.rand(m, n) < 0.1 + mask_b1 = np.random.rand(m, n) < 0.15 + mask_b2 = np.random.rand(m, n) < 0.15 + mask_a1[2, :] = True + + a_nans = a.copy() + b_nans = b.copy() + a_nans[mask_a1 | mask_a2] = np.nan + b_nans[mask_b1 | mask_b2] = np.nan + + a_masked1 = np.ma.masked_array(a, mask=mask_a1) + b_masked1 = np.ma.masked_array(b, mask=mask_b1) + a_masked1[mask_a2] = np.nan + b_masked1[mask_b2] = np.nan + + a_masked2 = np.ma.masked_array(a, mask=mask_a2) + b_masked2 = np.ma.masked_array(b, mask=mask_b2) + a_masked2[mask_a1] = np.nan + b_masked2[mask_b1] = np.nan + + a_masked3 = np.ma.masked_array(a, mask=(mask_a1 | mask_a2)) + b_masked3 = np.ma.masked_array(b, mask=(mask_b1 | mask_b2)) + + res = stats.wilcoxon(a_nans, b_nans, nan_policy='omit', axis=axis) + res1 = 
stats.wilcoxon(a_masked1, b_masked1, nan_policy='omit', axis=axis) + res2 = stats.wilcoxon(a_masked2, b_masked2, nan_policy='omit', axis=axis) + res3 = stats.wilcoxon(a_masked3, b_masked3, nan_policy='raise', axis=axis) + res4 = stats.wilcoxon(a_masked3, b_masked3, + nan_policy='propagate', axis=axis) + + np.testing.assert_array_equal(res1, res) + np.testing.assert_array_equal(res2, res) + np.testing.assert_array_equal(res3, res) + np.testing.assert_array_equal(res4, res) + + +def test_mixed_mask_nan_2(): + # targeted test of _axis_nan_policy_factory with 2D masked sample: + # check for expected interaction between masks and nans + + # Cases here are + # [mixed nan/mask, all nans, all masked, + # unmasked nan, masked nan, unmasked non-nan] + a = [[1, np.nan, 2], [np.nan, np.nan, np.nan], [1, 2, 3], + [1, np.nan, 3], [1, np.nan, 3], [1, 2, 3]] + mask = [[1, 0, 1], [0, 0, 0], [1, 1, 1], + [0, 0, 0], [0, 1, 0], [0, 0, 0]] + a_masked = np.ma.masked_array(a, mask=mask) + b = [[4, 5, 6]] + ref1 = stats.ranksums([1, 3], [4, 5, 6]) + ref2 = stats.ranksums([1, 2, 3], [4, 5, 6]) + + # nan_policy = 'omit' + # all elements are removed from first three rows + # middle element is removed from fourth and fifth rows + # no elements removed from last row + res = stats.ranksums(a_masked, b, nan_policy='omit', axis=-1) + stat_ref = [np.nan, np.nan, np.nan, + ref1.statistic, ref1.statistic, ref2.statistic] + p_ref = [np.nan, np.nan, np.nan, + ref1.pvalue, ref1.pvalue, ref2.pvalue] + np.testing.assert_array_equal(res.statistic, stat_ref) + np.testing.assert_array_equal(res.pvalue, p_ref) + + # nan_policy = 'propagate' + # nans propagate in first, second, and fourth row + # all elements are removed by mask from third row + # middle element is removed from fifth row + # no elements removed from last row + res = stats.ranksums(a_masked, b, nan_policy='propagate', axis=-1) + stat_ref = [np.nan, np.nan, np.nan, + np.nan, ref1.statistic, ref2.statistic] + p_ref = [np.nan, np.nan, np.nan, + np.nan, ref1.pvalue, ref2.pvalue] + np.testing.assert_array_equal(res.statistic, stat_ref) + np.testing.assert_array_equal(res.pvalue, p_ref) + + +def test_axis_None_vs_tuple(): + # `axis` `None` should be equivalent to tuple with all axes + shape = (3, 8, 9, 10) + rng = np.random.default_rng(0) + x = rng.random(shape) + res = stats.kruskal(*x, axis=None) + res2 = stats.kruskal(*x, axis=(0, 1, 2)) + np.testing.assert_array_equal(res, res2) + + +def test_axis_None_vs_tuple_with_broadcasting(): + # `axis` `None` should be equivalent to tuple with all axes, + # which should be equivalent to raveling the arrays before passing them + rng = np.random.default_rng(0) + x = rng.random((5, 1)) + y = rng.random((1, 5)) + x2, y2 = np.broadcast_arrays(x, y) + + res0 = stats.mannwhitneyu(x.ravel(), y.ravel()) + res1 = stats.mannwhitneyu(x, y, axis=None) + res2 = stats.mannwhitneyu(x, y, axis=(0, 1)) + res3 = stats.mannwhitneyu(x2.ravel(), y2.ravel()) + + assert res1 == res0 + assert res2 == res0 + assert res3 != res0 + + +@pytest.mark.parametrize(("axis"), + list(permutations(range(-3, 3), 2)) + [(-4, 1)]) +def test_other_axis_tuples(axis): + # Check that _axis_nan_policy_factory treats all `axis` tuples as expected + rng = np.random.default_rng(0) + shape_x = (4, 5, 6) + shape_y = (1, 6) + x = rng.random(shape_x) + y = rng.random(shape_y) + axis_original = axis + + # convert axis elements to positive + axis = tuple([(i if i >= 0 else 3 + i) for i in axis]) + axis = sorted(axis) + + if len(set(axis)) != len(axis): + message = "`axis` must 
contain only distinct elements" + with pytest.raises(AxisError, match=re.escape(message)): + stats.mannwhitneyu(x, y, axis=axis_original) + return + + if axis[0] < 0 or axis[-1] > 2: + message = "`axis` is out of bounds for array of dimension 3" + with pytest.raises(AxisError, match=re.escape(message)): + stats.mannwhitneyu(x, y, axis=axis_original) + return + + res = stats.mannwhitneyu(x, y, axis=axis_original) + + # reference behavior + not_axis = {0, 1, 2} - set(axis) # which axis is not part of `axis` + not_axis = next(iter(not_axis)) # take it out of the set + + x2 = x + shape_y_broadcasted = [1, 1, 6] + shape_y_broadcasted[not_axis] = shape_x[not_axis] + y2 = np.broadcast_to(y, shape_y_broadcasted) + + m = x2.shape[not_axis] + x2 = np.moveaxis(x2, axis, (1, 2)) + y2 = np.moveaxis(y2, axis, (1, 2)) + x2 = np.reshape(x2, (m, -1)) + y2 = np.reshape(y2, (m, -1)) + res2 = stats.mannwhitneyu(x2, y2, axis=1) + + np.testing.assert_array_equal(res, res2) + + +@pytest.mark.parametrize( + ("weighted_fun_name, unpacker"), + [ + ("gmean", lambda x: x), + ("hmean", lambda x: x), + ("pmean", lambda x: x), + ("combine_pvalues", lambda x: (x.pvalue, x.statistic)), + ], +) +def test_mean_mixed_mask_nan_weights(weighted_fun_name, unpacker): + # targeted test of _axis_nan_policy_factory with 2D masked sample: + # omitting samples with masks and nan_policy='omit' are equivalent + # also checks paired-sample sentinel value removal + + if weighted_fun_name == 'pmean': + def weighted_fun(a, **kwargs): + return stats.pmean(a, p=0.42, **kwargs) + else: + weighted_fun = getattr(stats, weighted_fun_name) + + def func(*args, **kwargs): + return unpacker(weighted_fun(*args, **kwargs)) + + m, n = 3, 20 + axis = -1 + + rng = np.random.default_rng(6541968121) + a = rng.uniform(size=(m, n)) + b = rng.uniform(size=(m, n)) + mask_a1 = rng.uniform(size=(m, n)) < 0.2 + mask_a2 = rng.uniform(size=(m, n)) < 0.1 + mask_b1 = rng.uniform(size=(m, n)) < 0.15 + mask_b2 = rng.uniform(size=(m, n)) < 0.15 + mask_a1[2, :] = True + + a_nans = a.copy() + b_nans = b.copy() + a_nans[mask_a1 | mask_a2] = np.nan + b_nans[mask_b1 | mask_b2] = np.nan + + a_masked1 = np.ma.masked_array(a, mask=mask_a1) + b_masked1 = np.ma.masked_array(b, mask=mask_b1) + a_masked1[mask_a2] = np.nan + b_masked1[mask_b2] = np.nan + + a_masked2 = np.ma.masked_array(a, mask=mask_a2) + b_masked2 = np.ma.masked_array(b, mask=mask_b2) + a_masked2[mask_a1] = np.nan + b_masked2[mask_b1] = np.nan + + a_masked3 = np.ma.masked_array(a, mask=(mask_a1 | mask_a2)) + b_masked3 = np.ma.masked_array(b, mask=(mask_b1 | mask_b2)) + + mask_all = (mask_a1 | mask_a2 | mask_b1 | mask_b2) + a_masked4 = np.ma.masked_array(a, mask=mask_all) + b_masked4 = np.ma.masked_array(b, mask=mask_all) + + with np.testing.suppress_warnings() as sup: + message = 'invalid value encountered' + sup.filter(RuntimeWarning, message) + res = func(a_nans, weights=b_nans, nan_policy="omit", axis=axis) + res1 = func(a_masked1, weights=b_masked1, nan_policy="omit", axis=axis) + res2 = func(a_masked2, weights=b_masked2, nan_policy="omit", axis=axis) + res3 = func(a_masked3, weights=b_masked3, nan_policy="raise", axis=axis) + res4 = func(a_masked3, weights=b_masked3, nan_policy="propagate", axis=axis) + # Would test with a_masked3/b_masked3, but there is a bug in np.average + # that causes a bug in _no_deco mean with masked weights. Would use + # np.ma.average, but that causes other problems. See numpy/numpy#7330. 
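# Editorial aside -- a minimal standalone sketch (not part of the patch) of
# the invariant the masked-array tests above exercise: masking an element is
# treated the same as replacing it with NaN and using nan_policy='omit'.
# The sample values are illustrative only.
import numpy as np
from scipy import stats

masked = np.ma.masked_array([1.0, 2.0, 3.0, 4.0],
                            mask=[False, True, False, False])
with_nan = np.array([1.0, np.nan, 3.0, 4.0])
# both reduce to gmean([1, 3, 4])
assert np.isclose(stats.gmean(masked), stats.gmean(with_nan, nan_policy='omit'))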
+ if weighted_fun_name in {"hmean"}: + weighted_fun_ma = getattr(stats.mstats, weighted_fun_name) + res5 = weighted_fun_ma(a_masked4, weights=b_masked4, + axis=axis, _no_deco=True) + + np.testing.assert_array_equal(res1, res) + np.testing.assert_array_equal(res2, res) + np.testing.assert_array_equal(res3, res) + np.testing.assert_array_equal(res4, res) + if weighted_fun_name in {"hmean"}: + # _no_deco mean returns masked array, last element was masked + np.testing.assert_allclose(res5.compressed(), res[~np.isnan(res)]) + + +def test_raise_invalid_args_g17713(): + # other cases are handled in: + # test_axis_nan_policy_decorated_positional_axis - multiple values for arg + # test_axis_nan_policy_decorated_positional_args - unexpected kwd arg + message = "got an unexpected keyword argument" + with pytest.raises(TypeError, match=message): + stats.gmean([1, 2, 3], invalid_arg=True) + + message = " got multiple values for argument" + with pytest.raises(TypeError, match=message): + stats.gmean([1, 2, 3], a=True) + + message = "missing 1 required positional argument" + with pytest.raises(TypeError, match=message): + stats.gmean() + + message = "takes from 1 to 4 positional arguments but 5 were given" + with pytest.raises(TypeError, match=message): + stats.gmean([1, 2, 3], 0, float, [1, 1, 1], 10) + + +@pytest.mark.parametrize('dtype', [np.int16, np.float32, np.complex128]) +def test_array_like_input(dtype): + # Check that `_axis_nan_policy`-decorated functions work with custom + # containers that are coercible to numeric arrays + + class ArrLike: + def __init__(self, x, dtype): + self._x = x + self._dtype = dtype + + def __array__(self, dtype=None, copy=None): + return np.asarray(self._x, dtype=self._dtype) + + x = [1]*2 + [3, 4, 5] + res = stats.mode(ArrLike(x, dtype=dtype)) + assert res.mode == 1 + assert res.count == 2 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_binned_statistic.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_binned_statistic.py new file mode 100644 index 0000000000000000000000000000000000000000..932df07f2e489c137b378841fd749272ae0bcc89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_binned_statistic.py @@ -0,0 +1,568 @@ +import numpy as np +from numpy.testing import assert_allclose +import pytest +from pytest import raises as assert_raises +from scipy.stats import (binned_statistic, binned_statistic_2d, + binned_statistic_dd) +from scipy._lib._util import check_random_state + +from .common_tests import check_named_results + + +class TestBinnedStatistic: + + @classmethod + def setup_class(cls): + rng = check_random_state(9865) + cls.x = rng.uniform(size=100) + cls.y = rng.uniform(size=100) + cls.v = rng.uniform(size=100) + cls.X = rng.uniform(size=(100, 3)) + cls.w = rng.uniform(size=100) + cls.u = rng.uniform(size=100) + 1e6 + + def test_1d_count(self): + x = self.x + v = self.v + + count1, edges1, bc = binned_statistic(x, v, 'count', bins=10) + count2, edges2 = np.histogram(x, bins=10) + + assert_allclose(count1, count2) + assert_allclose(edges1, edges2) + + def test_gh5927(self): + # smoke test for gh5927 - binned_statistic was using `is` for string + # comparison + x = self.x + v = self.v + statistics = ['mean', 'median', 'count', 'sum'] + for statistic in statistics: + binned_statistic(x, v, statistic, bins=10) + + def test_big_number_std(self): + # tests for numerical stability of std calculation + # see issue gh-10126 for more + x = self.x + u = self.u + stat1, edges1, bc = binned_statistic(x, u, 'std', 
bins=10) + stat2, edges2, bc = binned_statistic(x, u, np.std, bins=10) + + assert_allclose(stat1, stat2) + + def test_empty_bins_std(self): + # tests that std returns nan for empty bins + x = self.x + u = self.u + stat1, edges1, bc = binned_statistic(x, u, 'std', bins=1000) + stat2, edges2, bc = binned_statistic(x, u, np.std, bins=1000) + + assert_allclose(stat1, stat2) + + def test_non_finite_inputs_and_int_bins(self): + # if either `values` or `sample` contains np.inf or np.nan, a + # ValueError should be raised; see issue gh-9010 for more + x = self.x + u = self.u + orig = u[0] + u[0] = np.inf + assert_raises(ValueError, binned_statistic, u, x, 'std', bins=10) + # also test NumPy integer types for `bins`, e.g. np.int8, np.int64 + assert_raises(ValueError, binned_statistic, u, x, 'std', + bins=np.int64(10)) + u[0] = np.nan + assert_raises(ValueError, binned_statistic, u, x, 'count', bins=10) + # restore the original value; `u` is shared class-level state + u[0] = orig + + def test_1d_result_attributes(self): + x = self.x + v = self.v + + res = binned_statistic(x, v, 'count', bins=10) + attributes = ('statistic', 'bin_edges', 'binnumber') + check_named_results(res, attributes) + + def test_1d_sum(self): + x = self.x + v = self.v + + sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10) + sum2, edges2 = np.histogram(x, bins=10, weights=v) + + assert_allclose(sum1, sum2) + assert_allclose(edges1, edges2) + + def test_1d_mean(self): + x = self.x + v = self.v + + stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10) + stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10) + + assert_allclose(stat1, stat2) + assert_allclose(edges1, edges2) + + def test_1d_std(self): + x = self.x + v = self.v + + stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10) + stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10) + + assert_allclose(stat1, stat2) + assert_allclose(edges1, edges2) + + def test_1d_min(self): + x = self.x + v = self.v + + stat1, edges1, bc = binned_statistic(x, v, 'min', bins=10) + stat2, edges2, bc = binned_statistic(x, v, np.min, bins=10) + + assert_allclose(stat1, stat2) + assert_allclose(edges1, edges2) + + def test_1d_max(self): + x = self.x + v = self.v + + stat1, edges1, bc = binned_statistic(x, v, 'max', bins=10) + stat2, edges2, bc = binned_statistic(x, v, np.max, bins=10) + + assert_allclose(stat1, stat2) + assert_allclose(edges1, edges2) + + def test_1d_median(self): + x = self.x + v = self.v + + stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10) + stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10) + + assert_allclose(stat1, stat2) + assert_allclose(edges1, edges2) + + def test_1d_bincode(self): + x = self.x[:20] + v = self.v[:20] + + count1, edges1, bc = binned_statistic(x, v, 'count', bins=3) + bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1, + 1, 2, 1]) + + bcount = [(bc == i).sum() for i in np.unique(bc)] + + assert_allclose(bc, bc2) + assert_allclose(bcount, count1) + + def test_1d_range_keyword(self): + # Regression test for gh-3063, range can be (min, max) or [(min, max)] + np.random.seed(9865) + x = np.arange(30) + data = np.random.random(30) + + mean, bins, _ = binned_statistic(x[:15], data[:15]) + mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)]) + mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14)) + + assert_allclose(mean, mean_range) + assert_allclose(bins, bins_range) + assert_allclose(mean, mean_range2) + assert_allclose(bins, bins_range2) + 
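# Editorial aside -- a self-contained sketch (not part of the patch) of the
# pattern the methods above repeat: each named statistic should agree with
# the equivalent NumPy callable, and arbitrary callables are accepted too.
# The data and seed below are made up for illustration.
import numpy as np
from scipy.stats import binned_statistic

rng = np.random.default_rng(12345)
x = rng.uniform(size=100)
v = rng.uniform(size=100)
named, edges, binnumber = binned_statistic(x, v, 'median', bins=10)
from_callable, _, _ = binned_statistic(x, v, np.median, bins=10)
assert np.allclose(named, from_callable)
# a custom statistic: per-bin root mean square
rms, _, _ = binned_statistic(x, v, lambda b: np.sqrt(np.mean(b**2)), bins=10)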
+ def test_1d_multi_values(self): + x = self.x + v = self.v + w = self.w + + stat1v, edges1v, bc1v = binned_statistic(x, v, 'mean', bins=10) + stat1w, edges1w, bc1w = binned_statistic(x, w, 'mean', bins=10) + stat2, edges2, bc2 = binned_statistic(x, [v, w], 'mean', bins=10) + + assert_allclose(stat2[0], stat1v) + assert_allclose(stat2[1], stat1w) + assert_allclose(edges1v, edges2) + assert_allclose(bc1v, bc2) + + def test_2d_count(self): + x = self.x + y = self.y + v = self.v + + count1, binx1, biny1, bc = binned_statistic_2d( + x, y, v, 'count', bins=5) + count2, binx2, biny2 = np.histogram2d(x, y, bins=5) + + assert_allclose(count1, count2) + assert_allclose(binx1, binx2) + assert_allclose(biny1, biny2) + + def test_2d_result_attributes(self): + x = self.x + y = self.y + v = self.v + + res = binned_statistic_2d(x, y, v, 'count', bins=5) + attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber') + check_named_results(res, attributes) + + def test_2d_sum(self): + x = self.x + y = self.y + v = self.v + + sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5) + sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v) + + assert_allclose(sum1, sum2) + assert_allclose(binx1, binx2) + assert_allclose(biny1, biny2) + + def test_2d_mean(self): + x = self.x + y = self.y + v = self.v + + stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5) + stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5) + + assert_allclose(stat1, stat2) + assert_allclose(binx1, binx2) + assert_allclose(biny1, biny2) + + def test_2d_mean_unicode(self): + x = self.x + y = self.y + v = self.v + stat1, binx1, biny1, bc = binned_statistic_2d( + x, y, v, 'mean', bins=5) + stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5) + assert_allclose(stat1, stat2) + assert_allclose(binx1, binx2) + assert_allclose(biny1, biny2) + + def test_2d_std(self): + x = self.x + y = self.y + v = self.v + + stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5) + stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5) + + assert_allclose(stat1, stat2) + assert_allclose(binx1, binx2) + assert_allclose(biny1, biny2) + + def test_2d_min(self): + x = self.x + y = self.y + v = self.v + + stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'min', bins=5) + stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.min, bins=5) + + assert_allclose(stat1, stat2) + assert_allclose(binx1, binx2) + assert_allclose(biny1, biny2) + + def test_2d_max(self): + x = self.x + y = self.y + v = self.v + + stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'max', bins=5) + stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.max, bins=5) + + assert_allclose(stat1, stat2) + assert_allclose(binx1, binx2) + assert_allclose(biny1, biny2) + + def test_2d_median(self): + x = self.x + y = self.y + v = self.v + + stat1, binx1, biny1, bc = binned_statistic_2d( + x, y, v, 'median', bins=5) + stat2, binx2, biny2, bc = binned_statistic_2d( + x, y, v, np.median, bins=5) + + assert_allclose(stat1, stat2) + assert_allclose(binx1, binx2) + assert_allclose(biny1, biny2) + + def test_2d_bincode(self): + x = self.x[:20] + y = self.y[:20] + v = self.v[:20] + + count1, binx1, biny1, bc = binned_statistic_2d( + x, y, v, 'count', bins=3) + bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16, + 6, 11, 16, 6, 6, 11, 8]) + + bcount = [(bc == i).sum() for i in np.unique(bc)] + + assert_allclose(bc, bc2) + count1adj = count1[count1.nonzero()] + assert_allclose(bcount, 
count1adj) + + def test_2d_multi_values(self): + x = self.x + y = self.y + v = self.v + w = self.w + + stat1v, binx1v, biny1v, bc1v = binned_statistic_2d( + x, y, v, 'mean', bins=8) + stat1w, binx1w, biny1w, bc1w = binned_statistic_2d( + x, y, w, 'mean', bins=8) + stat2, binx2, biny2, bc2 = binned_statistic_2d( + x, y, [v, w], 'mean', bins=8) + + assert_allclose(stat2[0], stat1v) + assert_allclose(stat2[1], stat1w) + assert_allclose(binx1v, binx2) + assert_allclose(biny1w, biny2) + assert_allclose(bc1v, bc2) + + def test_2d_binnumbers_unraveled(self): + x = self.x + y = self.y + v = self.v + + stat, edgesx, bcx = binned_statistic(x, v, 'mean', bins=20) + stat, edgesy, bcy = binned_statistic(y, v, 'mean', bins=10) + + stat2, edgesx2, edgesy2, bc2 = binned_statistic_2d( + x, y, v, 'mean', bins=(20, 10), expand_binnumbers=True) + + bcx3 = np.searchsorted(edgesx, x, side='right') + bcy3 = np.searchsorted(edgesy, y, side='right') + + # `numpy.searchsorted` is non-inclusive on right-edge, compensate + bcx3[x == x.max()] -= 1 + bcy3[y == y.max()] -= 1 + + assert_allclose(bcx, bc2[0]) + assert_allclose(bcy, bc2[1]) + assert_allclose(bcx3, bc2[0]) + assert_allclose(bcy3, bc2[1]) + + def test_dd_count(self): + X = self.X + v = self.v + + count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3) + count2, edges2 = np.histogramdd(X, bins=3) + + assert_allclose(count1, count2) + assert_allclose(edges1, edges2) + + def test_dd_result_attributes(self): + X = self.X + v = self.v + + res = binned_statistic_dd(X, v, 'count', bins=3) + attributes = ('statistic', 'bin_edges', 'binnumber') + check_named_results(res, attributes) + + def test_dd_sum(self): + X = self.X + v = self.v + + sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3) + sum2, edges2 = np.histogramdd(X, bins=3, weights=v) + sum3, edges3, bc = binned_statistic_dd(X, v, np.sum, bins=3) + + assert_allclose(sum1, sum2) + assert_allclose(edges1, edges2) + assert_allclose(sum1, sum3) + assert_allclose(edges1, edges3) + + def test_dd_mean(self): + X = self.X + v = self.v + + stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3) + stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3) + + assert_allclose(stat1, stat2) + assert_allclose(edges1, edges2) + + def test_dd_std(self): + X = self.X + v = self.v + + stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3) + stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3) + + assert_allclose(stat1, stat2) + assert_allclose(edges1, edges2) + + def test_dd_min(self): + X = self.X + v = self.v + + stat1, edges1, bc = binned_statistic_dd(X, v, 'min', bins=3) + stat2, edges2, bc = binned_statistic_dd(X, v, np.min, bins=3) + + assert_allclose(stat1, stat2) + assert_allclose(edges1, edges2) + + def test_dd_max(self): + X = self.X + v = self.v + + stat1, edges1, bc = binned_statistic_dd(X, v, 'max', bins=3) + stat2, edges2, bc = binned_statistic_dd(X, v, np.max, bins=3) + + assert_allclose(stat1, stat2) + assert_allclose(edges1, edges2) + + def test_dd_median(self): + X = self.X + v = self.v + + stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3) + stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3) + + assert_allclose(stat1, stat2) + assert_allclose(edges1, edges2) + + def test_dd_bincode(self): + X = self.X[:20] + v = self.v[:20] + + count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3) + bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92, + 32, 36, 91, 43, 87, 81, 81]) + + bcount = [(bc == i).sum() for i in 
np.unique(bc)] + + assert_allclose(bc, bc2) + count1adj = count1[count1.nonzero()] + assert_allclose(bcount, count1adj) + + def test_dd_multi_values(self): + X = self.X + v = self.v + w = self.w + + for stat in ["count", "sum", "mean", "std", "min", "max", "median", + np.std]: + stat1v, edges1v, bc1v = binned_statistic_dd(X, v, stat, bins=8) + stat1w, edges1w, bc1w = binned_statistic_dd(X, w, stat, bins=8) + stat2, edges2, bc2 = binned_statistic_dd(X, [v, w], stat, bins=8) + assert_allclose(stat2[0], stat1v) + assert_allclose(stat2[1], stat1w) + assert_allclose(edges1v, edges2) + assert_allclose(edges1w, edges2) + assert_allclose(bc1v, bc2) + + def test_dd_binnumbers_unraveled(self): + X = self.X + v = self.v + + stat, edgesx, bcx = binned_statistic(X[:, 0], v, 'mean', bins=15) + stat, edgesy, bcy = binned_statistic(X[:, 1], v, 'mean', bins=20) + stat, edgesz, bcz = binned_statistic(X[:, 2], v, 'mean', bins=10) + + stat2, edges2, bc2 = binned_statistic_dd( + X, v, 'mean', bins=(15, 20, 10), expand_binnumbers=True) + + assert_allclose(bcx, bc2[0]) + assert_allclose(bcy, bc2[1]) + assert_allclose(bcz, bc2[2]) + + def test_dd_binned_statistic_result(self): + # NOTE: tests the reuse of bin_edges from previous call + x = np.random.random((10000, 3)) + v = np.random.random(10000) + bins = np.linspace(0, 1, 10) + bins = (bins, bins, bins) + + result = binned_statistic_dd(x, v, 'mean', bins=bins) + stat = result.statistic + + result = binned_statistic_dd(x, v, 'mean', + binned_statistic_result=result) + stat2 = result.statistic + + assert_allclose(stat, stat2) + + def test_dd_zero_dedges(self): + x = np.random.random((10000, 3)) + v = np.random.random(10000) + bins = np.linspace(0, 1, 10) + bins = np.append(bins, 1) + bins = (bins, bins, bins) + with assert_raises(ValueError, match='difference is numerically 0'): + binned_statistic_dd(x, v, 'mean', bins=bins) + + def test_dd_range_errors(self): + # Test that descriptive exceptions are raised as appropriate for bad + # values of the `range` argument. 
(See gh-12996) + with assert_raises(ValueError, + match='In range, start must be <= stop'): + binned_statistic_dd([self.y], self.v, + range=[[1, 0]]) + with assert_raises( + ValueError, + match='In dimension 1 of range, start must be <= stop'): + binned_statistic_dd([self.x, self.y], self.v, + range=[[1, 0], [0, 1]]) + with assert_raises( + ValueError, + match='In dimension 2 of range, start must be <= stop'): + binned_statistic_dd([self.x, self.y], self.v, + range=[[0, 1], [1, 0]]) + with assert_raises( + ValueError, + match='range given for 1 dimensions; 2 required'): + binned_statistic_dd([self.x, self.y], self.v, + range=[[0, 1]]) + + def test_binned_statistic_float32(self): + X = np.array([0, 0.42358226], dtype=np.float32) + stat, _, _ = binned_statistic(X, None, 'count', bins=5) + assert_allclose(stat, np.array([1, 0, 0, 0, 1], dtype=np.float64)) + + def test_gh14332(self): + # Regression test for gh-14332: the output was wrong when `sample` + # values were close to a bin edge + x = [] + size = 20 + for i in range(size): + x += [1-0.1**i] + + bins = np.linspace(0,1,11) + sum1, edges1, bc = binned_statistic_dd(x, np.ones(len(x)), + bins=[bins], statistic='sum') + sum2, edges2 = np.histogram(x, bins=bins) + + assert_allclose(sum1, sum2) + assert_allclose(edges1[0], edges2) + + @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) + @pytest.mark.parametrize("statistic", [np.mean, np.median, np.sum, np.std, + np.min, np.max, 'count', + lambda x: (x**2).sum(), + lambda x: (x**2).sum() * 1j]) + def test_dd_all(self, dtype, statistic): + def ref_statistic(x): + return len(x) if statistic == 'count' else statistic(x) + + rng = np.random.default_rng(3704743126639371) + n = 10 + x = rng.random(size=n) + i = x >= 0.5 + v = rng.random(size=n) + if dtype is np.complex128: + v = v + rng.random(size=n)*1j + + stat, _, _ = binned_statistic_dd(x, v, statistic, bins=2) + ref = np.array([ref_statistic(v[~i]), ref_statistic(v[i])]) + assert_allclose(stat, ref) + assert stat.dtype == np.result_type(ref.dtype, np.float64) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_boost_ufuncs.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_boost_ufuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..89b7558899f35571d9e7e415b68f78f80a76caac --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_boost_ufuncs.py @@ -0,0 +1,47 @@ +import pytest +import numpy as np +from numpy.testing import assert_allclose +from scipy.stats import _boost + + +type_char_to_type_tol = {'f': (np.float32, 32*np.finfo(np.float32).eps), + 'd': (np.float64, 32*np.finfo(np.float64).eps)} + + +# Each item in this list is +# (func, args, expected_value) +# All the values can be represented exactly, even with np.float32. +# +# This is not an exhaustive test data set of all the functions! +# It is a spot check of several functions, primarily for +# checking that the different data types are handled correctly. 
+test_data = [ + (_boost._beta_cdf, (0.5, 2, 3), 0.6875), + (_boost._beta_ppf, (0.6875, 2, 3), 0.5), + (_boost._beta_pdf, (0.5, 2, 3), 1.5), + (_boost._beta_pdf, (0, 1, 5), 5.0), + (_boost._beta_pdf, (1, 5, 1), 5.0), + (_boost._beta_sf, (0.5, 2, 1), 0.75), + (_boost._beta_isf, (0.75, 2, 1), 0.5), + (_boost._binom_cdf, (1, 3, 0.5), 0.5), + (_boost._binom_pdf, (1, 4, 0.5), 0.25), + (_boost._hypergeom_cdf, (2, 3, 5, 6), 0.5), + (_boost._nbinom_cdf, (1, 4, 0.25), 0.015625), + (_boost._ncf_mean, (10, 12, 2.5), 1.5), +] + + +@pytest.mark.parametrize('func, args, expected', test_data) +def test_stats_boost_ufunc(func, args, expected): + type_sigs = func.types + type_chars = [sig.split('->')[-1] for sig in type_sigs] + for type_char in type_chars: + typ, rtol = type_char_to_type_tol[type_char] + args = [typ(arg) for arg in args] + # Harmless overflow warnings are a "feature" of some wrappers on some + # platforms. This test is about dtype and accuracy, so let's avoid false + # test failures caused by these warnings. See gh-17432. + with np.errstate(over='ignore'): + value = func(*args) + assert isinstance(value, typ) + assert_allclose(value, expected, rtol=rtol) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_censored_data.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_censored_data.py new file mode 100644 index 0000000000000000000000000000000000000000..ae71dcfaccf899645051287f8944131ec48a1eee --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_censored_data.py @@ -0,0 +1,152 @@ +# Tests for the CensoredData class. + +import pytest +import numpy as np +from numpy.testing import assert_equal, assert_array_equal +from scipy.stats import CensoredData + + +class TestCensoredData: + + def test_basic(self): + uncensored = [1] + left = [0] + right = [2, 5] + interval = [[2, 3]] + data = CensoredData(uncensored, left=left, right=right, + interval=interval) + assert_equal(data._uncensored, uncensored) + assert_equal(data._left, left) + assert_equal(data._right, right) + assert_equal(data._interval, interval) + + udata = data._uncensor() + assert_equal(udata, np.concatenate((uncensored, left, right, + np.mean(interval, axis=1)))) + + def test_right_censored(self): + x = np.array([0, 3, 2.5]) + is_censored = np.array([0, 1, 0], dtype=bool) + data = CensoredData.right_censored(x, is_censored) + assert_equal(data._uncensored, x[~is_censored]) + assert_equal(data._right, x[is_censored]) + assert_equal(data._left, []) + assert_equal(data._interval, np.empty((0, 2))) + + def test_left_censored(self): + x = np.array([0, 3, 2.5]) + is_censored = np.array([0, 1, 0], dtype=bool) + data = CensoredData.left_censored(x, is_censored) + assert_equal(data._uncensored, x[~is_censored]) + assert_equal(data._left, x[is_censored]) + assert_equal(data._right, []) + assert_equal(data._interval, np.empty((0, 2))) + + def test_interval_censored_basic(self): + a = [0.5, 2.0, 3.0, 5.5] + b = [1.0, 2.5, 3.5, 7.0] + data = CensoredData.interval_censored(low=a, high=b) + assert_array_equal(data._interval, np.array(list(zip(a, b)))) + assert data._uncensored.shape == (0,) + assert data._left.shape == (0,) + assert data._right.shape == (0,) + + def test_interval_censored_mixed(self): + # This is actually a mix of uncensored, left-censored, right-censored + # and interval-censored data. Check that when the `interval_censored` + # class method is used, the data is correctly separated into the + # appropriate arrays. 
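# Editorial aside -- a standalone sketch (not part of the patch) of the
# classification rule this test verifies: `interval_censored` treats equal
# endpoints as uncensored, an infinite low as left-censored, an infinite
# high as right-censored, and keeps the rest as intervals. Values made up.
import numpy as np
from scipy.stats import CensoredData

d = CensoredData.interval_censored(low=[1.0, -np.inf, 5.0, 2.0],
                                   high=[1.0, 3.0, np.inf, 4.0])
assert d.num_censored() == 3  # all but the uncensored pair (1.0, 1.0)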
+ a = [0.5, -np.inf, -13.0, 2.0, 1.0, 10.0, -1.0] + b = [0.5, 2500.0, np.inf, 3.0, 1.0, 11.0, np.inf] + data = CensoredData.interval_censored(low=a, high=b) + assert_array_equal(data._interval, [[2.0, 3.0], [10.0, 11.0]]) + assert_array_equal(data._uncensored, [0.5, 1.0]) + assert_array_equal(data._left, [2500.0]) + assert_array_equal(data._right, [-13.0, -1.0]) + + def test_interval_to_other_types(self): + # The interval parameter can represent uncensored and + # left- or right-censored data. Test the conversion of such + # an example to the canonical form in which the different + # types have been split into the separate arrays. + interval = np.array([[0, 1], # interval-censored + [2, 2], # not censored + [3, 3], # not censored + [9, np.inf], # right-censored + [8, np.inf], # right-censored + [-np.inf, 0], # left-censored + [1, 2]]) # interval-censored + data = CensoredData(interval=interval) + assert_equal(data._uncensored, [2, 3]) + assert_equal(data._left, [0]) + assert_equal(data._right, [9, 8]) + assert_equal(data._interval, [[0, 1], [1, 2]]) + + def test_empty_arrays(self): + data = CensoredData(uncensored=[], left=[], right=[], interval=[]) + assert data._uncensored.shape == (0,) + assert data._left.shape == (0,) + assert data._right.shape == (0,) + assert data._interval.shape == (0, 2) + assert len(data) == 0 + + def test_invalid_constructor_args(self): + with pytest.raises(ValueError, match='must be a one-dimensional'): + CensoredData(uncensored=[[1, 2, 3]]) + with pytest.raises(ValueError, match='must be a one-dimensional'): + CensoredData(left=[[1, 2, 3]]) + with pytest.raises(ValueError, match='must be a one-dimensional'): + CensoredData(right=[[1, 2, 3]]) + with pytest.raises(ValueError, match='must be a two-dimensional'): + CensoredData(interval=[[1, 2, 3]]) + + with pytest.raises(ValueError, match='must not contain nan'): + CensoredData(uncensored=[1, np.nan, 2]) + with pytest.raises(ValueError, match='must not contain nan'): + CensoredData(left=[1, np.nan, 2]) + with pytest.raises(ValueError, match='must not contain nan'): + CensoredData(right=[1, np.nan, 2]) + with pytest.raises(ValueError, match='must not contain nan'): + CensoredData(interval=[[1, np.nan], [2, 3]]) + + with pytest.raises(ValueError, + match='both values must not be infinite'): + CensoredData(interval=[[1, 3], [2, 9], [np.inf, np.inf]]) + + with pytest.raises(ValueError, + match='left value must not exceed the right'): + CensoredData(interval=[[1, 0], [2, 2]]) + + @pytest.mark.parametrize('func', [CensoredData.left_censored, + CensoredData.right_censored]) + def test_invalid_left_right_censored_args(self, func): + with pytest.raises(ValueError, + match='`x` must be one-dimensional'): + func([[1, 2, 3]], [0, 1, 1]) + with pytest.raises(ValueError, + match='`censored` must be one-dimensional'): + func([1, 2, 3], [[0, 1, 1]]) + with pytest.raises(ValueError, match='`x` must not contain'): + func([1, 2, np.nan], [0, 1, 1]) + with pytest.raises(ValueError, match='must have the same length'): + func([1, 2, 3], [0, 0, 1, 1]) + + def test_invalid_censored_args(self): + with pytest.raises(ValueError, + match='`low` must be a one-dimensional'): + CensoredData.interval_censored(low=[[3]], high=[4, 5]) + with pytest.raises(ValueError, + match='`high` must be a one-dimensional'): + CensoredData.interval_censored(low=[3], high=[[4, 5]]) + with pytest.raises(ValueError, match='`low` must not contain'): + CensoredData.interval_censored([1, 2, np.nan], [0, 1, 1]) + with pytest.raises(ValueError, match='must have the 
same length'): + CensoredData.interval_censored([1, 2, 3], [0, 0, 1, 1]) + + def test_count_censored(self): + x = [1, 2, 3] + # data1 has no censored data. + data1 = CensoredData(x) + assert data1.num_censored() == 0 + data2 = CensoredData(uncensored=[2.5], left=[10], interval=[[0, 1]]) + assert data2.num_censored() == 2 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_contingency.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_contingency.py new file mode 100644 index 0000000000000000000000000000000000000000..ea7fad58b0d6eee2815cfced77d293f10551343f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_contingency.py @@ -0,0 +1,241 @@ +import numpy as np +from numpy.testing import (assert_equal, assert_array_equal, + assert_array_almost_equal, assert_approx_equal, + assert_allclose) +import pytest +from pytest import raises as assert_raises +from scipy.special import xlogy +from scipy.stats.contingency import (margins, expected_freq, + chi2_contingency, association) + + +def test_margins(): + a = np.array([1]) + m = margins(a) + assert_equal(len(m), 1) + m0 = m[0] + assert_array_equal(m0, np.array([1])) + + a = np.array([[1]]) + m0, m1 = margins(a) + expected0 = np.array([[1]]) + expected1 = np.array([[1]]) + assert_array_equal(m0, expected0) + assert_array_equal(m1, expected1) + + a = np.arange(12).reshape(2, 6) + m0, m1 = margins(a) + expected0 = np.array([[15], [51]]) + expected1 = np.array([[6, 8, 10, 12, 14, 16]]) + assert_array_equal(m0, expected0) + assert_array_equal(m1, expected1) + + a = np.arange(24).reshape(2, 3, 4) + m0, m1, m2 = margins(a) + expected0 = np.array([[[66]], [[210]]]) + expected1 = np.array([[[60], [92], [124]]]) + expected2 = np.array([[[60, 66, 72, 78]]]) + assert_array_equal(m0, expected0) + assert_array_equal(m1, expected1) + assert_array_equal(m2, expected2) + + +def test_expected_freq(): + assert_array_equal(expected_freq([1]), np.array([1.0])) + + observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]]) + e = expected_freq(observed) + assert_array_equal(e, np.ones_like(observed)) + + observed = np.array([[10, 10, 20], [20, 20, 20]]) + e = expected_freq(observed) + correct = np.array([[12., 12., 16.], [18., 18., 24.]]) + assert_array_almost_equal(e, correct) + + +def test_chi2_contingency_trivial(): + # Some very simple tests for chi2_contingency. + + # A trivial case + obs = np.array([[1, 2], [1, 2]]) + chi2, p, dof, expected = chi2_contingency(obs, correction=False) + assert_equal(chi2, 0.0) + assert_equal(p, 1.0) + assert_equal(dof, 1) + assert_array_equal(obs, expected) + + # A *really* trivial case: 1-D data. + obs = np.array([1, 2, 3]) + chi2, p, dof, expected = chi2_contingency(obs, correction=False) + assert_equal(chi2, 0.0) + assert_equal(p, 1.0) + assert_equal(dof, 0) + assert_array_equal(obs, expected) + + +def test_chi2_contingency_R(): + # Some test cases that were computed independently, using R. + + # Rcode = \ + # """ + # # Data vector. 
+ # data <- c( + # 12, 34, 23, 4, 47, 11, + # 35, 31, 11, 34, 10, 18, + # 12, 32, 9, 18, 13, 19, + # 12, 12, 14, 9, 33, 25 + # ) + # + # # Create factor tags:r=rows, c=columns, t=tiers + # r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4"))) + # c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3"))) + # t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2"))) + # + # # 3-way Chi squared test of independence + # s = summary(xtabs(data~r+c+t)) + # print(s) + # """ + # Routput = \ + # """ + # Call: xtabs(formula = data ~ r + c + t) + # Number of cases in table: 478 + # Number of factors: 3 + # Test for independence of all factors: + # Chisq = 102.17, df = 17, p-value = 3.514e-14 + # """ + obs = np.array( + [[[12, 34, 23], + [35, 31, 11], + [12, 32, 9], + [12, 12, 14]], + [[4, 47, 11], + [34, 10, 18], + [18, 13, 19], + [9, 33, 25]]]) + chi2, p, dof, expected = chi2_contingency(obs) + assert_approx_equal(chi2, 102.17, significant=5) + assert_approx_equal(p, 3.514e-14, significant=4) + assert_equal(dof, 17) + + # Rcode = \ + # """ + # # Data vector. + # data <- c( + # # + # 12, 17, + # 11, 16, + # # + # 11, 12, + # 15, 16, + # # + # 23, 15, + # 30, 22, + # # + # 14, 17, + # 15, 16 + # ) + # + # # Create factor tags:r=rows, c=columns, d=depths(?), t=tiers + # r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2"))) + # c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2"))) + # d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2"))) + # t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2"))) + # + # # 4-way Chi squared test of independence + # s = summary(xtabs(data~r+c+d+t)) + # print(s) + # """ + # Routput = \ + # """ + # Call: xtabs(formula = data ~ r + c + d + t) + # Number of cases in table: 262 + # Number of factors: 4 + # Test for independence of all factors: + # Chisq = 8.758, df = 11, p-value = 0.6442 + # """ + obs = np.array( + [[[[12, 17], + [11, 16]], + [[11, 12], + [15, 16]]], + [[[23, 15], + [30, 22]], + [[14, 17], + [15, 16]]]]) + chi2, p, dof, expected = chi2_contingency(obs) + assert_approx_equal(chi2, 8.758, significant=4) + assert_approx_equal(p, 0.6442, significant=4) + assert_equal(dof, 11) + + +def test_chi2_contingency_g(): + c = np.array([[15, 60], [15, 90]]) + g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', + correction=False) + assert_allclose(g, 2*xlogy(c, c/e).sum()) + + g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', + correction=True) + c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]]) + assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum()) + + c = np.array([[10, 12, 10], [12, 10, 10]]) + g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood') + assert_allclose(g, 2*xlogy(c, c/e).sum()) + + +def test_chi2_contingency_bad_args(): + # Test that "bad" inputs raise a ValueError. + + # Negative value in the array of observed frequencies. + obs = np.array([[-1, 10], [1, 2]]) + assert_raises(ValueError, chi2_contingency, obs) + + # The zeros in this will result in zeros in the array + # of expected frequencies. + obs = np.array([[0, 1], [0, 1]]) + assert_raises(ValueError, chi2_contingency, obs) + + # A degenerate case: `observed` has size 0. 
+ obs = np.empty((0, 8)) + assert_raises(ValueError, chi2_contingency, obs) + + +def test_chi2_contingency_yates_gh13875(): + # Magnitude of Yates' continuity correction should not exceed difference + # between expected and observed value of the statistic; see gh-13875 + observed = np.array([[1573, 3], [4, 0]]) + p = chi2_contingency(observed)[1] + assert_allclose(p, 1, rtol=1e-12) + + +@pytest.mark.parametrize("correction", [False, True]) +def test_result(correction): + obs = np.array([[1, 2], [1, 2]]) + res = chi2_contingency(obs, correction=correction) + assert_equal((res.statistic, res.pvalue, res.dof, res.expected_freq), res) + + +def test_bad_association_args(): + # Invalid Test Statistic + assert_raises(ValueError, association, [[1, 2], [3, 4]], "X") + # Invalid array shape + assert_raises(ValueError, association, [[[1, 2]], [[3, 4]]], "cramer") + # chi2_contingency exception + assert_raises(ValueError, association, [[-1, 10], [1, 2]], 'cramer') + # Invalid Array Item Data Type + assert_raises(ValueError, association, + np.array([[1, 2], ["dd", 4]], dtype=object), 'cramer') + + +@pytest.mark.parametrize('stat, expected', + [('cramer', 0.09222412010290792), + ('tschuprow', 0.0775509319944633), + ('pearson', 0.12932925727138758)]) +def test_assoc(stat, expected): + # 2d Array + obs1 = np.array([[12, 13, 14, 15, 16], + [17, 16, 18, 19, 11], + [9, 15, 14, 12, 11]]) + a = association(observed=obs1, method=stat) + assert_allclose(a, expected) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_continuous_basic.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_continuous_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..7afee32a52b7f7d0fbeb4997cc1961e6783ced8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_continuous_basic.py @@ -0,0 +1,1016 @@ +import sys +import numpy as np +import numpy.testing as npt +import pytest +from pytest import raises as assert_raises +from scipy.integrate import IntegrationWarning +import itertools + +from scipy import stats +from .common_tests import (check_normalization, check_moment, + check_mean_expect, + check_var_expect, check_skew_expect, + check_kurt_expect, check_entropy, + check_private_entropy, check_entropy_vect_scale, + check_edge_support, check_named_args, + check_random_state_property, + check_meth_dtype, check_ppf_dtype, + check_cmplx_deriv, + check_pickling, check_rvs_broadcast, + check_freezing, check_munp_expect,) +from scipy.stats._distr_params import distcont +from scipy.stats._distn_infrastructure import rv_continuous_frozen + +""" +Test all continuous distributions. + +Parameters were chosen for those distributions that pass the +Kolmogorov-Smirnov test. This provides safe parameters for each +distribution so that we can perform further testing of class methods. + +These tests currently mostly check for serious errors and exceptions, +not for numerically exact results. 
+""" + +# Note that you need to add new distributions you want tested +# to _distr_params + +DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5 +_IS_32BIT = (sys.maxsize < 2**32) + +# For skipping test_cont_basic +distslow = ['recipinvgauss', 'vonmises', 'kappa4', 'vonmises_line', + 'gausshyper', 'norminvgauss', 'geninvgauss', 'genhyperbolic', + 'truncnorm', 'truncweibull_min'] + +# distxslow are sorted by speed (very slow to slow) +distxslow = ['studentized_range', 'kstwo', 'ksone', 'wrapcauchy', 'genexpon'] + +# For skipping test_moments, which is already marked slow +distxslow_test_moments = ['studentized_range', 'vonmises', 'vonmises_line', + 'ksone', 'kstwo', 'recipinvgauss', 'genexpon'] + +# skip check_fit_args (test is slow) +skip_fit_test_mle = ['exponpow', 'exponweib', 'gausshyper', 'genexpon', + 'halfgennorm', 'gompertz', 'johnsonsb', 'johnsonsu', + 'kappa4', 'ksone', 'kstwo', 'kstwobign', 'mielke', 'ncf', + 'nct', 'powerlognorm', 'powernorm', 'recipinvgauss', + 'trapezoid', 'vonmises', 'vonmises_line', 'levy_stable', + 'rv_histogram_instance', 'studentized_range'] + +# these were really slow in `test_fit`.py. +# note that this list is used to skip both fit_test and fit_fix tests +slow_fit_test_mm = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon', + 'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb', + 'kappa4', 'kstwobign', 'recipinvgauss', + 'trapezoid', 'truncexpon', 'vonmises', 'vonmises_line', + 'studentized_range'] +# pearson3 fails due to something weird +# the first list fails due to non-finite distribution moments encountered +# most of the rest fail due to integration warnings +# pearson3 is overridden as not implemented due to gh-11746 +fail_fit_test_mm = (['alpha', 'betaprime', 'bradford', 'burr', 'burr12', + 'cauchy', 'crystalball', 'f', 'fisk', 'foldcauchy', + 'genextreme', 'genpareto', 'halfcauchy', 'invgamma', + 'jf_skew_t', 'kappa3', 'levy', 'levy_l', 'loglaplace', + 'lomax', 'mielke', 'nakagami', 'ncf', 'skewcauchy', 't', + 'tukeylambda', 'invweibull', 'rel_breitwigner'] + + ['genhyperbolic', 'johnsonsu', 'ksone', 'kstwo', + 'nct', 'pareto', 'powernorm', 'powerlognorm'] + + ['pearson3']) + +skip_fit_test = {"MLE": skip_fit_test_mle, + "MM": slow_fit_test_mm + fail_fit_test_mm} + +# skip check_fit_args_fix (test is slow) +skip_fit_fix_test_mle = ['burr', 'exponpow', 'exponweib', 'gausshyper', + 'genexpon', 'halfgennorm', 'gompertz', 'johnsonsb', + 'johnsonsu', 'kappa4', 'ksone', 'kstwo', 'kstwobign', + 'levy_stable', 'mielke', 'ncf', 'ncx2', + 'powerlognorm', 'powernorm', 'rdist', 'recipinvgauss', + 'trapezoid', 'truncpareto', 'vonmises', 'vonmises_line', + 'studentized_range'] +# the first list fails due to non-finite distribution moments encountered +# most of the rest fail due to integration warnings +# pearson3 is overridden as not implemented due to gh-11746 +fail_fit_fix_test_mm = (['alpha', 'betaprime', 'burr', 'burr12', 'cauchy', + 'crystalball', 'f', 'fisk', 'foldcauchy', + 'genextreme', 'genpareto', 'halfcauchy', 'invgamma', + 'jf_skew_t', 'kappa3', 'levy', 'levy_l', 'loglaplace', + 'lomax', 'mielke', 'nakagami', 'ncf', 'nct', + 'skewcauchy', 't', 'truncpareto', 'invweibull'] + + ['genhyperbolic', 'johnsonsu', 'ksone', 'kstwo', + 'pareto', 'powernorm', 'powerlognorm'] + + ['pearson3']) +skip_fit_fix_test = {"MLE": skip_fit_fix_test_mle, + "MM": slow_fit_test_mm + fail_fit_fix_test_mm} + +# These distributions fail the complex derivative test below. 
+# Here 'fail' means producing wrong results and/or raising exceptions, depending + # on the implementation details of the corresponding special functions. + # cf https://github.com/scipy/scipy/pull/4979 for a discussion. +fails_cmplx = {'argus', 'beta', 'betaprime', 'chi', 'chi2', 'cosine', + 'dgamma', 'dweibull', 'erlang', 'f', 'foldcauchy', 'gamma', + 'gausshyper', 'gengamma', 'genhyperbolic', + 'geninvgauss', 'gennorm', 'genpareto', + 'halfcauchy', 'halfgennorm', 'invgamma', 'jf_skew_t', + 'ksone', 'kstwo', 'kstwobign', 'levy_l', 'loggamma', + 'logistic', 'loguniform', 'maxwell', 'nakagami', + 'ncf', 'nct', 'ncx2', 'norminvgauss', 'pearson3', + 'powerlaw', 'rdist', 'reciprocal', 'rice', + 'skewnorm', 't', 'truncweibull_min', + 'tukeylambda', 'vonmises', 'vonmises_line', + 'rv_histogram_instance', 'truncnorm', 'studentized_range', + 'johnsonsb', 'halflogistic', 'rel_breitwigner'} + + +# rv_histogram instances, with uniform and non-uniform bins; +# stored as (dist, arg) tuples for cases_test_cont_basic +# and cases_test_moments. +histogram_test_instances = [] +case1 = {'a': [1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, + 6, 6, 6, 7, 7, 7, 8, 8, 9], 'bins': 8} # equal width bins +case2 = {'a': [1, 1], 'bins': [0, 1, 10]} # unequal width bins +for case, density in itertools.product([case1, case2], [True, False]): + _hist = np.histogram(**case, density=density) + _rv_hist = stats.rv_histogram(_hist, density=density) + histogram_test_instances.append((_rv_hist, tuple())) + + +def cases_test_cont_basic(): + for distname, arg in distcont[:] + histogram_test_instances: + if distname == 'levy_stable': + continue + elif distname in distslow: + yield pytest.param(distname, arg, marks=pytest.mark.slow) + elif distname in distxslow: + yield pytest.param(distname, arg, marks=pytest.mark.xslow) + else: + yield distname, arg + + +@pytest.mark.parametrize('distname,arg', cases_test_cont_basic()) +@pytest.mark.parametrize('sn, n_fit_samples', [(500, 200)]) +def test_cont_basic(distname, arg, sn, n_fit_samples): + # this test skips slow distributions + + try: + distfn = getattr(stats, distname) + except TypeError: + distfn = distname + distname = 'rv_histogram_instance' + + rng = np.random.RandomState(765456) + rvs = distfn.rvs(size=sn, *arg, random_state=rng) + m, v = distfn.stats(*arg) + + if distname not in {'laplace_asymmetric'}: + check_sample_meanvar_(m, v, rvs) + check_cdf_ppf(distfn, arg, distname) + check_sf_isf(distfn, arg, distname) + check_cdf_sf(distfn, arg, distname) + check_ppf_isf(distfn, arg, distname) + check_pdf(distfn, arg, distname) + check_pdf_logpdf(distfn, arg, distname) + check_pdf_logpdf_at_endpoints(distfn, arg, distname) + check_cdf_logcdf(distfn, arg, distname) + check_sf_logsf(distfn, arg, distname) + check_ppf_broadcast(distfn, arg, distname) + + alpha = 0.01 + if distname == 'rv_histogram_instance': + check_distribution_rvs(distfn.cdf, arg, alpha, rvs) + elif distname != 'geninvgauss': + # skip kstest for geninvgauss since cdf is too slow; see test for + # rv generation in TestGenInvGauss in test_distributions.py + check_distribution_rvs(distname, arg, alpha, rvs) + + locscale_defaults = (0, 1) + meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf, + distfn.logsf] + # make sure arguments are within support + spec_x = {'weibull_max': -0.5, 'levy_l': -0.5, + 'pareto': 1.5, 'truncpareto': 3.2, 'tukeylambda': 0.3, + 'rv_histogram_instance': 5.0} + x = spec_x.get(distname, 0.5) + if distname == 'invweibull': + arg = (1,) + elif distname == 'ksone': + arg = (3,) + + 
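# Editorial aside -- a standalone illustration (not part of the patch) of why
# `spec_x` overrides the default probe point x = 0.5 above: outside a
# distribution's support the pdf is zero, so x must be moved inside it.
# E.g. pareto has support [1, inf):
from scipy import stats

assert stats.pareto.pdf(0.5, 2) == 0.0  # below the lower support bound
assert stats.pareto.pdf(1.5, 2) > 0.0   # inside the support, hence spec_x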
check_named_args(distfn, x, arg, locscale_defaults, meths) + check_random_state_property(distfn, arg) + + if distname in ['rel_breitwigner'] and _IS_32BIT: + # gh18414 + pytest.skip("fails on Linux 32-bit") + else: + check_pickling(distfn, arg) + check_freezing(distfn, arg) + + # Entropy + if distname not in ['kstwobign', 'kstwo', 'ncf']: + check_entropy(distfn, arg, distname) + + if distfn.numargs == 0: + check_vecentropy(distfn, arg) + + if (distfn.__class__._entropy != stats.rv_continuous._entropy + and distname != 'vonmises'): + check_private_entropy(distfn, arg, stats.rv_continuous) + + with npt.suppress_warnings() as sup: + sup.filter(IntegrationWarning, "The occurrence of roundoff error") + sup.filter(IntegrationWarning, "Extremely bad integrand") + sup.filter(RuntimeWarning, "invalid value") + check_entropy_vect_scale(distfn, arg) + + check_retrieving_support(distfn, arg) + check_edge_support(distfn, arg) + + check_meth_dtype(distfn, arg, meths) + check_ppf_dtype(distfn, arg) + + if distname not in fails_cmplx: + check_cmplx_deriv(distfn, arg) + + if distname != 'truncnorm': + check_ppf_private(distfn, arg, distname) + + for method in ["MLE", "MM"]: + if distname not in skip_fit_test[method]: + check_fit_args(distfn, arg, rvs[:n_fit_samples], method) + + if distname not in skip_fit_fix_test[method]: + check_fit_args_fix(distfn, arg, rvs[:n_fit_samples], method) + + +@pytest.mark.parametrize('distname,arg', cases_test_cont_basic()) +def test_rvs_scalar(distname, arg): + # rvs should return a scalar when given scalar arguments (gh-12428) + try: + distfn = getattr(stats, distname) + except TypeError: + distfn = distname + distname = 'rv_histogram_instance' + + assert np.isscalar(distfn.rvs(*arg)) + assert np.isscalar(distfn.rvs(*arg, size=())) + assert np.isscalar(distfn.rvs(*arg, size=None)) + + +def test_levy_stable_random_state_property(): + # levy_stable only implements rvs(), so it is skipped in the + # main loop in test_cont_basic(). Here we apply just the test + # check_random_state_property to levy_stable. + check_random_state_property(stats.levy_stable, (0.5, 0.1)) + + +def cases_test_moments(): + fail_normalization = set() + fail_higher = {'ncf'} + fail_moment = {'johnsonsu'} # generic `munp` is inaccurate for johnsonsu + + for distname, arg in distcont[:] + histogram_test_instances: + if distname == 'levy_stable': + continue + + if distname in distxslow_test_moments: + yield pytest.param(distname, arg, True, True, True, True, + marks=pytest.mark.xslow(reason="too slow")) + continue + + cond1 = distname not in fail_normalization + cond2 = distname not in fail_higher + cond3 = distname not in fail_moment + + marks = list() + # Currently unused, `marks` can be used to add a timeout to a test of + # a specific distribution. 
For example, this shows how a timeout could + # be added for the 'skewnorm' distribution: + # + # marks = list() + # if distname == 'skewnorm': + # marks.append(pytest.mark.timeout(300)) + + yield pytest.param(distname, arg, cond1, cond2, cond3, + False, marks=marks) + + if not cond1 or not cond2 or not cond3: + # Run the distributions that have issues twice, once skipping the + # not_ok parts, once with the not_ok parts but marked as knownfail + yield pytest.param(distname, arg, True, True, True, True, + marks=[pytest.mark.xfail] + marks) + + +@pytest.mark.slow +@pytest.mark.parametrize('distname,arg,normalization_ok,higher_ok,moment_ok,' + 'is_xfailing', + cases_test_moments()) +def test_moments(distname, arg, normalization_ok, higher_ok, moment_ok, + is_xfailing): + try: + distfn = getattr(stats, distname) + except TypeError: + distfn = distname + distname = 'rv_histogram_instance' + + with npt.suppress_warnings() as sup: + sup.filter(IntegrationWarning, + "The integral is probably divergent, or slowly convergent.") + sup.filter(IntegrationWarning, + "The maximum number of subdivisions.") + sup.filter(IntegrationWarning, + "The algorithm does not converge.") + + if is_xfailing: + sup.filter(IntegrationWarning) + + m, v, s, k = distfn.stats(*arg, moments='mvsk') + + with np.errstate(all="ignore"): + if normalization_ok: + check_normalization(distfn, arg, distname) + + if higher_ok: + check_mean_expect(distfn, arg, m, distname) + check_skew_expect(distfn, arg, m, v, s, distname) + check_var_expect(distfn, arg, m, v, distname) + check_kurt_expect(distfn, arg, m, v, k, distname) + check_munp_expect(distfn, arg, distname) + + check_loc_scale(distfn, arg, m, v, distname) + + if moment_ok: + check_moment(distfn, arg, m, v, distname) + + +@pytest.mark.parametrize('dist,shape_args', distcont) +def test_rvs_broadcast(dist, shape_args): + if dist in ['gausshyper', 'studentized_range']: + pytest.skip("too slow") + + if dist in ['rel_breitwigner'] and _IS_32BIT: + # gh18414 + pytest.skip("fails on Linux 32-bit") + + # If shape_only is True, it means the _rvs method of the + # distribution uses more than one random number to generate a random + # variate. That means the result of using rvs with broadcasting or + # with a nontrivial size will not necessarily be the same as using the + # numpy.vectorize'd version of rvs(), so we can only compare the shapes + # of the results, not the values. + # Whether or not a distribution is in the following list is an + # implementation detail of the distribution, not a requirement. If + # the implementation of the rvs() method of a distribution changes, this + # test might also have to be changed. + shape_only = dist in ['argus', 'betaprime', 'dgamma', 'dweibull', + 'exponnorm', 'genhyperbolic', 'geninvgauss', + 'levy_stable', 'nct', 'norminvgauss', 'rice', + 'skewnorm', 'semicircular', 'gennorm', 'loggamma'] + + distfunc = getattr(stats, dist) + loc = np.zeros(2) + scale = np.ones((3, 1)) + nargs = distfunc.numargs + allargs = [] + bshape = [3, 2] + # Generate shape parameter arguments... + for k in range(nargs): + shp = (k + 4,) + (1,)*(k + 2) + allargs.append(shape_args[k]*np.ones(shp)) + bshape.insert(0, k + 4) + allargs.extend([loc, scale]) + # bshape holds the expected shape when loc, scale, and the shape + # parameters are all broadcast together. 
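# Editorial aside -- a standalone sketch (not part of the patch) of the
# broadcasting rule `bshape` encodes: rvs() broadcasts the shape parameters,
# loc, and scale, and the sample takes the fully broadcast shape. The
# distribution (chi2) and the shapes here are illustrative only.
import numpy as np
from scipy import stats

df = 4 * np.ones((5, 1, 1))  # a chi2 shape parameter, shape (5, 1, 1)
loc = np.zeros(2)            # shape (2,)
scale = np.ones((3, 1))      # shape (3, 1)
sample = stats.chi2.rvs(df, loc=loc, scale=scale, random_state=0)
assert sample.shape == (5, 3, 2)  # np.broadcast_shapes((5,1,1), (3,1), (2,))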
+ + check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, 'd') + + +# Expected values of the SF, CDF, PDF were computed using +# mpmath with mpmath.mp.dps = 50 and output at 20: +# +# def ks(x, n): +# x = mpmath.mpf(x) +# logp = -mpmath.power(6.0*n*x+1.0, 2)/18.0/n +# sf, cdf = mpmath.exp(logp), -mpmath.expm1(logp) +# pdf = (6.0*n*x+1.0) * 2 * sf/3 +# print(mpmath.nstr(sf, 20), mpmath.nstr(cdf, 20), mpmath.nstr(pdf, 20)) +# +# Tests use 1/n < x < 1-1/n and n > 1e6 to use the asymptotic computation. +# Larger x has a smaller sf. +@pytest.mark.parametrize('x,n,sf,cdf,pdf,rtol', + [(2.0e-5, 1000000000, + 0.44932297307934442379, 0.55067702692065557621, + 35946.137394996276407, 5e-15), + (2.0e-9, 1000000000, + 0.99999999061111115519, 9.3888888448132728224e-9, + 8.6666665852962971765, 5e-14), + (5.0e-4, 1000000000, + 7.1222019433090374624e-218, 1.0, + 1.4244408634752704094e-211, 5e-14)]) +def test_gh17775_regression(x, n, sf, cdf, pdf, rtol): + # Regression test for gh-17775. In scipy 1.9.3 and earlier, + # these tests would fail. + # + # KS one asymptotic sf ~ e^(-(6nx+1)^2 / 18n) + # Given a large 32-bit integer n, 6n will overflow in the C implementation. + # Example of broken behaviour: + # ksone.sf(2.0e-5, 1000000000) == 0.9374359693473666 + ks = stats.ksone + vals = np.array([ks.sf(x, n), ks.cdf(x, n), ks.pdf(x, n)]) + expected = np.array([sf, cdf, pdf]) + npt.assert_allclose(vals, expected, rtol=rtol) + # The sf+cdf must sum to 1.0. + npt.assert_equal(vals[0] + vals[1], 1.0) + # Check inverting the (potentially very small) sf (uses a lower tolerance) + npt.assert_allclose([ks.isf(sf, n)], [x], rtol=1e-8) + + +def test_rvs_gh2069_regression(): + # Regression tests for gh-2069. In scipy 0.17 and earlier, + # these tests would fail. + # + # A typical example of the broken behavior: + # >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5)) + # array([-2.49613705, -2.49613705, -2.49613705, -2.49613705, -2.49613705]) + rng = np.random.RandomState(123) + vals = stats.norm.rvs(loc=np.zeros(5), scale=1, random_state=rng) + d = np.diff(vals) + npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!") + vals = stats.norm.rvs(loc=0, scale=np.ones(5), random_state=rng) + d = np.diff(vals) + npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!") + vals = stats.norm.rvs(loc=np.zeros(5), scale=np.ones(5), random_state=rng) + d = np.diff(vals) + npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!") + vals = stats.norm.rvs(loc=np.array([[0], [0]]), scale=np.ones(5), + random_state=rng) + d = np.diff(vals.ravel()) + npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!") + + assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]], + [[1, 1], [1, 1]], 1) + assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2)) + assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0], + [[1], [2]], (4,)) + + +def test_nomodify_gh9990_regression(): + # Regression test for gh-9990 + # Prior to gh-9990, calls to stats.truncnorm._cdf() used whatever was + # set inside the stats.truncnorm instance during stats.truncnorm.cdf(). + # This could cause issues with multi-threaded code. + # Since then, the calls to cdf() are not permitted to modify the global + # stats.truncnorm instance. + tn = stats.truncnorm + # Use the right-half truncated normal + # Check that the cdf and _cdf return the same result. 
+ npt.assert_almost_equal(tn.cdf(1, 0, np.inf), + 0.6826894921370859) + npt.assert_almost_equal(tn._cdf([1], [0], [np.inf]), + 0.6826894921370859) + + # Now use the left-half truncated normal + npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0), + 0.31731050786291415) + npt.assert_almost_equal(tn._cdf([-1], [-np.inf], [0]), + 0.31731050786291415) + + # Check that the right-half truncated normal _cdf hasn't changed + npt.assert_almost_equal(tn._cdf([1], [0], [np.inf]), + 0.6826894921370859) # Not 1.6826894921370859 + npt.assert_almost_equal(tn.cdf(1, 0, np.inf), + 0.6826894921370859) + + # Check that the left-half truncated normal _cdf hasn't changed + npt.assert_almost_equal(tn._cdf([-1], [-np.inf], [0]), + 0.31731050786291415) # Not -0.6826894921370859 + npt.assert_almost_equal(tn.cdf(1, -np.inf, 0), + 1) # Not 1.6826894921370859 + npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0), + 0.31731050786291415) # Not -0.6826894921370859 + + +def test_broadcast_gh9990_regression(): + # Regression test for gh-9990 + # The x-value 7 only lies within the support of 4 of the supplied + # distributions. Prior to 9990, one array passed to + # stats.reciprocal._cdf would have 4 elements, but an array + # previously stored by stats.reciprocal_argcheck() would have 6, leading + # to a broadcast error. + a = np.array([1, 2, 3, 4, 5, 6]) + b = np.array([8, 16, 1, 32, 1, 48]) + ans = [stats.reciprocal.cdf(7, _a, _b) for _a, _b in zip(a,b)] + npt.assert_array_almost_equal(stats.reciprocal.cdf(7, a, b), ans) + + ans = [stats.reciprocal.cdf(1, _a, _b) for _a, _b in zip(a,b)] + npt.assert_array_almost_equal(stats.reciprocal.cdf(1, a, b), ans) + + ans = [stats.reciprocal.cdf(_a, _a, _b) for _a, _b in zip(a,b)] + npt.assert_array_almost_equal(stats.reciprocal.cdf(a, a, b), ans) + + ans = [stats.reciprocal.cdf(_b, _a, _b) for _a, _b in zip(a,b)] + npt.assert_array_almost_equal(stats.reciprocal.cdf(b, a, b), ans) + + +def test_broadcast_gh7933_regression(): + # Check broadcast works + stats.truncnorm.logpdf( + np.array([3.0, 2.0, 1.0]), + a=(1.5 - np.array([6.0, 5.0, 4.0])) / 3.0, + b=np.inf, + loc=np.array([6.0, 5.0, 4.0]), + scale=3.0 + ) + + +def test_gh2002_regression(): + # Add a check that broadcast works in situations where only some + # x-values are compatible with some of the shape arguments. + x = np.r_[-2:2:101j] + a = np.r_[-np.ones(50), np.ones(51)] + expected = [stats.truncnorm.pdf(_x, _a, np.inf) for _x, _a in zip(x, a)] + ans = stats.truncnorm.pdf(x, a, np.inf) + npt.assert_array_almost_equal(ans, expected) + + +def test_gh1320_regression(): + # Check that the first example from gh-1320 now works. + c = 2.62 + stats.genextreme.ppf(0.5, np.array([[c], [c + 0.5]])) + # The other examples in gh-1320 appear to have stopped working + # some time ago. 
+ # ans = stats.genextreme.moment(2, np.array([c, c + 0.5])) + # expected = np.array([25.50105963, 115.11191437]) + # stats.genextreme.moment(5, np.array([[c], [c + 0.5]])) + # stats.genextreme.moment(5, np.array([c, c + 0.5])) + + +def test_method_of_moments(): + # example from https://en.wikipedia.org/wiki/Method_of_moments_(statistics) + np.random.seed(1234) + x = [0, 0, 0, 0, 1] + a = 1/5 - 2*np.sqrt(3)/5 + b = 1/5 + 2*np.sqrt(3)/5 + # force use of method of moments (uniform.fit is overridden) + loc, scale = super(type(stats.uniform), stats.uniform).fit(x, method="MM") + npt.assert_almost_equal(loc, a, decimal=4) + npt.assert_almost_equal(loc+scale, b, decimal=4) + + +def check_sample_meanvar_(popmean, popvar, sample): + if np.isfinite(popmean): + check_sample_mean(sample, popmean) + if np.isfinite(popvar): + check_sample_var(sample, popvar) + + +def check_sample_mean(sample, popmean): + # Checks for unlikely difference between sample mean and population mean + prob = stats.ttest_1samp(sample, popmean).pvalue + assert prob > 0.01 + + +def check_sample_var(sample, popvar): + # check that population mean lies within the CI bootstrapped from the + # sample. This used to be a chi-squared test for variance, but there were + # too many false positives + res = stats.bootstrap( + (sample,), + lambda x, axis: x.var(ddof=1, axis=axis), + confidence_level=0.995, + ) + conf = res.confidence_interval + low, high = conf.low, conf.high + assert low <= popvar <= high + + +def check_cdf_ppf(distfn, arg, msg): + values = [0.001, 0.5, 0.999] + npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg), + values, decimal=DECIMAL, err_msg=msg + + ' - cdf-ppf roundtrip') + + +def check_sf_isf(distfn, arg, msg): + npt.assert_almost_equal(distfn.sf(distfn.isf([0.1, 0.5, 0.9], *arg), *arg), + [0.1, 0.5, 0.9], decimal=DECIMAL, err_msg=msg + + ' - sf-isf roundtrip') + + +def check_cdf_sf(distfn, arg, msg): + npt.assert_almost_equal(distfn.cdf([0.1, 0.9], *arg), + 1.0 - distfn.sf([0.1, 0.9], *arg), + decimal=DECIMAL, err_msg=msg + + ' - cdf-sf relationship') + + +def check_ppf_isf(distfn, arg, msg): + p = np.array([0.1, 0.9]) + npt.assert_almost_equal(distfn.isf(p, *arg), distfn.ppf(1-p, *arg), + decimal=DECIMAL, err_msg=msg + + ' - ppf-isf relationship') + + +def check_pdf(distfn, arg, msg): + # compares pdf at median with numerical derivative of cdf + median = distfn.ppf(0.5, *arg) + eps = 1e-6 + pdfv = distfn.pdf(median, *arg) + if (pdfv < 1e-4) or (pdfv > 1e4): + # avoid checking a case where pdf is close to zero or + # huge (singularity) + median = median + 0.1 + pdfv = distfn.pdf(median, *arg) + cdfdiff = (distfn.cdf(median + eps, *arg) - + distfn.cdf(median - eps, *arg))/eps/2.0 + # replace with better diff and better test (more points), + # actually, this works pretty well + msg += ' - cdf-pdf relationship' + npt.assert_almost_equal(pdfv, cdfdiff, decimal=DECIMAL, err_msg=msg) + + +def check_pdf_logpdf(distfn, args, msg): + # compares pdf at several points with the log of the pdf + points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) + vals = distfn.ppf(points, *args) + vals = vals[np.isfinite(vals)] + pdf = distfn.pdf(vals, *args) + logpdf = distfn.logpdf(vals, *args) + pdf = pdf[(pdf != 0) & np.isfinite(pdf)] + logpdf = logpdf[np.isfinite(logpdf)] + msg += " - logpdf-log(pdf) relationship" + npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg) + + +def check_pdf_logpdf_at_endpoints(distfn, args, msg): + # compares pdf with the log of the pdf at the (finite) end points + points 
= np.array([0, 1]) + vals = distfn.ppf(points, *args) + vals = vals[np.isfinite(vals)] + pdf = distfn.pdf(vals, *args) + logpdf = distfn.logpdf(vals, *args) + pdf = pdf[(pdf != 0) & np.isfinite(pdf)] + logpdf = logpdf[np.isfinite(logpdf)] + msg += " - logpdf-log(pdf) relationship" + npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg) + + +def check_sf_logsf(distfn, args, msg): + # compares sf at several points with the log of the sf + points = np.array([0.0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0]) + vals = distfn.ppf(points, *args) + vals = vals[np.isfinite(vals)] + sf = distfn.sf(vals, *args) + logsf = distfn.logsf(vals, *args) + sf = sf[sf != 0] + logsf = logsf[np.isfinite(logsf)] + msg += " - logsf-log(sf) relationship" + npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg) + + +def check_cdf_logcdf(distfn, args, msg): + # compares cdf at several points with the log of the cdf + points = np.array([0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0]) + vals = distfn.ppf(points, *args) + vals = vals[np.isfinite(vals)] + cdf = distfn.cdf(vals, *args) + logcdf = distfn.logcdf(vals, *args) + cdf = cdf[cdf != 0] + logcdf = logcdf[np.isfinite(logcdf)] + msg += " - logcdf-log(cdf) relationship" + npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg) + + +def check_ppf_broadcast(distfn, arg, msg): + # compares ppf for multiple argsets. + num_repeats = 5 + args = [] * num_repeats + if arg: + args = [np.array([_] * num_repeats) for _ in arg] + + median = distfn.ppf(0.5, *arg) + medians = distfn.ppf(0.5, *args) + msg += " - ppf multiple" + npt.assert_almost_equal(medians, [median] * num_repeats, decimal=7, err_msg=msg) + + +def check_distribution_rvs(dist, args, alpha, rvs): + # dist is either a cdf function or name of a distribution in scipy.stats. + # args are the args for scipy.stats.dist(*args) + # alpha is a significance level, ~0.01 + # rvs is array_like of random variables + # test from scipy.stats.tests + # this version reuses existing random variables + D, pval = stats.kstest(rvs, dist, args=args, N=1000) + if (pval < alpha): + # The rvs passed in failed the K-S test, which _could_ happen + # but is unlikely if alpha is small enough. + # Repeat the test with a new sample of rvs. + # Generate 1000 rvs, perform a K-S test that the new sample of rvs + # are distributed according to the distribution. + D, pval = stats.kstest(dist, dist, args=args, N=1000) + npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) + + "; alpha = " + str(alpha) + "\nargs = " + str(args)) + + +def check_vecentropy(distfn, args): + npt.assert_equal(distfn.vecentropy(*args), distfn._entropy(*args)) + + +def check_loc_scale(distfn, arg, m, v, msg): + # Make `loc` and `scale` arrays to catch bugs like gh-13580 where + # `loc` and `scale` arrays improperly broadcast with shapes. 
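+    # For reference: if X has standardized mean m and variance v, then
+    # Y = loc + scale*X has mean loc + scale*m and variance scale**2 * v,
+    # which is what the assertions below verify elementwise.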
+ loc, scale = np.array([10.0, 20.0]), np.array([10.0, 20.0]) + mt, vt = distfn.stats(*arg, loc=loc, scale=scale) + npt.assert_allclose(m*scale + loc, mt) + npt.assert_allclose(v*scale*scale, vt) + + +def check_ppf_private(distfn, arg, msg): + # fails by design for truncnorm self.nb not defined + ppfs = distfn._ppf(np.array([0.1, 0.5, 0.9]), *arg) + npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan') + + +def check_retrieving_support(distfn, args): + loc, scale = 1, 2 + supp = distfn.support(*args) + supp_loc_scale = distfn.support(*args, loc=loc, scale=scale) + npt.assert_almost_equal(np.array(supp)*scale + loc, + np.array(supp_loc_scale)) + + +def check_fit_args(distfn, arg, rvs, method): + with np.errstate(all='ignore'), npt.suppress_warnings() as sup: + sup.filter(category=RuntimeWarning, + message="The shape parameter of the erlang") + sup.filter(category=RuntimeWarning, + message="floating point number truncated") + vals = distfn.fit(rvs, method=method) + vals2 = distfn.fit(rvs, optimizer='powell', method=method) + # Only check the length of the return; accuracy tested in test_fit.py + npt.assert_(len(vals) == 2+len(arg)) + npt.assert_(len(vals2) == 2+len(arg)) + + +def check_fit_args_fix(distfn, arg, rvs, method): + with np.errstate(all='ignore'), npt.suppress_warnings() as sup: + sup.filter(category=RuntimeWarning, + message="The shape parameter of the erlang") + + vals = distfn.fit(rvs, floc=0, method=method) + vals2 = distfn.fit(rvs, fscale=1, method=method) + npt.assert_(len(vals) == 2+len(arg)) + npt.assert_(vals[-2] == 0) + npt.assert_(vals2[-1] == 1) + npt.assert_(len(vals2) == 2+len(arg)) + if len(arg) > 0: + vals3 = distfn.fit(rvs, f0=arg[0], method=method) + npt.assert_(len(vals3) == 2+len(arg)) + npt.assert_(vals3[0] == arg[0]) + if len(arg) > 1: + vals4 = distfn.fit(rvs, f1=arg[1], method=method) + npt.assert_(len(vals4) == 2+len(arg)) + npt.assert_(vals4[1] == arg[1]) + if len(arg) > 2: + vals5 = distfn.fit(rvs, f2=arg[2], method=method) + npt.assert_(len(vals5) == 2+len(arg)) + npt.assert_(vals5[2] == arg[2]) + + +@pytest.mark.parametrize('method', ['pdf', 'logpdf', 'cdf', 'logcdf', + 'sf', 'logsf', 'ppf', 'isf']) +@pytest.mark.parametrize('distname, args', distcont) +def test_methods_with_lists(method, distname, args): + # Test that the continuous distributions can accept Python lists + # as arguments. 
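+    # For example, stats.norm.pdf([0.1, 0.2], loc=[0, 0.1], scale=[1, 1.01])
+    # should equal [stats.norm.pdf(0.1, loc=0, scale=1),
+    #               stats.norm.pdf(0.2, loc=0.1, scale=1.01)];
+    # the assertion below checks this elementwise for each method.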
+ dist = getattr(stats, distname) + f = getattr(dist, method) + if distname == 'invweibull' and method.startswith('log'): + x = [1.5, 2] + else: + x = [0.1, 0.2] + + shape2 = [[a]*2 for a in args] + loc = [0, 0.1] + scale = [1, 1.01] + result = f(x, *shape2, loc=loc, scale=scale) + npt.assert_allclose(result, + [f(*v) for v in zip(x, *shape2, loc, scale)], + rtol=1e-14, atol=5e-14) + + +def test_burr_fisk_moment_gh13234_regression(): + vals0 = stats.burr.moment(1, 5, 4) + assert isinstance(vals0, float) + + vals1 = stats.fisk.moment(1, 8) + assert isinstance(vals1, float) + + +def test_moments_with_array_gh12192_regression(): + # array loc and scalar scale + vals0 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=1) + expected0 = np.array([1., 2., 3.]) + npt.assert_equal(vals0, expected0) + + # array loc and invalid scalar scale + vals1 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=-1) + expected1 = np.array([np.nan, np.nan, np.nan]) + npt.assert_equal(vals1, expected1) + + # array loc and array scale with invalid entries + vals2 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), + scale=[-3, 1, 0]) + expected2 = np.array([np.nan, 2., np.nan]) + npt.assert_equal(vals2, expected2) + + # (loc == 0) & (scale < 0) + vals3 = stats.norm.moment(order=2, loc=0, scale=-4) + expected3 = np.nan + npt.assert_equal(vals3, expected3) + assert isinstance(vals3, expected3.__class__) + + # array loc with 0 entries and scale with invalid entries + vals4 = stats.norm.moment(order=2, loc=[1, 0, 2], scale=[3, -4, -5]) + expected4 = np.array([10., np.nan, np.nan]) + npt.assert_equal(vals4, expected4) + + # all(loc == 0) & (array scale with invalid entries) + vals5 = stats.norm.moment(order=2, loc=[0, 0, 0], scale=[5., -2, 100.]) + expected5 = np.array([25., np.nan, 10000.]) + npt.assert_equal(vals5, expected5) + + # all( (loc == 0) & (scale < 0) ) + vals6 = stats.norm.moment(order=2, loc=[0, 0, 0], scale=[-5., -2, -100.]) + expected6 = np.array([np.nan, np.nan, np.nan]) + npt.assert_equal(vals6, expected6) + + # scalar args, loc, and scale + vals7 = stats.chi.moment(order=2, df=1, loc=0, scale=0) + expected7 = np.nan + npt.assert_equal(vals7, expected7) + assert isinstance(vals7, expected7.__class__) + + # array args, scalar loc, and scalar scale + vals8 = stats.chi.moment(order=2, df=[1, 2, 3], loc=0, scale=0) + expected8 = np.array([np.nan, np.nan, np.nan]) + npt.assert_equal(vals8, expected8) + + # array args, array loc, and array scale + vals9 = stats.chi.moment(order=2, df=[1, 2, 3], loc=[1., 0., 2.], + scale=[1., -3., 0.]) + expected9 = np.array([3.59576912, np.nan, np.nan]) + npt.assert_allclose(vals9, expected9, rtol=1e-8) + + # (n > 4), all(loc != 0), and all(scale != 0) + vals10 = stats.norm.moment(5, [1., 2.], [1., 2.]) + expected10 = np.array([26., 832.]) + npt.assert_allclose(vals10, expected10, rtol=1e-13) + + # test broadcasting and more + a = [-1.1, 0, 1, 2.2, np.pi] + b = [-1.1, 0, 1, 2.2, np.pi] + loc = [-1.1, 0, np.sqrt(2)] + scale = [-2.1, 0, 1, 2.2, np.pi] + + a = np.array(a).reshape((-1, 1, 1, 1)) + b = np.array(b).reshape((-1, 1, 1)) + loc = np.array(loc).reshape((-1, 1)) + scale = np.array(scale) + + vals11 = stats.beta.moment(order=2, a=a, b=b, loc=loc, scale=scale) + + a, b, loc, scale = np.broadcast_arrays(a, b, loc, scale) + + for i in np.ndenumerate(a): + with np.errstate(invalid='ignore', divide='ignore'): + i = i[0] # just get the index + # check against same function with scalar input + expected = stats.beta.moment(order=2, a=a[i], b=b[i], + loc=loc[i], 
scale=scale[i]) + np.testing.assert_equal(vals11[i], expected) + + +def test_broadcasting_in_moments_gh12192_regression(): + vals0 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=[[1]]) + expected0 = np.array([[1., 2., 3.]]) + npt.assert_equal(vals0, expected0) + assert vals0.shape == expected0.shape + + vals1 = stats.norm.moment(order=1, loc=np.array([[1], [2], [3]]), + scale=[1, 2, 3]) + expected1 = np.array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]]) + npt.assert_equal(vals1, expected1) + assert vals1.shape == expected1.shape + + vals2 = stats.chi.moment(order=1, df=[1., 2., 3.], loc=0., scale=1.) + expected2 = np.array([0.79788456, 1.25331414, 1.59576912]) + npt.assert_allclose(vals2, expected2, rtol=1e-8) + assert vals2.shape == expected2.shape + + vals3 = stats.chi.moment(order=1, df=[[1.], [2.], [3.]], loc=[0., 1., 2.], + scale=[-1., 0., 3.]) + expected3 = np.array([[np.nan, np.nan, 4.39365368], + [np.nan, np.nan, 5.75994241], + [np.nan, np.nan, 6.78730736]]) + npt.assert_allclose(vals3, expected3, rtol=1e-8) + assert vals3.shape == expected3.shape + + +def test_kappa3_array_gh13582(): + # https://github.com/scipy/scipy/pull/15140#issuecomment-994958241 + shapes = [0.5, 1.5, 2.5, 3.5, 4.5] + moments = 'mvsk' + res = np.array([[stats.kappa3.stats(shape, moments=moment) + for shape in shapes] for moment in moments]) + res2 = np.array(stats.kappa3.stats(shapes, moments=moments)) + npt.assert_allclose(res, res2) + + +@pytest.mark.xslow +def test_kappa4_array_gh13582(): + h = np.array([-0.5, 2.5, 3.5, 4.5, -3]) + k = np.array([-0.5, 1, -1.5, 0, 3.5]) + moments = 'mvsk' + res = np.array([[stats.kappa4.stats(h[i], k[i], moments=moment) + for i in range(5)] for moment in moments]) + res2 = np.array(stats.kappa4.stats(h, k, moments=moments)) + npt.assert_allclose(res, res2) + + # https://github.com/scipy/scipy/pull/15250#discussion_r775112913 + h = np.array([-1, -1/4, -1/4, 1, -1, 0]) + k = np.array([1, 1, 1/2, -1/3, -1, 0]) + res = np.array([[stats.kappa4.stats(h[i], k[i], moments=moment) + for i in range(6)] for moment in moments]) + res2 = np.array(stats.kappa4.stats(h, k, moments=moments)) + npt.assert_allclose(res, res2) + + # https://github.com/scipy/scipy/pull/15250#discussion_r775115021 + h = np.array([-1, -0.5, 1]) + k = np.array([-1, -0.5, 0, 1])[:, None] + res2 = np.array(stats.kappa4.stats(h, k, moments=moments)) + assert res2.shape == (4, 4, 3) + + +def test_frozen_attributes(): + # gh-14827 reported that all frozen distributions had both pmf and pdf + # attributes; continuous should have pdf and discrete should have pmf. 
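+    # Illustrative positive check: the frozen continuous distribution
+    # should still expose the continuous-only attribute.
+    assert hasattr(stats.norm(), 'pdf')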
+ message = "'rv_continuous_frozen' object has no attribute" + with pytest.raises(AttributeError, match=message): + stats.norm().pmf + with pytest.raises(AttributeError, match=message): + stats.norm().logpmf + stats.norm.pmf = "herring" + frozen_norm = stats.norm() + assert isinstance(frozen_norm, rv_continuous_frozen) + delattr(stats.norm, 'pmf') + + +def test_skewnorm_pdf_gh16038(): + rng = np.random.default_rng(0) + x, a = -np.inf, 0 + npt.assert_equal(stats.skewnorm.pdf(x, a), stats.norm.pdf(x)) + x, a = rng.random(size=(3, 3)), rng.random(size=(3, 3)) + mask = rng.random(size=(3, 3)) < 0.5 + a[mask] = 0 + x_norm = x[mask] + res = stats.skewnorm.pdf(x, a) + npt.assert_equal(res[mask], stats.norm.pdf(x_norm)) + npt.assert_equal(res[~mask], stats.skewnorm.pdf(x[~mask], a[~mask])) + + +# for scalar input, these functions should return scalar output +scalar_out = [['rvs', []], ['pdf', [0]], ['logpdf', [0]], ['cdf', [0]], + ['logcdf', [0]], ['sf', [0]], ['logsf', [0]], ['ppf', [0]], + ['isf', [0]], ['moment', [1]], ['entropy', []], ['expect', []], + ['median', []], ['mean', []], ['std', []], ['var', []]] +scalars_out = [['interval', [0.95]], ['support', []], ['stats', ['mv']]] + + +@pytest.mark.parametrize('case', scalar_out + scalars_out) +def test_scalar_for_scalar(case): + # Some rv_continuous functions returned 0d array instead of NumPy scalar + # Guard against regression + method_name, args = case + method = getattr(stats.norm(), method_name) + res = method(*args) + if case in scalar_out: + assert isinstance(res, np.number) + else: + assert isinstance(res[0], np.number) + assert isinstance(res[1], np.number) + + +def test_scalar_for_scalar2(): + # test methods that are not attributes of frozen distributions + res = stats.norm.fit([1, 2, 3]) + assert isinstance(res[0], np.number) + assert isinstance(res[1], np.number) + res = stats.norm.fit_loc_scale([1, 2, 3]) + assert isinstance(res[0], np.number) + assert isinstance(res[1], np.number) + res = stats.norm.nnlf((0, 1), [1, 2, 3]) + assert isinstance(res, np.number) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_continuous_fit_censored.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_continuous_fit_censored.py new file mode 100644 index 0000000000000000000000000000000000000000..4508b49712e5bc8975bf2f9b2681ccc6504b0ae0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_continuous_fit_censored.py @@ -0,0 +1,683 @@ +# Tests for fitting specific distributions to censored data. + +import numpy as np +from numpy.testing import assert_allclose + +from scipy.optimize import fmin +from scipy.stats import (CensoredData, beta, cauchy, chi2, expon, gamma, + gumbel_l, gumbel_r, invgauss, invweibull, laplace, + logistic, lognorm, nct, ncx2, norm, weibull_max, + weibull_min) + + +# In some tests, we'll use this optimizer for improved accuracy. +def optimizer(func, x0, args=(), disp=0): + return fmin(func, x0, args=args, disp=disp, xtol=1e-12, ftol=1e-12) + + +def test_beta(): + """ + Test fitting beta shape parameters to interval-censored data. 
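+
+    Each interval-censored observation (left, right) contributes
+    F(right) - F(left) to the likelihood, where F is the beta CDF, so
+    the fit maximizes the product of these interval probabilities.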
+ + Calculation in R: + + > library(fitdistrplus) + > data <- data.frame(left=c(0.10, 0.50, 0.75, 0.80), + + right=c(0.20, 0.55, 0.90, 0.95)) + > result = fitdistcens(data, 'beta', control=list(reltol=1e-14)) + + > result + Fitting of the distribution ' beta ' on censored data by maximum likelihood + Parameters: + estimate + shape1 1.419941 + shape2 1.027066 + > result$sd + shape1 shape2 + 0.9914177 0.6866565 + """ + data = CensoredData(interval=[[0.10, 0.20], + [0.50, 0.55], + [0.75, 0.90], + [0.80, 0.95]]) + + # For this test, fit only the shape parameters; loc and scale are fixed. + a, b, loc, scale = beta.fit(data, floc=0, fscale=1, optimizer=optimizer) + + assert_allclose(a, 1.419941, rtol=5e-6) + assert_allclose(b, 1.027066, rtol=5e-6) + assert loc == 0 + assert scale == 1 + + +def test_cauchy_right_censored(): + """ + Test fitting the Cauchy distribution to right-censored data. + + Calculation in R, with two values not censored [1, 10] and + one right-censored value [30]. + + > library(fitdistrplus) + > data <- data.frame(left=c(1, 10, 30), right=c(1, 10, NA)) + > result = fitdistcens(data, 'cauchy', control=list(reltol=1e-14)) + > result + Fitting of the distribution ' cauchy ' on censored data by maximum + likelihood + Parameters: + estimate + location 7.100001 + scale 7.455866 + """ + data = CensoredData(uncensored=[1, 10], right=[30]) + loc, scale = cauchy.fit(data, optimizer=optimizer) + assert_allclose(loc, 7.10001, rtol=5e-6) + assert_allclose(scale, 7.455866, rtol=5e-6) + + +def test_cauchy_mixed(): + """ + Test fitting the Cauchy distribution to data with mixed censoring. + + Calculation in R, with: + * two values not censored [1, 10], + * one left-censored [1], + * one right-censored [30], and + * one interval-censored [[4, 8]]. + + > library(fitdistrplus) + > data <- data.frame(left=c(NA, 1, 4, 10, 30), right=c(1, 1, 8, 10, NA)) + > result = fitdistcens(data, 'cauchy', control=list(reltol=1e-14)) + > result + Fitting of the distribution ' cauchy ' on censored data by maximum + likelihood + Parameters: + estimate + location 4.605150 + scale 5.900852 + """ + data = CensoredData(uncensored=[1, 10], left=[1], right=[30], + interval=[[4, 8]]) + loc, scale = cauchy.fit(data, optimizer=optimizer) + assert_allclose(loc, 4.605150, rtol=5e-6) + assert_allclose(scale, 5.900852, rtol=5e-6) + + +def test_chi2_mixed(): + """ + Test fitting just the shape parameter (df) of chi2 to mixed data. + + Calculation in R, with: + * two values not censored [1, 10], + * one left-censored [1], + * one right-censored [30], and + * one interval-censored [[4, 8]]. + + > library(fitdistrplus) + > data <- data.frame(left=c(NA, 1, 4, 10, 30), right=c(1, 1, 8, 10, NA)) + > result = fitdistcens(data, 'chisq', control=list(reltol=1e-14)) + > result + Fitting of the distribution ' chisq ' on censored data by maximum + likelihood + Parameters: + estimate + df 5.060329 + """ + data = CensoredData(uncensored=[1, 10], left=[1], right=[30], + interval=[[4, 8]]) + df, loc, scale = chi2.fit(data, floc=0, fscale=1, optimizer=optimizer) + assert_allclose(df, 5.060329, rtol=5e-6) + assert loc == 0 + assert scale == 1 + + +def test_expon_right_censored(): + """ + For the exponential distribution with loc=0, the exact solution for + fitting n uncensored points x[0]...x[n-1] and m right-censored points + x[n]..x[n+m-1] is + + scale = sum(x)/n + + That is, divide the sum of all the values (not censored and + right-censored) by the number of uncensored values. 
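+
+    For reference, this follows from the log-likelihood
+
+        l(scale) = -n*log(scale) - sum(x)/scale,
+
+    in which each right-censored point contributes log(sf) = -x/scale;
+    the derivative -n/scale + sum(x)/scale**2 vanishes at scale = sum(x)/n.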
(See, for example, + https://en.wikipedia.org/wiki/Censoring_(statistics)#Likelihood.) + + The second derivative of the log-likelihood function is + + n/scale**2 - 2*sum(x)/scale**3 + + from which the estimate of the standard error can be computed. + + ----- + + Calculation in R, for reference only. The R results are not + used in the test. + + > library(fitdistrplus) + > dexps <- function(x, scale) { + + return(dexp(x, 1/scale)) + + } + > pexps <- function(q, scale) { + + return(pexp(q, 1/scale)) + + } + > left <- c(1, 2.5, 3, 6, 7.5, 10, 12, 12, 14.5, 15, + + 16, 16, 20, 20, 21, 22) + > right <- c(1, 2.5, 3, 6, 7.5, 10, 12, 12, 14.5, 15, + + NA, NA, NA, NA, NA, NA) + > result = fitdistcens(data, 'exps', start=list(scale=mean(data$left)), + + control=list(reltol=1e-14)) + > result + Fitting of the distribution ' exps ' on censored data by maximum likelihood + Parameters: + estimate + scale 19.85 + > result$sd + scale + 6.277119 + """ + # This data has 10 uncensored values and 6 right-censored values. + obs = [1, 2.5, 3, 6, 7.5, 10, 12, 12, 14.5, 15, 16, 16, 20, 20, 21, 22] + cens = [False]*10 + [True]*6 + data = CensoredData.right_censored(obs, cens) + + loc, scale = expon.fit(data, floc=0, optimizer=optimizer) + + assert loc == 0 + # Use the analytical solution to compute the expected value. This + # is the sum of the observed values divided by the number of uncensored + # values. + n = len(data) - data.num_censored() + total = data._uncensored.sum() + data._right.sum() + expected = total / n + assert_allclose(scale, expected, 1e-8) + + +def test_gamma_right_censored(): + """ + Fit gamma shape and scale to data with one right-censored value. + + Calculation in R: + + > library(fitdistrplus) + > data <- data.frame(left=c(2.5, 2.9, 3.8, 9.1, 9.3, 12.0, 23.0, 25.0), + + right=c(2.5, 2.9, 3.8, 9.1, 9.3, 12.0, 23.0, NA)) + > result = fitdistcens(data, 'gamma', start=list(shape=1, scale=10), + + control=list(reltol=1e-13)) + > result + Fitting of the distribution ' gamma ' on censored data by maximum + likelihood + Parameters: + estimate + shape 1.447623 + scale 8.360197 + > result$sd + shape scale + 0.7053086 5.1016531 + """ + # The last value is right-censored. + x = CensoredData.right_censored([2.5, 2.9, 3.8, 9.1, 9.3, 12.0, 23.0, + 25.0], + [0]*7 + [1]) + + a, loc, scale = gamma.fit(x, floc=0, optimizer=optimizer) + + assert_allclose(a, 1.447623, rtol=5e-6) + assert loc == 0 + assert_allclose(scale, 8.360197, rtol=5e-6) + + +def test_gumbel(): + """ + Fit gumbel_l and gumbel_r to censored data. + + This R calculation should match gumbel_r. + + > library(evd) + > library(fitdistrplus) + > data = data.frame(left=c(0, 2, 3, 9, 10, 10), + + right=c(1, 2, 3, 9, NA, NA)) + > result = fitdistcens(data, 'gumbel', + + control=list(reltol=1e-14), + + start=list(loc=4, scale=5)) + > result + Fitting of the distribution ' gumbel ' on censored data by maximum + likelihood + Parameters: + estimate + loc 4.487853 + scale 4.843640 + """ + # First value is interval-censored. Last two are right-censored. + uncensored = np.array([2, 3, 9]) + right = np.array([10, 10]) + interval = np.array([[0, 1]]) + data = CensoredData(uncensored, right=right, interval=interval) + loc, scale = gumbel_r.fit(data, optimizer=optimizer) + assert_allclose(loc, 4.487853, rtol=5e-6) + assert_allclose(scale, 4.843640, rtol=5e-6) + + # Negate the data and reverse the intervals, and test with gumbel_l. 
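+    # For reference: if X ~ gumbel_r(loc=m, scale=s), then -X ~
+    # gumbel_l(loc=-m, scale=s), so negating the data (and swapping the
+    # censoring directions) should negate the fitted location only.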
+ data2 = CensoredData(-uncensored, left=-right, + interval=-interval[:, ::-1]) + # Fitting gumbel_l to data2 should give the same result as above, but + # with loc negated. + loc2, scale2 = gumbel_l.fit(data2, optimizer=optimizer) + assert_allclose(loc2, -4.487853, rtol=5e-6) + assert_allclose(scale2, 4.843640, rtol=5e-6) + + +def test_invgauss(): + """ + Fit just the shape parameter of invgauss to data with one value + left-censored and one value right-censored. + + Calculation in R; using a fixed dispersion parameter amounts to fixing + the scale to be 1. + + > library(statmod) + > library(fitdistrplus) + > left <- c(NA, 0.4813096, 0.5571880, 0.5132463, 0.3801414, 0.5904386, + + 0.4822340, 0.3478597, 3, 0.7191797, 1.5810902, 0.4442299) + > right <- c(0.15, 0.4813096, 0.5571880, 0.5132463, 0.3801414, 0.5904386, + + 0.4822340, 0.3478597, NA, 0.7191797, 1.5810902, 0.4442299) + > data <- data.frame(left=left, right=right) + > result = fitdistcens(data, 'invgauss', control=list(reltol=1e-12), + + fix.arg=list(dispersion=1), start=list(mean=3)) + > result + Fitting of the distribution ' invgauss ' on censored data by maximum + likelihood + Parameters: + estimate + mean 0.853469 + Fixed parameters: + value + dispersion 1 + > result$sd + mean + 0.247636 + + Here's the R calculation with the dispersion as a free parameter to + be fit. + + > result = fitdistcens(data, 'invgauss', control=list(reltol=1e-12), + + start=list(mean=3, dispersion=1)) + > result + Fitting of the distribution ' invgauss ' on censored data by maximum + likelihood + Parameters: + estimate + mean 0.8699819 + dispersion 1.2261362 + + The parametrization of the inverse Gaussian distribution in the + `statmod` package is not the same as in SciPy (see + https://arxiv.org/abs/1603.06687 + for details). The translation from R to SciPy is + + scale = 1/dispersion + mu = mean * dispersion + + > 1/result$estimate['dispersion'] # 1/dispersion + dispersion + 0.8155701 + > result$estimate['mean'] * result$estimate['dispersion'] + mean + 1.066716 + + Those last two values are the SciPy scale and shape parameters. + """ + # One point is left-censored, and one is right-censored. + x = [0.4813096, 0.5571880, 0.5132463, 0.3801414, + 0.5904386, 0.4822340, 0.3478597, 0.7191797, + 1.5810902, 0.4442299] + data = CensoredData(uncensored=x, left=[0.15], right=[3]) + + # Fit only the shape parameter. + mu, loc, scale = invgauss.fit(data, floc=0, fscale=1, optimizer=optimizer) + + assert_allclose(mu, 0.853469, rtol=5e-5) + assert loc == 0 + assert scale == 1 + + # Fit the shape and scale. + mu, loc, scale = invgauss.fit(data, floc=0, optimizer=optimizer) + + assert_allclose(mu, 1.066716, rtol=5e-5) + assert loc == 0 + assert_allclose(scale, 0.8155701, rtol=5e-5) + + +def test_invweibull(): + """ + Fit invweibull to censored data. + + Here is the calculation in R. The 'frechet' distribution from the evd + package matches SciPy's invweibull distribution. The `loc` parameter + is fixed at 0. + + > library(evd) + > library(fitdistrplus) + > data = data.frame(left=c(0, 2, 3, 9, 10, 10), + + right=c(1, 2, 3, 9, NA, NA)) + > result = fitdistcens(data, 'frechet', + + control=list(reltol=1e-14), + + start=list(loc=4, scale=5)) + > result + Fitting of the distribution ' frechet ' on censored data by maximum + likelihood + Parameters: + estimate + scale 2.7902200 + shape 0.6379845 + Fixed parameters: + value + loc 0 + """ + # In the R data, the first value is interval-censored, and the last + # two are right-censored. The rest are not censored. 
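+    # For reference, scipy's invweibull has cdf(x, c) = exp(-x**(-c))
+    # for x > 0, which matches the Frechet distribution fit above.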
+    data = CensoredData(uncensored=[2, 3, 9], right=[10, 10],
+                        interval=[[0, 1]])
+    c, loc, scale = invweibull.fit(data, floc=0, optimizer=optimizer)
+    assert_allclose(c, 0.6379845, rtol=5e-6)
+    assert loc == 0
+    assert_allclose(scale, 2.7902200, rtol=5e-6)
+
+
+def test_laplace():
+    """
+    Fit the Laplace distribution to left- and right-censored data.
+
+    Calculation in R:
+
+    > library(fitdistrplus)
+    > dlaplace <- function(x, location=0, scale=1) {
+    +     return(0.5*exp(-abs((x - location)/scale))/scale)
+    + }
+    > plaplace <- function(q, location=0, scale=1) {
+    +     z <- (q - location)/scale
+    +     s <- sign(z)
+    +     f <- -s*0.5*exp(-abs(z)) + (s+1)/2
+    +     return(f)
+    + }
+    > left <- c(NA, -41.564, 50.0, 15.7384, 50.0, 10.0452, -2.0684,
+    +           -19.5399, 50.0, 9.0005, 27.1227, 4.3113, -3.7372,
+    +           25.3111, 14.7987, 34.0887, 50.0, 42.8496, 18.5862,
+    +           32.8921, 9.0448, -27.4591, NA, 19.5083, -9.7199)
+    > right <- c(-50.0, -41.564, NA, 15.7384, NA, 10.0452, -2.0684,
+    +            -19.5399, NA, 9.0005, 27.1227, 4.3113, -3.7372,
+    +            25.3111, 14.7987, 34.0887, NA, 42.8496, 18.5862,
+    +            32.8921, 9.0448, -27.4591, -50.0, 19.5083, -9.7199)
+    > data <- data.frame(left=left, right=right)
+    > result <- fitdistcens(data, 'laplace', start=list(location=10, scale=10),
+    +                       control=list(reltol=1e-13))
+    > result
+    Fitting of the distribution ' laplace ' on censored data by maximum
+    likelihood
+    Parameters:
+             estimate
+    location 14.79870
+    scale    30.93601
+    > result$sd
+     location     scale
+    0.1758864 7.0972125
+    """
+    # The value -50 is left-censored, and the value 50 is right-censored.
+    obs = np.array([-50.0, -41.564, 50.0, 15.7384, 50.0, 10.0452, -2.0684,
+                    -19.5399, 50.0, 9.0005, 27.1227, 4.3113, -3.7372,
+                    25.3111, 14.7987, 34.0887, 50.0, 42.8496, 18.5862,
+                    32.8921, 9.0448, -27.4591, -50.0, 19.5083, -9.7199])
+    x = obs[(obs != -50.0) & (obs != 50)]
+    left = obs[obs == -50.0]
+    right = obs[obs == 50.0]
+    data = CensoredData(uncensored=x, left=left, right=right)
+    loc, scale = laplace.fit(data, loc=10, scale=10, optimizer=optimizer)
+    assert_allclose(loc, 14.79870, rtol=5e-6)
+    assert_allclose(scale, 30.93601, rtol=5e-6)
+
+
+def test_logistic():
+    """
+    Fit the logistic distribution to left-censored data.
+
+    Calculation in R:
+    > library(fitdistrplus)
+    > left = c(13.5401, 37.4235, 11.906 , 13.998 , NA     , 0.4023, NA     ,
+    +          10.9044, 21.0629, 9.6985, NA     , 12.9016, 39.164 , 34.6396,
+    +          NA     , 20.3665, 16.5889, 18.0952, 45.3818, 35.3306, 8.4949,
+    +          3.4041, NA     , 7.2828, 37.1265, 6.5969, 17.6868, 17.4977,
+    +          16.3391, 36.0541)
+    > right = c(13.5401, 37.4235, 11.906 , 13.998 , 0.     , 0.4023, 0.     ,
+    +           10.9044, 21.0629, 9.6985, 0.     , 12.9016, 39.164 , 34.6396,
+    +           0.     , 20.3665, 16.5889, 18.0952, 45.3818, 35.3306, 8.4949,
+    +           3.4041, 0.     , 7.2828, 37.1265, 6.5969, 17.6868, 17.4977,
+    +           16.3391, 36.0541)
+    > data = data.frame(left=left, right=right)
+    > result = fitdistcens(data, 'logis', control=list(reltol=1e-14))
+    > result
+    Fitting of the distribution ' logis ' on censored data by maximum
+    likelihood
+    Parameters:
+              estimate
+    location 14.633459
+    scale     9.232736
+    > result$sd
+    location    scale
+    2.931505 1.546879
+    """
+    # Values that are zero are left-censored; the true values are less than 0.
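+    # (CensoredData.left_censored(x, censored) treats x[i] as an upper
+    # bound on the true value wherever censored[i] is True.)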
+ x = np.array([13.5401, 37.4235, 11.906, 13.998, 0.0, 0.4023, 0.0, 10.9044, + 21.0629, 9.6985, 0.0, 12.9016, 39.164, 34.6396, 0.0, 20.3665, + 16.5889, 18.0952, 45.3818, 35.3306, 8.4949, 3.4041, 0.0, + 7.2828, 37.1265, 6.5969, 17.6868, 17.4977, 16.3391, + 36.0541]) + data = CensoredData.left_censored(x, censored=(x == 0)) + loc, scale = logistic.fit(data, optimizer=optimizer) + assert_allclose(loc, 14.633459, rtol=5e-7) + assert_allclose(scale, 9.232736, rtol=5e-6) + + +def test_lognorm(): + """ + Ref: https://math.montana.edu/jobo/st528/documents/relc.pdf + + The data is the locomotive control time to failure example that starts + on page 8. That's the 8th page in the PDF; the page number shown in + the text is 270). + The document includes SAS output for the data. + """ + # These are the uncensored measurements. There are also 59 right-censored + # measurements where the lower bound is 135. + miles_to_fail = [22.5, 37.5, 46.0, 48.5, 51.5, 53.0, 54.5, 57.5, 66.5, + 68.0, 69.5, 76.5, 77.0, 78.5, 80.0, 81.5, 82.0, 83.0, + 84.0, 91.5, 93.5, 102.5, 107.0, 108.5, 112.5, 113.5, + 116.0, 117.0, 118.5, 119.0, 120.0, 122.5, 123.0, 127.5, + 131.0, 132.5, 134.0] + + data = CensoredData.right_censored(miles_to_fail + [135]*59, + [0]*len(miles_to_fail) + [1]*59) + sigma, loc, scale = lognorm.fit(data, floc=0) + + assert loc == 0 + # Convert the lognorm parameters to the mu and sigma of the underlying + # normal distribution. + mu = np.log(scale) + # The expected results are from the 17th page of the PDF document + # (labeled page 279), in the SAS output on the right side of the page. + assert_allclose(mu, 5.1169, rtol=5e-4) + assert_allclose(sigma, 0.7055, rtol=5e-3) + + +def test_nct(): + """ + Test fitting the noncentral t distribution to censored data. + + Calculation in R: + + > library(fitdistrplus) + > data <- data.frame(left=c(1, 2, 3, 5, 8, 10, 25, 25), + + right=c(1, 2, 3, 5, 8, 10, NA, NA)) + > result = fitdistcens(data, 't', control=list(reltol=1e-14), + + start=list(df=1, ncp=2)) + > result + Fitting of the distribution ' t ' on censored data by maximum likelihood + Parameters: + estimate + df 0.5432336 + ncp 2.8893565 + + """ + data = CensoredData.right_censored([1, 2, 3, 5, 8, 10, 25, 25], + [0, 0, 0, 0, 0, 0, 1, 1]) + # Fit just the shape parameter df and nc; loc and scale are fixed. + with np.errstate(over='ignore'): # remove context when gh-14901 is closed + df, nc, loc, scale = nct.fit(data, floc=0, fscale=1, + optimizer=optimizer) + assert_allclose(df, 0.5432336, rtol=5e-6) + assert_allclose(nc, 2.8893565, rtol=5e-6) + assert loc == 0 + assert scale == 1 + + +def test_ncx2(): + """ + Test fitting the shape parameters (df, ncp) of ncx2 to mixed data. + + Calculation in R, with + * 5 not censored values [2.7, 0.2, 6.5, 0.4, 0.1], + * 1 interval-censored value [[0.6, 1.0]], and + * 2 right-censored values [8, 8]. 
+ + > library(fitdistrplus) + > data <- data.frame(left=c(2.7, 0.2, 6.5, 0.4, 0.1, 0.6, 8, 8), + + right=c(2.7, 0.2, 6.5, 0.4, 0.1, 1.0, NA, NA)) + > result = fitdistcens(data, 'chisq', control=list(reltol=1e-14), + + start=list(df=1, ncp=2)) + > result + Fitting of the distribution ' chisq ' on censored data by maximum + likelihood + Parameters: + estimate + df 1.052871 + ncp 2.362934 + """ + data = CensoredData(uncensored=[2.7, 0.2, 6.5, 0.4, 0.1], right=[8, 8], + interval=[[0.6, 1.0]]) + with np.errstate(over='ignore'): # remove context when gh-14901 is closed + df, ncp, loc, scale = ncx2.fit(data, floc=0, fscale=1, + optimizer=optimizer) + assert_allclose(df, 1.052871, rtol=5e-6) + assert_allclose(ncp, 2.362934, rtol=5e-6) + assert loc == 0 + assert scale == 1 + + +def test_norm(): + """ + Test fitting the normal distribution to interval-censored data. + + Calculation in R: + + > library(fitdistrplus) + > data <- data.frame(left=c(0.10, 0.50, 0.75, 0.80), + + right=c(0.20, 0.55, 0.90, 0.95)) + > result = fitdistcens(data, 'norm', control=list(reltol=1e-14)) + + > result + Fitting of the distribution ' norm ' on censored data by maximum likelihood + Parameters: + estimate + mean 0.5919990 + sd 0.2868042 + > result$sd + mean sd + 0.1444432 0.1029451 + """ + data = CensoredData(interval=[[0.10, 0.20], + [0.50, 0.55], + [0.75, 0.90], + [0.80, 0.95]]) + + loc, scale = norm.fit(data, optimizer=optimizer) + + assert_allclose(loc, 0.5919990, rtol=5e-6) + assert_allclose(scale, 0.2868042, rtol=5e-6) + + +def test_weibull_censored1(): + # Ref: http://www.ams.sunysb.edu/~zhu/ams588/Lecture_3_likelihood.pdf + + # Survival times; '*' indicates right-censored. + s = "3,5,6*,8,10*,11*,15,20*,22,23,27*,29,32,35,40,26,28,33*,21,24*" + + times, cens = zip(*[(float(t[0]), len(t) == 2) + for t in [w.split('*') for w in s.split(',')]]) + data = CensoredData.right_censored(times, cens) + + c, loc, scale = weibull_min.fit(data, floc=0) + + # Expected values are from the reference. + assert_allclose(c, 2.149, rtol=1e-3) + assert loc == 0 + assert_allclose(scale, 28.99, rtol=1e-3) + + # Flip the sign of the data, and make the censored values + # left-censored. We should get the same parameters when we fit + # weibull_max to the flipped data. 
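+    # For reference: if X ~ weibull_min(c, scale=s), then -X ~
+    # weibull_max(c, scale=s), and a right-censored X becomes a
+    # left-censored -X, so the shape and scale estimates carry over.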
+ data2 = CensoredData.left_censored(-np.array(times), cens) + + c2, loc2, scale2 = weibull_max.fit(data2, floc=0) + + assert_allclose(c2, 2.149, rtol=1e-3) + assert loc2 == 0 + assert_allclose(scale2, 28.99, rtol=1e-3) + + +def test_weibull_min_sas1(): + # Data and SAS results from + # https://support.sas.com/documentation/cdl/en/qcug/63922/HTML/default/ + # viewer.htm#qcug_reliability_sect004.htm + + text = """ + 450 0 460 1 1150 0 1150 0 1560 1 + 1600 0 1660 1 1850 1 1850 1 1850 1 + 1850 1 1850 1 2030 1 2030 1 2030 1 + 2070 0 2070 0 2080 0 2200 1 3000 1 + 3000 1 3000 1 3000 1 3100 0 3200 1 + 3450 0 3750 1 3750 1 4150 1 4150 1 + 4150 1 4150 1 4300 1 4300 1 4300 1 + 4300 1 4600 0 4850 1 4850 1 4850 1 + 4850 1 5000 1 5000 1 5000 1 6100 1 + 6100 0 6100 1 6100 1 6300 1 6450 1 + 6450 1 6700 1 7450 1 7800 1 7800 1 + 8100 1 8100 1 8200 1 8500 1 8500 1 + 8500 1 8750 1 8750 0 8750 1 9400 1 + 9900 1 10100 1 10100 1 10100 1 11500 1 + """ + + life, cens = np.array([int(w) for w in text.split()]).reshape(-1, 2).T + life = life/1000.0 + + data = CensoredData.right_censored(life, cens) + + c, loc, scale = weibull_min.fit(data, floc=0, optimizer=optimizer) + assert_allclose(c, 1.0584, rtol=1e-4) + assert_allclose(scale, 26.2968, rtol=1e-5) + assert loc == 0 + + +def test_weibull_min_sas2(): + # http://support.sas.com/documentation/cdl/en/ormpug/67517/HTML/default/ + # viewer.htm#ormpug_nlpsolver_examples06.htm + + # The last two values are right-censored. + days = np.array([143, 164, 188, 188, 190, 192, 206, 209, 213, 216, 220, + 227, 230, 234, 246, 265, 304, 216, 244]) + + data = CensoredData.right_censored(days, [0]*(len(days) - 2) + [1]*2) + + c, loc, scale = weibull_min.fit(data, 1, loc=100, scale=100, + optimizer=optimizer) + + assert_allclose(c, 2.7112, rtol=5e-4) + assert_allclose(loc, 122.03, rtol=5e-4) + assert_allclose(scale, 108.37, rtol=5e-4) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_crosstab.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_crosstab.py new file mode 100644 index 0000000000000000000000000000000000000000..35eda2de983654f0eb7d41aaccf3bd1a2e93505a --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_crosstab.py @@ -0,0 +1,115 @@ +import pytest +import numpy as np +from numpy.testing import assert_array_equal, assert_equal +from scipy.stats.contingency import crosstab + + +@pytest.mark.parametrize('sparse', [False, True]) +def test_crosstab_basic(sparse): + a = [0, 0, 9, 9, 0, 0, 9] + b = [2, 1, 3, 1, 2, 3, 3] + expected_avals = [0, 9] + expected_bvals = [1, 2, 3] + expected_count = np.array([[1, 2, 1], + [1, 0, 2]]) + (avals, bvals), count = crosstab(a, b, sparse=sparse) + assert_array_equal(avals, expected_avals) + assert_array_equal(bvals, expected_bvals) + if sparse: + assert_array_equal(count.A, expected_count) + else: + assert_array_equal(count, expected_count) + + +def test_crosstab_basic_1d(): + # Verify that a single input sequence works as expected. + x = [1, 2, 3, 1, 2, 3, 3] + expected_xvals = [1, 2, 3] + expected_count = np.array([2, 2, 3]) + (xvals,), count = crosstab(x) + assert_array_equal(xvals, expected_xvals) + assert_array_equal(count, expected_count) + + +def test_crosstab_basic_3d(): + # Verify the function for three input sequences. 
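+    # count[i, j, k] is the number of positions t at which
+    # (x[t], y[t], z[t]) == (xvals[i], yvals[j], zvals[k]); e.g. the
+    # triple (9, 'a', 1) occurs twice below, so expected_count[1, 0, 0] == 2.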
+ a = 'a' + b = 'b' + x = [0, 0, 9, 9, 0, 0, 9, 9] + y = [a, a, a, a, b, b, b, a] + z = [1, 2, 3, 1, 2, 3, 3, 1] + expected_xvals = [0, 9] + expected_yvals = [a, b] + expected_zvals = [1, 2, 3] + expected_count = np.array([[[1, 1, 0], + [0, 1, 1]], + [[2, 0, 1], + [0, 0, 1]]]) + (xvals, yvals, zvals), count = crosstab(x, y, z) + assert_array_equal(xvals, expected_xvals) + assert_array_equal(yvals, expected_yvals) + assert_array_equal(zvals, expected_zvals) + assert_array_equal(count, expected_count) + + +@pytest.mark.parametrize('sparse', [False, True]) +def test_crosstab_levels(sparse): + a = [0, 0, 9, 9, 0, 0, 9] + b = [1, 2, 3, 1, 2, 3, 3] + expected_avals = [0, 9] + expected_bvals = [0, 1, 2, 3] + expected_count = np.array([[0, 1, 2, 1], + [0, 1, 0, 2]]) + (avals, bvals), count = crosstab(a, b, levels=[None, [0, 1, 2, 3]], + sparse=sparse) + assert_array_equal(avals, expected_avals) + assert_array_equal(bvals, expected_bvals) + if sparse: + assert_array_equal(count.A, expected_count) + else: + assert_array_equal(count, expected_count) + + +@pytest.mark.parametrize('sparse', [False, True]) +def test_crosstab_extra_levels(sparse): + # The pair of values (-1, 3) will be ignored, because we explicitly + # request the counted `a` values to be [0, 9]. + a = [0, 0, 9, 9, 0, 0, 9, -1] + b = [1, 2, 3, 1, 2, 3, 3, 3] + expected_avals = [0, 9] + expected_bvals = [0, 1, 2, 3] + expected_count = np.array([[0, 1, 2, 1], + [0, 1, 0, 2]]) + (avals, bvals), count = crosstab(a, b, levels=[[0, 9], [0, 1, 2, 3]], + sparse=sparse) + assert_array_equal(avals, expected_avals) + assert_array_equal(bvals, expected_bvals) + if sparse: + assert_array_equal(count.A, expected_count) + else: + assert_array_equal(count, expected_count) + + +def test_validation_at_least_one(): + with pytest.raises(TypeError, match='At least one'): + crosstab() + + +def test_validation_same_lengths(): + with pytest.raises(ValueError, match='must have the same length'): + crosstab([1, 2], [1, 2, 3, 4]) + + +def test_validation_sparse_only_two_args(): + with pytest.raises(ValueError, match='only two input sequences'): + crosstab([0, 1, 1], [8, 8, 9], [1, 3, 3], sparse=True) + + +def test_validation_len_levels_matches_args(): + with pytest.raises(ValueError, match='number of input sequences'): + crosstab([0, 1, 1], [8, 8, 9], levels=([0, 1, 2, 3],)) + + +def test_result(): + res = crosstab([0, 1], [1, 2]) + assert_equal((res.elements, res.count), res) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_discrete_basic.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_discrete_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..bce36b97c1f4ac9c42e414acda7447a1259543a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_discrete_basic.py @@ -0,0 +1,546 @@ +import numpy.testing as npt +from numpy.testing import assert_allclose + +import numpy as np +import pytest + +from scipy import stats +from .common_tests import (check_normalization, check_moment, + check_mean_expect, + check_var_expect, check_skew_expect, + check_kurt_expect, check_entropy, + check_private_entropy, check_edge_support, + check_named_args, check_random_state_property, + check_pickling, check_rvs_broadcast, + check_freezing,) +from scipy.stats._distr_params import distdiscrete, invdistdiscrete +from scipy.stats._distn_infrastructure import rv_discrete_frozen + +vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4]) +distdiscrete += [[stats.rv_discrete(values=vals), ()]] + +# For these distributions, 
test_discrete_basic only runs with test mode full +distslow = {'zipfian', 'nhypergeom'} + + +def cases_test_discrete_basic(): + seen = set() + for distname, arg in distdiscrete: + if distname in distslow: + yield pytest.param(distname, arg, distname, marks=pytest.mark.slow) + else: + yield distname, arg, distname not in seen + seen.add(distname) + + +@pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic()) +def test_discrete_basic(distname, arg, first_case): + try: + distfn = getattr(stats, distname) + except TypeError: + distfn = distname + distname = 'sample distribution' + np.random.seed(9765456) + rvs = distfn.rvs(size=2000, *arg) + supp = np.unique(rvs) + m, v = distfn.stats(*arg) + check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf') + + check_pmf_cdf(distfn, arg, distname) + check_oth(distfn, arg, supp, distname + ' oth') + check_edge_support(distfn, arg) + + alpha = 0.01 + check_discrete_chisquare(distfn, arg, rvs, alpha, + distname + ' chisquare') + + if first_case: + locscale_defaults = (0,) + meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf, + distfn.logsf] + # make sure arguments are within support + # for some distributions, this needs to be overridden + spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, + 'nchypergeom_wallenius': 6} + k = spec_k.get(distname, 1) + check_named_args(distfn, k, arg, locscale_defaults, meths) + if distname != 'sample distribution': + check_scale_docstring(distfn) + check_random_state_property(distfn, arg) + check_pickling(distfn, arg) + check_freezing(distfn, arg) + + # Entropy + check_entropy(distfn, arg, distname) + if distfn.__class__._entropy != stats.rv_discrete._entropy: + check_private_entropy(distfn, arg, stats.rv_discrete) + + +@pytest.mark.parametrize('distname,arg', distdiscrete) +def test_moments(distname, arg): + try: + distfn = getattr(stats, distname) + except TypeError: + distfn = distname + distname = 'sample distribution' + m, v, s, k = distfn.stats(*arg, moments='mvsk') + check_normalization(distfn, arg, distname) + + # compare `stats` and `moment` methods + check_moment(distfn, arg, m, v, distname) + check_mean_expect(distfn, arg, m, distname) + check_var_expect(distfn, arg, m, v, distname) + check_skew_expect(distfn, arg, m, v, s, distname) + if distname not in ['zipf', 'yulesimon', 'betanbinom']: + check_kurt_expect(distfn, arg, m, v, k, distname) + + # frozen distr moments + check_moment_frozen(distfn, arg, m, 1) + check_moment_frozen(distfn, arg, v+m*m, 2) + + +@pytest.mark.parametrize('dist,shape_args', distdiscrete) +def test_rvs_broadcast(dist, shape_args): + # If shape_only is True, it means the _rvs method of the + # distribution uses more than one random number to generate a random + # variate. That means the result of using rvs with broadcasting or + # with a nontrivial size will not necessarily be the same as using the + # numpy.vectorize'd version of rvs(), so we can only compare the shapes + # of the results, not the values. + # Whether or not a distribution is in the following list is an + # implementation detail of the distribution, not a requirement. If + # the implementation the rvs() method of a distribution changes, this + # test might also have to be changed. 
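+    # For example, for a one-parameter distribution the loop below builds
+    # a (3, 1)-shaped shape-parameter array; broadcast against the size-2
+    # loc array, the expected result shape is (3, 2), i.e. bshape == [3, 2].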
+    shape_only = dist in ['betabinom', 'betanbinom', 'skellam', 'yulesimon',
+                          'dlaplace', 'nchypergeom_fisher',
+                          'nchypergeom_wallenius']
+
+    try:
+        distfunc = getattr(stats, dist)
+    except TypeError:
+        distfunc = dist
+        dist = f'rv_discrete(values=({dist.xk!r}, {dist.pk!r}))'
+    loc = np.zeros(2)
+    nargs = distfunc.numargs
+    allargs = []
+    bshape = []
+    # Generate shape parameter arguments...
+    for k in range(nargs):
+        shp = (k + 3,) + (1,)*(k + 1)
+        param_val = shape_args[k]
+        allargs.append(np.full(shp, param_val))
+        bshape.insert(0, shp[0])
+    allargs.append(loc)
+    bshape.append(loc.size)
+    # bshape holds the expected shape when loc, scale, and the shape
+    # parameters are all broadcast together.
+    check_rvs_broadcast(
+        distfunc, dist, allargs, bshape, shape_only, [np.dtype(int)]
+    )
+
+
+@pytest.mark.parametrize('dist,args', distdiscrete)
+def test_ppf_with_loc(dist, args):
+    try:
+        distfn = getattr(stats, dist)
+    except TypeError:
+        distfn = dist
+    # check with a negative, a zero, and a positive relocation.
+    np.random.seed(1942349)
+    re_locs = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
+    _a, _b = distfn.support(*args)
+    for loc in re_locs:
+        npt.assert_array_equal(
+            [_a-1+loc, _b+loc],
+            [distfn.ppf(0.0, *args, loc=loc), distfn.ppf(1.0, *args, loc=loc)]
+        )
+
+
+@pytest.mark.parametrize('dist, args', distdiscrete)
+def test_isf_with_loc(dist, args):
+    try:
+        distfn = getattr(stats, dist)
+    except TypeError:
+        distfn = dist
+    # check with a negative, a zero, and a positive relocation.
+    np.random.seed(1942349)
+    re_locs = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
+    _a, _b = distfn.support(*args)
+    for loc in re_locs:
+        expected = _b + loc, _a - 1 + loc
+        res = distfn.isf(0., *args, loc=loc), distfn.isf(1., *args, loc=loc)
+        npt.assert_array_equal(expected, res)
+    # test broadcasting behaviour
+    re_locs = [np.random.randint(-10, -1, size=(5, 3)),
+               np.zeros((5, 3)),
+               np.random.randint(1, 10, size=(5, 3))]
+    _a, _b = distfn.support(*args)
+    for loc in re_locs:
+        expected = _b + loc, _a - 1 + loc
+        res = distfn.isf(0., *args, loc=loc), distfn.isf(1., *args, loc=loc)
+        npt.assert_array_equal(expected, res)
+
+
+def check_cdf_ppf(distfn, arg, supp, msg):
+    # supp is assumed to be an array of integers in the support of distfn
+    # (but not necessarily all the integers in the support).
+    # This test assumes that the PMF of any value in the support of the
+    # distribution is greater than 1e-8.
+
+    # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer}
+    cdf_supp = distfn.cdf(supp, *arg)
+    # In very rare cases, the finite precision calculation of ppf(cdf(supp))
+    # can produce an array in which an element is off by one.  We nudge the
+    # CDF values down by 15 ULPs to help avoid this.
+    cdf_supp0 = cdf_supp - 15*np.spacing(cdf_supp)
+    npt.assert_array_equal(distfn.ppf(cdf_supp0, *arg),
+                           supp, msg + '-roundtrip')
+    # Repeat the same calculation, but with the CDF values decreased by 1e-8.
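+    # (For example, for bernoulli(0.5), cdf(0) == 0.5, so
+    # ppf(0.5 - 1e-8) == 0 while ppf(0.5 + 1e-8) == 1.)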
+ npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg), + supp, msg + '-roundtrip') + + if not hasattr(distfn, 'xk'): + _a, _b = distfn.support(*arg) + supp1 = supp[supp < _b] + npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg), + supp1 + distfn.inc, msg + ' ppf-cdf-next') + + +def check_pmf_cdf(distfn, arg, distname): + if hasattr(distfn, 'xk'): + index = distfn.xk + else: + startind = int(distfn.ppf(0.01, *arg) - 1) + index = list(range(startind, startind + 10)) + cdfs = distfn.cdf(index, *arg) + pmfs_cum = distfn.pmf(index, *arg).cumsum() + + atol, rtol = 1e-10, 1e-10 + if distname == 'skellam': # ncx2 accuracy + atol, rtol = 1e-5, 1e-5 + npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0], + atol=atol, rtol=rtol) + + # also check that pmf at non-integral k is zero + k = np.asarray(index) + k_shifted = k[:-1] + np.diff(k)/2 + npt.assert_equal(distfn.pmf(k_shifted, *arg), 0) + + # better check frozen distributions, and also when loc != 0 + loc = 0.5 + dist = distfn(loc=loc, *arg) + npt.assert_allclose(dist.pmf(k[1:] + loc), np.diff(dist.cdf(k + loc))) + npt.assert_equal(dist.pmf(k_shifted + loc), 0) + + +def check_moment_frozen(distfn, arg, m, k): + npt.assert_allclose(distfn(*arg).moment(k), m, + atol=1e-10, rtol=1e-10) + + +def check_oth(distfn, arg, supp, msg): + # checking other methods of distfn + npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg), + atol=1e-10, rtol=1e-10) + + q = np.linspace(0.01, 0.99, 20) + npt.assert_allclose(distfn.isf(q, *arg), distfn.ppf(1. - q, *arg), + atol=1e-10, rtol=1e-10) + + median_sf = distfn.isf(0.5, *arg) + npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5) + npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5) + + +def check_discrete_chisquare(distfn, arg, rvs, alpha, msg): + """Perform chisquare test for random sample of a discrete distribution + + Parameters + ---------- + distname : string + name of distribution function + arg : sequence + parameters of distribution + alpha : float + significance level, threshold for p-value + + Returns + ------- + result : bool + 0 if test passes, 1 if test fails + + """ + wsupp = 0.05 + + # construct intervals with minimum mass `wsupp`. 
+    # intervals are left-half-open as in a cdf difference
+    _a, _b = distfn.support(*arg)
+    lo = int(max(_a, -1000))
+    high = int(min(_b, 1000)) + 1
+    distsupport = range(lo, high)
+    last = 0
+    distsupp = [lo]
+    distmass = []
+    for ii in distsupport:
+        current = distfn.cdf(ii, *arg)
+        if current - last >= wsupp - 1e-14:
+            distsupp.append(ii)
+            distmass.append(current - last)
+            last = current
+            if current > (1 - wsupp):
+                break
+    if distsupp[-1] < _b:
+        distsupp.append(_b)
+        distmass.append(1 - last)
+    distsupp = np.array(distsupp)
+    distmass = np.array(distmass)
+
+    # convert intervals to right-half-open as required by histogram
+    histsupp = distsupp + 1e-8
+    histsupp[0] = _a
+
+    # find sample frequencies and perform chisquare test
+    freq, hsupp = np.histogram(rvs, histsupp)
+    chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass)
+
+    npt.assert_(
+        pval > alpha,
+        f'chisquare - test for {msg} at arg = {str(arg)} with pval = {str(pval)}'
+    )
+
+
+def check_scale_docstring(distfn):
+    if distfn.__doc__ is not None:
+        # Docstrings can be stripped if interpreter is run with -OO
+        npt.assert_('scale' not in distfn.__doc__)
+
+
+@pytest.mark.parametrize('method', ['pmf', 'logpmf', 'cdf', 'logcdf',
+                                    'sf', 'logsf', 'ppf', 'isf'])
+@pytest.mark.parametrize('distname, args', distdiscrete)
+def test_methods_with_lists(method, distname, args):
+    # Test that the discrete distributions can accept Python lists
+    # as arguments.
+    try:
+        dist = getattr(stats, distname)
+    except TypeError:
+        return
+    if method in ['ppf', 'isf']:
+        z = [0.1, 0.2]
+    else:
+        z = [0, 1]
+    p2 = [[p]*2 for p in args]
+    loc = [0, 1]
+    # Exercise the parametrized method (not just pmf) with list arguments.
+    f = getattr(dist, method)
+    result = f(z, *p2, loc=loc)
+    npt.assert_allclose(result,
+                        [f(*v) for v in zip(z, *p2, loc)],
+                        rtol=1e-15, atol=1e-15)
+
+
+@pytest.mark.parametrize('distname, args', invdistdiscrete)
+def test_cdf_gh13280_regression(distname, args):
+    # Test for nan output when shape parameters are invalid
+    dist = getattr(stats, distname)
+    x = np.arange(-2, 15)
+    vals = dist.cdf(x, *args)
+    expected = np.nan
+    npt.assert_equal(vals, expected)
+
+
+def cases_test_discrete_integer_shapes():
+    # distribution parameters that are only allowed to be integral when
+    # fitting, but are allowed to be real as input to PDF, etc.
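+    # For example, nbinom.pmf(3, 2.5, 0.4) is well defined (the pmf is
+    # written in terms of gamma functions), so nbinom's `n` is exempted
+    # below even though it is declared integral for fitting.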
+ integrality_exceptions = {'nbinom': {'n'}, 'betanbinom': {'n'}} + + seen = set() + for distname, shapes in distdiscrete: + if distname in seen: + continue + seen.add(distname) + + try: + dist = getattr(stats, distname) + except TypeError: + continue + + shape_info = dist._shape_info() + + for i, shape in enumerate(shape_info): + if (shape.name in integrality_exceptions.get(distname, set()) or + not shape.integrality): + continue + + yield distname, shape.name, shapes + + +@pytest.mark.parametrize('distname, shapename, shapes', + cases_test_discrete_integer_shapes()) +def test_integer_shapes(distname, shapename, shapes): + dist = getattr(stats, distname) + shape_info = dist._shape_info() + shape_names = [shape.name for shape in shape_info] + i = shape_names.index(shapename) # this element of params must be integral + + shapes_copy = list(shapes) + + valid_shape = shapes[i] + invalid_shape = valid_shape - 0.5 # arbitrary non-integral value + new_valid_shape = valid_shape - 1 + shapes_copy[i] = [[valid_shape], [invalid_shape], [new_valid_shape]] + + a, b = dist.support(*shapes) + x = np.round(np.linspace(a, b, 5)) + + pmf = dist.pmf(x, *shapes_copy) + assert not np.any(np.isnan(pmf[0, :])) + assert np.all(np.isnan(pmf[1, :])) + assert not np.any(np.isnan(pmf[2, :])) + + +def test_frozen_attributes(): + # gh-14827 reported that all frozen distributions had both pmf and pdf + # attributes; continuous should have pdf and discrete should have pmf. + message = "'rv_discrete_frozen' object has no attribute" + with pytest.raises(AttributeError, match=message): + stats.binom(10, 0.5).pdf + with pytest.raises(AttributeError, match=message): + stats.binom(10, 0.5).logpdf + stats.binom.pdf = "herring" + frozen_binom = stats.binom(10, 0.5) + assert isinstance(frozen_binom, rv_discrete_frozen) + delattr(stats.binom, 'pdf') + + +@pytest.mark.parametrize('distname, shapes', distdiscrete) +def test_interval(distname, shapes): + # gh-11026 reported that `interval` returns incorrect values when + # `confidence=1`. The values were not incorrect, but it was not intuitive + # that the left end of the interval should extend beyond the support of the + # distribution. Confirm that this is the behavior for all distributions. 
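+    # For a discrete distribution with support [a, b], cdf(a - 1) == 0 and
+    # cdf(b) == 1, so ppf(0) == a - 1 and ppf(1) == b; interval(1) is then
+    # (a - 1, b), extending one step to the left of the support.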
+ if isinstance(distname, str): + dist = getattr(stats, distname) + else: + dist = distname + a, b = dist.support(*shapes) + npt.assert_equal(dist.ppf([0, 1], *shapes), (a-1, b)) + npt.assert_equal(dist.isf([1, 0], *shapes), (a-1, b)) + npt.assert_equal(dist.interval(1, *shapes), (a-1, b)) + + +@pytest.mark.xfail_on_32bit("Sensible to machine precision") +def test_rv_sample(): + # Thoroughly test rv_sample and check that gh-3758 is resolved + + # Generate a random discrete distribution + rng = np.random.default_rng(98430143469) + xk = np.sort(rng.random(10) * 10) + pk = rng.random(10) + pk /= np.sum(pk) + dist = stats.rv_discrete(values=(xk, pk)) + + # Generate points to the left and right of xk + xk_left = (np.array([0] + xk[:-1].tolist()) + xk)/2 + xk_right = (np.array(xk[1:].tolist() + [xk[-1]+1]) + xk)/2 + + # Generate points to the left and right of cdf + cdf2 = np.cumsum(pk) + cdf2_left = (np.array([0] + cdf2[:-1].tolist()) + cdf2)/2 + cdf2_right = (np.array(cdf2[1:].tolist() + [1]) + cdf2)/2 + + # support - leftmost and rightmost xk + a, b = dist.support() + assert_allclose(a, xk[0]) + assert_allclose(b, xk[-1]) + + # pmf - supported only on the xk + assert_allclose(dist.pmf(xk), pk) + assert_allclose(dist.pmf(xk_right), 0) + assert_allclose(dist.pmf(xk_left), 0) + + # logpmf is log of the pmf; log(0) = -np.inf + with np.errstate(divide='ignore'): + assert_allclose(dist.logpmf(xk), np.log(pk)) + assert_allclose(dist.logpmf(xk_right), -np.inf) + assert_allclose(dist.logpmf(xk_left), -np.inf) + + # cdf - the cumulative sum of the pmf + assert_allclose(dist.cdf(xk), cdf2) + assert_allclose(dist.cdf(xk_right), cdf2) + assert_allclose(dist.cdf(xk_left), [0]+cdf2[:-1].tolist()) + + with np.errstate(divide='ignore'): + assert_allclose(dist.logcdf(xk), np.log(dist.cdf(xk)), + atol=1e-15) + assert_allclose(dist.logcdf(xk_right), np.log(dist.cdf(xk_right)), + atol=1e-15) + assert_allclose(dist.logcdf(xk_left), np.log(dist.cdf(xk_left)), + atol=1e-15) + + # sf is 1-cdf + assert_allclose(dist.sf(xk), 1-dist.cdf(xk)) + assert_allclose(dist.sf(xk_right), 1-dist.cdf(xk_right)) + assert_allclose(dist.sf(xk_left), 1-dist.cdf(xk_left)) + + with np.errstate(divide='ignore'): + assert_allclose(dist.logsf(xk), np.log(dist.sf(xk)), + atol=1e-15) + assert_allclose(dist.logsf(xk_right), np.log(dist.sf(xk_right)), + atol=1e-15) + assert_allclose(dist.logsf(xk_left), np.log(dist.sf(xk_left)), + atol=1e-15) + + # ppf + assert_allclose(dist.ppf(cdf2), xk) + assert_allclose(dist.ppf(cdf2_left), xk) + assert_allclose(dist.ppf(cdf2_right)[:-1], xk[1:]) + assert_allclose(dist.ppf(0), a - 1) + assert_allclose(dist.ppf(1), b) + + # isf + sf2 = dist.sf(xk) + assert_allclose(dist.isf(sf2), xk) + assert_allclose(dist.isf(1-cdf2_left), dist.ppf(cdf2_left)) + assert_allclose(dist.isf(1-cdf2_right), dist.ppf(cdf2_right)) + assert_allclose(dist.isf(0), b) + assert_allclose(dist.isf(1), a - 1) + + # interval is (ppf(alpha/2), isf(alpha/2)) + ps = np.linspace(0.01, 0.99, 10) + int2 = dist.ppf(ps/2), dist.isf(ps/2) + assert_allclose(dist.interval(1-ps), int2) + assert_allclose(dist.interval(0), dist.median()) + assert_allclose(dist.interval(1), (a-1, b)) + + # median is simply ppf(0.5) + med2 = dist.ppf(0.5) + assert_allclose(dist.median(), med2) + + # all four stats (mean, var, skew, and kurtosis) from the definitions + mean2 = np.sum(xk*pk) + var2 = np.sum((xk - mean2)**2 * pk) + skew2 = np.sum((xk - mean2)**3 * pk) / var2**(3/2) + kurt2 = np.sum((xk - mean2)**4 * pk) / var2**2 - 3 + assert_allclose(dist.mean(), mean2) + 
assert_allclose(dist.std(), np.sqrt(var2)) + assert_allclose(dist.var(), var2) + assert_allclose(dist.stats(moments='mvsk'), (mean2, var2, skew2, kurt2)) + + # noncentral moment against definition + mom3 = np.sum((xk**3) * pk) + assert_allclose(dist.moment(3), mom3) + + # expect - check against moments + assert_allclose(dist.expect(lambda x: 1), 1) + assert_allclose(dist.expect(), mean2) + assert_allclose(dist.expect(lambda x: x**3), mom3) + + # entropy is the negative of the expected value of log(p) + with np.errstate(divide='ignore'): + assert_allclose(-dist.expect(lambda x: dist.logpmf(x)), dist.entropy()) + + # RVS is just ppf of uniform random variates + rng = np.random.default_rng(98430143469) + rvs = dist.rvs(size=100, random_state=rng) + rng = np.random.default_rng(98430143469) + rvs0 = dist.ppf(rng.random(size=100)) + assert_allclose(rvs, rvs0) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_discrete_distns.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_discrete_distns.py new file mode 100644 index 0000000000000000000000000000000000000000..c5993ebde3750efb055945cc235068b76eae3b7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_discrete_distns.py @@ -0,0 +1,629 @@ +import pytest +import itertools + +from scipy.stats import (betabinom, betanbinom, hypergeom, nhypergeom, + bernoulli, boltzmann, skellam, zipf, zipfian, binom, + nbinom, nchypergeom_fisher, nchypergeom_wallenius, + randint) + +import numpy as np +from numpy.testing import ( + assert_almost_equal, assert_equal, assert_allclose, suppress_warnings +) +from scipy.special import binom as special_binom +from scipy.optimize import root_scalar +from scipy.integrate import quad + + +# The expected values were computed with Wolfram Alpha, using +# the expression CDF[HypergeometricDistribution[N, n, M], k]. +@pytest.mark.parametrize('k, M, n, N, expected, rtol', + [(3, 10, 4, 5, + 0.9761904761904762, 1e-15), + (107, 10000, 3000, 215, + 0.9999999997226765, 1e-15), + (10, 10000, 3000, 215, + 2.681682217692179e-21, 5e-11)]) +def test_hypergeom_cdf(k, M, n, N, expected, rtol): + p = hypergeom.cdf(k, M, n, N) + assert_allclose(p, expected, rtol=rtol) + + +# The expected values were computed with Wolfram Alpha, using +# the expression SurvivalFunction[HypergeometricDistribution[N, n, M], k]. 
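+# One way to cross-check such a tail value (an editor's sketch, not part of
+# the original file) is to sum the hypergeometric pmf over the upper tail
+# with mpmath:
+#     from mpmath import mp
+#     mp.dps = 50
+#     sf = mp.nsum(lambda i: mp.binomial(n, i) * mp.binomial(M - n, N - i)
+#                  / mp.binomial(M, N), [k + 1, min(n, N)])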
+@pytest.mark.parametrize('k, M, n, N, expected, rtol', + [(25, 10000, 3000, 215, + 0.9999999999052958, 1e-15), + (125, 10000, 3000, 215, + 1.4416781705752128e-18, 5e-11)]) +def test_hypergeom_sf(k, M, n, N, expected, rtol): + p = hypergeom.sf(k, M, n, N) + assert_allclose(p, expected, rtol=rtol) + + +def test_hypergeom_logpmf(): + # symmetries test + # f(k,N,K,n) = f(n-k,N,N-K,n) = f(K-k,N,K,N-n) = f(k,N,n,K) + k = 5 + N = 50 + K = 10 + n = 5 + logpmf1 = hypergeom.logpmf(k, N, K, n) + logpmf2 = hypergeom.logpmf(n - k, N, N - K, n) + logpmf3 = hypergeom.logpmf(K - k, N, K, N - n) + logpmf4 = hypergeom.logpmf(k, N, n, K) + assert_almost_equal(logpmf1, logpmf2, decimal=12) + assert_almost_equal(logpmf1, logpmf3, decimal=12) + assert_almost_equal(logpmf1, logpmf4, decimal=12) + + # test related distribution + # Bernoulli distribution if n = 1 + k = 1 + N = 10 + K = 7 + n = 1 + hypergeom_logpmf = hypergeom.logpmf(k, N, K, n) + bernoulli_logpmf = bernoulli.logpmf(k, K/N) + assert_almost_equal(hypergeom_logpmf, bernoulli_logpmf, decimal=12) + + +def test_nhypergeom_pmf(): + # test with hypergeom + M, n, r = 45, 13, 8 + k = 6 + NHG = nhypergeom.pmf(k, M, n, r) + HG = hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1)) + assert_allclose(HG, NHG, rtol=1e-10) + + +def test_nhypergeom_pmfcdf(): + # test pmf and cdf with arbitrary values. + M = 8 + n = 3 + r = 4 + support = np.arange(n+1) + pmf = nhypergeom.pmf(support, M, n, r) + cdf = nhypergeom.cdf(support, M, n, r) + assert_allclose(pmf, [1/14, 3/14, 5/14, 5/14], rtol=1e-13) + assert_allclose(cdf, [1/14, 4/14, 9/14, 1.0], rtol=1e-13) + + +def test_nhypergeom_r0(): + # test with `r = 0`. + M = 10 + n = 3 + r = 0 + pmf = nhypergeom.pmf([[0, 1, 2, 0], [1, 2, 0, 3]], M, n, r) + assert_allclose(pmf, [[1, 0, 0, 1], [0, 0, 1, 0]], rtol=1e-13) + + +def test_nhypergeom_rvs_shape(): + # Check that when given a size with more dimensions than the + # dimensions of the broadcast parameters, rvs returns an array + # with the correct shape. + x = nhypergeom.rvs(22, [7, 8, 9], [[12], [13]], size=(5, 1, 2, 3)) + assert x.shape == (5, 1, 2, 3) + + +def test_nhypergeom_accuracy(): + # Check that nhypergeom.rvs post-gh-13431 gives the same values as + # inverse transform sampling + np.random.seed(0) + x = nhypergeom.rvs(22, 7, 11, size=100) + np.random.seed(0) + p = np.random.uniform(size=100) + y = nhypergeom.ppf(p, 22, 7, 11) + assert_equal(x, y) + + +def test_boltzmann_upper_bound(): + k = np.arange(-3, 5) + + N = 1 + p = boltzmann.pmf(k, 0.123, N) + expected = k == 0 + assert_equal(p, expected) + + lam = np.log(2) + N = 3 + p = boltzmann.pmf(k, lam, N) + expected = [0, 0, 0, 4/7, 2/7, 1/7, 0, 0] + assert_allclose(p, expected, rtol=1e-13) + + c = boltzmann.cdf(k, lam, N) + expected = [0, 0, 0, 4/7, 6/7, 1, 1, 1] + assert_allclose(c, expected, rtol=1e-13) + + +def test_betabinom_a_and_b_unity(): + # test limiting case that betabinom(n, 1, 1) is a discrete uniform + # distribution from 0 to n + n = 20 + k = np.arange(n + 1) + p = betabinom(n, 1, 1).pmf(k) + expected = np.repeat(1 / (n + 1), n + 1) + assert_almost_equal(p, expected) + + +@pytest.mark.parametrize('dtypes', itertools.product(*[(int, float)]*3)) +def test_betabinom_stats_a_and_b_integers_gh18026(dtypes): + # gh-18026 reported that `betabinom` kurtosis calculation fails when some + # parameters are integers. Check that this is resolved. 
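+    # For instance, both of the following should give the same finite
+    # kurtosis (an editor's illustration, not original test code):
+    #     betabinom.stats(10, 2, 3, moments='k')      # integer a, b
+    #     betabinom.stats(10, 2.0, 3.0, moments='k')  # float a, b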
+ n_type, a_type, b_type = dtypes + n, a, b = n_type(10), a_type(2), b_type(3) + assert_allclose(betabinom.stats(n, a, b, moments='k'), -0.6904761904761907) + + +def test_betabinom_bernoulli(): + # test limiting case that betabinom(1, a, b) = bernoulli(a / (a + b)) + a = 2.3 + b = 0.63 + k = np.arange(2) + p = betabinom(1, a, b).pmf(k) + expected = bernoulli(a / (a + b)).pmf(k) + assert_almost_equal(p, expected) + + +def test_issue_10317(): + alpha, n, p = 0.9, 10, 1 + assert_equal(nbinom.interval(confidence=alpha, n=n, p=p), (0, 0)) + + +def test_issue_11134(): + alpha, n, p = 0.95, 10, 0 + assert_equal(binom.interval(confidence=alpha, n=n, p=p), (0, 0)) + + +def test_issue_7406(): + np.random.seed(0) + assert_equal(binom.ppf(np.random.rand(10), 0, 0.5), 0) + + # Also check that endpoints (q=0, q=1) are correct + assert_equal(binom.ppf(0, 0, 0.5), -1) + assert_equal(binom.ppf(1, 0, 0.5), 0) + + +def test_issue_5122(): + p = 0 + n = np.random.randint(100, size=10) + + x = 0 + ppf = binom.ppf(x, n, p) + assert_equal(ppf, -1) + + x = np.linspace(0.01, 0.99, 10) + ppf = binom.ppf(x, n, p) + assert_equal(ppf, 0) + + x = 1 + ppf = binom.ppf(x, n, p) + assert_equal(ppf, n) + + +def test_issue_1603(): + assert_equal(binom(1000, np.logspace(-3, -100)).ppf(0.01), 0) + + +def test_issue_5503(): + p = 0.5 + x = np.logspace(3, 14, 12) + assert_allclose(binom.cdf(x, 2*x, p), 0.5, atol=1e-2) + + +@pytest.mark.parametrize('x, n, p, cdf_desired', [ + (300, 1000, 3/10, 0.51559351981411995636), + (3000, 10000, 3/10, 0.50493298381929698016), + (30000, 100000, 3/10, 0.50156000591726422864), + (300000, 1000000, 3/10, 0.50049331906666960038), + (3000000, 10000000, 3/10, 0.50015600124585261196), + (30000000, 100000000, 3/10, 0.50004933192735230102), + (30010000, 100000000, 3/10, 0.98545384016570790717), + (29990000, 100000000, 3/10, 0.01455017177985268670), + (29950000, 100000000, 3/10, 5.02250963487432024943e-28), +]) +def test_issue_5503pt2(x, n, p, cdf_desired): + assert_allclose(binom.cdf(x, n, p), cdf_desired) + + +def test_issue_5503pt3(): + # From Wolfram Alpha: CDF[BinomialDistribution[1e12, 1e-12], 2] + assert_allclose(binom.cdf(2, 10**12, 10**-12), 0.91969860292869777384) + + +def test_issue_6682(): + # Reference value from R: + # options(digits=16) + # print(pnbinom(250, 50, 32/63, lower.tail=FALSE)) + assert_allclose(nbinom.sf(250, 50, 32./63.), 1.460458510976452e-35) + + +def test_issue_19747(): + # test that negative k does not raise an error in nbinom.logcdf + result = nbinom.logcdf([5, -1, 1], 5, 0.5) + reference = [-0.47313352, -np.inf, -2.21297293] + assert_allclose(result, reference) + + +def test_boost_divide_by_zero_issue_15101(): + n = 1000 + p = 0.01 + k = 996 + assert_allclose(binom.pmf(k, n, p), 0.0) + + +def test_skellam_gh11474(): + # test issue reported in gh-11474 caused by `cdfchn` + mu = [1, 10, 100, 1000, 5000, 5050, 5100, 5250, 6000] + cdf = skellam.cdf(0, mu, mu) + # generated in R + # library(skellam) + # options(digits = 16) + # mu = c(1, 10, 100, 1000, 5000, 5050, 5100, 5250, 6000) + # pskellam(0, mu, mu, TRUE) + cdf_expected = [0.6542541612768356, 0.5448901559424127, 0.5141135799745580, + 0.5044605891382528, 0.5019947363350450, 0.5019848365953181, + 0.5019750827993392, 0.5019466621805060, 0.5018209330219539] + assert_allclose(cdf, cdf_expected) + + +class TestZipfian: + def test_zipfian_asymptotic(self): + # test limiting case that zipfian(a, n) -> zipf(a) as n-> oo + a = 6.5 + N = 10000000 + k = np.arange(1, 21) + assert_allclose(zipfian.pmf(k, a, N), zipf.pmf(k, a)) + 
assert_allclose(zipfian.cdf(k, a, N), zipf.cdf(k, a)) + assert_allclose(zipfian.sf(k, a, N), zipf.sf(k, a)) + assert_allclose(zipfian.stats(a, N, moments='msvk'), + zipf.stats(a, moments='msvk')) + + def test_zipfian_continuity(self): + # test that zipfian(0.999999, n) ~ zipfian(1.000001, n) + # (a = 1 switches between methods of calculating harmonic sum) + alt1, agt1 = 0.99999999, 1.00000001 + N = 30 + k = np.arange(1, N + 1) + assert_allclose(zipfian.pmf(k, alt1, N), zipfian.pmf(k, agt1, N), + rtol=5e-7) + assert_allclose(zipfian.cdf(k, alt1, N), zipfian.cdf(k, agt1, N), + rtol=5e-7) + assert_allclose(zipfian.sf(k, alt1, N), zipfian.sf(k, agt1, N), + rtol=5e-7) + assert_allclose(zipfian.stats(alt1, N, moments='msvk'), + zipfian.stats(agt1, N, moments='msvk'), rtol=5e-7) + + def test_zipfian_R(self): + # test against R VGAM package + # library(VGAM) + # k <- c(13, 16, 1, 4, 4, 8, 10, 19, 5, 7) + # a <- c(1.56712977, 3.72656295, 5.77665117, 9.12168729, 5.79977172, + # 4.92784796, 9.36078764, 4.3739616 , 7.48171872, 4.6824154) + # n <- c(70, 80, 48, 65, 83, 89, 50, 30, 20, 20) + # pmf <- dzipf(k, N = n, shape = a) + # cdf <- pzipf(k, N = n, shape = a) + # print(pmf) + # print(cdf) + np.random.seed(0) + k = np.random.randint(1, 20, size=10) + a = np.random.rand(10)*10 + 1 + n = np.random.randint(1, 100, size=10) + pmf = [8.076972e-03, 2.950214e-05, 9.799333e-01, 3.216601e-06, + 3.158895e-04, 3.412497e-05, 4.350472e-10, 2.405773e-06, + 5.860662e-06, 1.053948e-04] + cdf = [0.8964133, 0.9998666, 0.9799333, 0.9999995, 0.9998584, + 0.9999458, 1.0000000, 0.9999920, 0.9999977, 0.9998498] + # skip the first point; zipUC is not accurate for low a, n + assert_allclose(zipfian.pmf(k, a, n)[1:], pmf[1:], rtol=1e-6) + assert_allclose(zipfian.cdf(k, a, n)[1:], cdf[1:], rtol=5e-5) + + np.random.seed(0) + naive_tests = np.vstack((np.logspace(-2, 1, 10), + np.random.randint(2, 40, 10))).T + + @pytest.mark.parametrize("a, n", naive_tests) + def test_zipfian_naive(self, a, n): + # test against bare-bones implementation + + @np.vectorize + def Hns(n, s): + """Naive implementation of harmonic sum""" + return (1/np.arange(1, n+1)**s).sum() + + @np.vectorize + def pzip(k, a, n): + """Naive implementation of zipfian pmf""" + if k < 1 or k > n: + return 0. 
+ else: + return 1 / k**a / Hns(n, a) + + k = np.arange(n+1) + pmf = pzip(k, a, n) + cdf = np.cumsum(pmf) + mean = np.average(k, weights=pmf) + var = np.average((k - mean)**2, weights=pmf) + std = var**0.5 + skew = np.average(((k-mean)/std)**3, weights=pmf) + kurtosis = np.average(((k-mean)/std)**4, weights=pmf) - 3 + assert_allclose(zipfian.pmf(k, a, n), pmf) + assert_allclose(zipfian.cdf(k, a, n), cdf) + assert_allclose(zipfian.stats(a, n, moments="mvsk"), + [mean, var, skew, kurtosis]) + + +class TestNCH: + np.random.seed(2) # seeds 0 and 1 had some xl = xu; randint failed + shape = (2, 4, 3) + max_m = 100 + m1 = np.random.randint(1, max_m, size=shape) # red balls + m2 = np.random.randint(1, max_m, size=shape) # white balls + N = m1 + m2 # total balls + n = randint.rvs(0, N, size=N.shape) # number of draws + xl = np.maximum(0, n-m2) # lower bound of support + xu = np.minimum(n, m1) # upper bound of support + x = randint.rvs(xl, xu, size=xl.shape) + odds = np.random.rand(*x.shape)*2 + + # test output is more readable when function names (strings) are passed + @pytest.mark.parametrize('dist_name', + ['nchypergeom_fisher', 'nchypergeom_wallenius']) + def test_nch_hypergeom(self, dist_name): + # Both noncentral hypergeometric distributions reduce to the + # hypergeometric distribution when odds = 1 + dists = {'nchypergeom_fisher': nchypergeom_fisher, + 'nchypergeom_wallenius': nchypergeom_wallenius} + dist = dists[dist_name] + x, N, m1, n = self.x, self.N, self.m1, self.n + assert_allclose(dist.pmf(x, N, m1, n, odds=1), + hypergeom.pmf(x, N, m1, n)) + + def test_nchypergeom_fisher_naive(self): + # test against a very simple implementation + x, N, m1, n, odds = self.x, self.N, self.m1, self.n, self.odds + + @np.vectorize + def pmf_mean_var(x, N, m1, n, w): + # simple implementation of nchypergeom_fisher pmf + m2 = N - m1 + xl = np.maximum(0, n-m2) + xu = np.minimum(n, m1) + + def f(x): + t1 = special_binom(m1, x) + t2 = special_binom(m2, n - x) + return t1 * t2 * w**x + + def P(k): + return sum(f(y)*y**k for y in range(xl, xu + 1)) + + P0 = P(0) + P1 = P(1) + P2 = P(2) + pmf = f(x) / P0 + mean = P1 / P0 + var = P2 / P0 - (P1 / P0)**2 + return pmf, mean, var + + pmf, mean, var = pmf_mean_var(x, N, m1, n, odds) + assert_allclose(nchypergeom_fisher.pmf(x, N, m1, n, odds), pmf) + assert_allclose(nchypergeom_fisher.stats(N, m1, n, odds, moments='m'), + mean) + assert_allclose(nchypergeom_fisher.stats(N, m1, n, odds, moments='v'), + var) + + def test_nchypergeom_wallenius_naive(self): + # test against a very simple implementation + + np.random.seed(2) + shape = (2, 4, 3) + max_m = 100 + m1 = np.random.randint(1, max_m, size=shape) + m2 = np.random.randint(1, max_m, size=shape) + N = m1 + m2 + n = randint.rvs(0, N, size=N.shape) + xl = np.maximum(0, n-m2) + xu = np.minimum(n, m1) + x = randint.rvs(xl, xu, size=xl.shape) + w = np.random.rand(*x.shape)*2 + + def support(N, m1, n, w): + m2 = N - m1 + xl = np.maximum(0, n-m2) + xu = np.minimum(n, m1) + return xl, xu + + @np.vectorize + def mean(N, m1, n, w): + m2 = N - m1 + xl, xu = support(N, m1, n, w) + + def fun(u): + return u/m1 + (1 - (n-u)/m2)**w - 1 + + return root_scalar(fun, bracket=(xl, xu)).root + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + message="invalid value encountered in mean") + assert_allclose(nchypergeom_wallenius.mean(N, m1, n, w), + mean(N, m1, n, w), rtol=2e-2) + + @np.vectorize + def variance(N, m1, n, w): + m2 = N - m1 + u = mean(N, m1, n, w) + a = u * (m1 - u) + b = (n-u)*(u + m2 - n) + return N*a*b / 
((N-1) * (m1*b + m2*a)) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + message="invalid value encountered in mean") + assert_allclose( + nchypergeom_wallenius.stats(N, m1, n, w, moments='v'), + variance(N, m1, n, w), + rtol=5e-2 + ) + + @np.vectorize + def pmf(x, N, m1, n, w): + m2 = N - m1 + xl, xu = support(N, m1, n, w) + + def integrand(t): + D = w*(m1 - x) + (m2 - (n-x)) + res = (1-t**(w/D))**x * (1-t**(1/D))**(n-x) + return res + + def f(x): + t1 = special_binom(m1, x) + t2 = special_binom(m2, n - x) + the_integral = quad(integrand, 0, 1, + epsrel=1e-16, epsabs=1e-16) + return t1 * t2 * the_integral[0] + + return f(x) + + pmf0 = pmf(x, N, m1, n, w) + pmf1 = nchypergeom_wallenius.pmf(x, N, m1, n, w) + + atol, rtol = 1e-6, 1e-6 + i = np.abs(pmf1 - pmf0) < atol + rtol*np.abs(pmf0) + assert i.sum() > np.prod(shape) / 2 # works at least half the time + + # for those that fail, discredit the naive implementation + for N, m1, n, w in zip(N[~i], m1[~i], n[~i], w[~i]): + # get the support + m2 = N - m1 + xl, xu = support(N, m1, n, w) + x = np.arange(xl, xu + 1) + + # calculate sum of pmf over the support + # the naive implementation is very wrong in these cases + assert pmf(x, N, m1, n, w).sum() < .5 + assert_allclose(nchypergeom_wallenius.pmf(x, N, m1, n, w).sum(), 1) + + def test_wallenius_against_mpmath(self): + # precompute data with mpmath since naive implementation above + # is not reliable. See source code in gh-13330. + M = 50 + n = 30 + N = 20 + odds = 2.25 + # Expected results, computed with mpmath. + sup = np.arange(21) + pmf = np.array([3.699003068656875e-20, + 5.89398584245431e-17, + 2.1594437742911123e-14, + 3.221458044649955e-12, + 2.4658279241205077e-10, + 1.0965862603981212e-08, + 3.057890479665704e-07, + 5.622818831643761e-06, + 7.056482841531681e-05, + 0.000618899425358671, + 0.003854172932571669, + 0.01720592676256026, + 0.05528844897093792, + 0.12772363313574242, + 0.21065898367825722, + 0.24465958845359234, + 0.1955114898110033, + 0.10355390084949237, + 0.03414490375225675, + 0.006231989845775931, + 0.0004715577304677075]) + mean = 14.808018384813426 + var = 2.6085975877923717 + + # nchypergeom_wallenius.pmf returns 0 for pmf(0) and pmf(1), and pmf(2) + # has only three digits of accuracy (~ 2.1511e-14). + assert_allclose(nchypergeom_wallenius.pmf(sup, M, n, N, odds), pmf, + rtol=1e-13, atol=1e-13) + assert_allclose(nchypergeom_wallenius.mean(M, n, N, odds), + mean, rtol=1e-13) + assert_allclose(nchypergeom_wallenius.var(M, n, N, odds), + var, rtol=1e-11) + + @pytest.mark.parametrize('dist_name', + ['nchypergeom_fisher', 'nchypergeom_wallenius']) + def test_rvs_shape(self, dist_name): + # Check that when given a size with more dimensions than the + # dimensions of the broadcast parameters, rvs returns an array + # with the correct shape. + dists = {'nchypergeom_fisher': nchypergeom_fisher, + 'nchypergeom_wallenius': nchypergeom_wallenius} + dist = dists[dist_name] + x = dist.rvs(50, 30, [[10], [20]], [0.5, 1.0, 2.0], size=(5, 1, 2, 3)) + assert x.shape == (5, 1, 2, 3) + + +@pytest.mark.parametrize("mu, q, expected", + [[10, 120, -1.240089881791596e-38], + [1500, 0, -86.61466680572661]]) +def test_nbinom_11465(mu, q, expected): + # test nbinom.logcdf at extreme tails + size = 20 + n, p = size, size/(size+mu) + # In R: + # options(digits=16) + # pnbinom(mu=10, size=20, q=120, log.p=TRUE) + assert_allclose(nbinom.logcdf(q, n, p), expected) + + +def test_gh_17146(): + # Check that discrete distributions return PMF of zero at non-integral x. 
+ # See gh-17146. + x = np.linspace(0, 1, 11) + p = 0.8 + pmf = bernoulli(p).pmf(x) + i = (x % 1 == 0) + assert_allclose(pmf[-1], p) + assert_allclose(pmf[0], 1-p) + assert_equal(pmf[~i], 0) + + +class TestBetaNBinom: + @pytest.mark.parametrize('x, n, a, b, ref', + [[5, 5e6, 5, 20, 1.1520944824139114e-107], + [100, 50, 5, 20, 0.002855762954310226], + [10000, 1000, 5, 20, 1.9648515726019154e-05]]) + def test_betanbinom_pmf(self, x, n, a, b, ref): + # test that PMF stays accurate in the distribution tails + # reference values computed with mpmath + # from mpmath import mp + # mp.dps = 500 + # def betanbinom_pmf(k, n, a, b): + # k = mp.mpf(k) + # a = mp.mpf(a) + # b = mp.mpf(b) + # n = mp.mpf(n) + # return float(mp.binomial(n + k - mp.one, k) + # * mp.beta(a + n, b + k) / mp.beta(a, b)) + assert_allclose(betanbinom.pmf(x, n, a, b), ref, rtol=1e-10) + + + @pytest.mark.parametrize('n, a, b, ref', + [[10000, 5000, 50, 0.12841520515722202], + [10, 9, 9, 7.9224400871459695], + [100, 1000, 10, 1.5849602176622748]]) + def test_betanbinom_kurtosis(self, n, a, b, ref): + # reference values were computed via mpmath + # from mpmath import mp + # def kurtosis_betanegbinom(n, a, b): + # n = mp.mpf(n) + # a = mp.mpf(a) + # b = mp.mpf(b) + # four = mp.mpf(4.) + # mean = n * b / (a - mp.one) + # var = (n * b * (n + a - 1.) * (a + b - 1.) + # / ((a - 2.) * (a - 1.)**2.)) + # def f(k): + # return (mp.binomial(n + k - mp.one, k) + # * mp.beta(a + n, b + k) / mp.beta(a, b) + # * (k - mean)**four) + # fourth_moment = mp.nsum(f, [0, mp.inf]) + # return float(fourth_moment/var**2 - 3.) + assert_allclose(betanbinom.stats(n, a, b, moments="k"), + ref, rtol=3e-15) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_distributions.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..6d91bb8a6c3354102d498ac246086511e9c94058 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_distributions.py @@ -0,0 +1,9676 @@ +""" +Test functions for stats module +""" +import warnings +import re +import sys +import pickle +from pathlib import Path +import os +import json +import platform + +from numpy.testing import (assert_equal, assert_array_equal, + assert_almost_equal, assert_array_almost_equal, + assert_allclose, assert_, assert_warns, + assert_array_less, suppress_warnings, IS_PYPY) +import pytest +from pytest import raises as assert_raises + +import numpy +import numpy as np +from numpy import typecodes, array +from numpy.lib.recfunctions import rec_append_fields +from scipy import special +from scipy._lib._util import check_random_state +from scipy.integrate import (IntegrationWarning, quad, trapezoid, + cumulative_trapezoid) +import scipy.stats as stats +from scipy.stats._distn_infrastructure import argsreduce +import scipy.stats.distributions + +from scipy.special import xlogy, polygamma, entr +from scipy.stats._distr_params import distcont, invdistcont +from .test_discrete_basic import distdiscrete, invdistdiscrete +from scipy.stats._continuous_distns import FitDataError, _argus_phi +from scipy.optimize import root, fmin, differential_evolution +from itertools import product + +# python -OO strips docstrings +DOCSTRINGS_STRIPPED = sys.flags.optimize > 1 + +# Failing on macOS 11, Intel CPUs. See gh-14901 +MACOS_INTEL = (sys.platform == 'darwin') and (platform.machine() == 'x86_64') + + +# distributions to skip while testing the fix for the support method +# introduced in gh-13294. 
These distributions are skipped as they +# always return a non-nan support for every parametrization. +skip_test_support_gh13294_regression = ['tukeylambda', 'pearson3'] + + +def _assert_hasattr(a, b, msg=None): + if msg is None: + msg = f'{a} does not have attribute {b}' + assert_(hasattr(a, b), msg=msg) + + +def test_api_regression(): + # https://github.com/scipy/scipy/issues/3802 + _assert_hasattr(scipy.stats.distributions, 'f_gen') + + +def test_distributions_submodule(): + actual = set(scipy.stats.distributions.__all__) + continuous = [dist[0] for dist in distcont] # continuous dist names + discrete = [dist[0] for dist in distdiscrete] # discrete dist names + other = ['rv_discrete', 'rv_continuous', 'rv_histogram', + 'entropy', 'trapz'] + expected = continuous + discrete + other + + # need to remove, e.g., + # + expected = set(filter(lambda s: not str(s).startswith('<'), expected)) + + assert actual == expected + + +class TestVonMises: + @pytest.mark.parametrize('k', [0.1, 1, 101]) + @pytest.mark.parametrize('x', [0, 1, np.pi, 10, 100]) + def test_vonmises_periodic(self, k, x): + def check_vonmises_pdf_periodic(k, L, s, x): + vm = stats.vonmises(k, loc=L, scale=s) + assert_almost_equal(vm.pdf(x), vm.pdf(x % (2 * np.pi * s))) + + def check_vonmises_cdf_periodic(k, L, s, x): + vm = stats.vonmises(k, loc=L, scale=s) + assert_almost_equal(vm.cdf(x) % 1, + vm.cdf(x % (2 * np.pi * s)) % 1) + + check_vonmises_pdf_periodic(k, 0, 1, x) + check_vonmises_pdf_periodic(k, 1, 1, x) + check_vonmises_pdf_periodic(k, 0, 10, x) + + check_vonmises_cdf_periodic(k, 0, 1, x) + check_vonmises_cdf_periodic(k, 1, 1, x) + check_vonmises_cdf_periodic(k, 0, 10, x) + + def test_vonmises_line_support(self): + assert_equal(stats.vonmises_line.a, -np.pi) + assert_equal(stats.vonmises_line.b, np.pi) + + def test_vonmises_numerical(self): + vm = stats.vonmises(800) + assert_almost_equal(vm.cdf(0), 0.5) + + # Expected values of the vonmises PDF were computed using + # mpmath with 50 digits of precision: + # + # def vmpdf_mp(x, kappa): + # x = mpmath.mpf(x) + # kappa = mpmath.mpf(kappa) + # num = mpmath.exp(kappa*mpmath.cos(x)) + # den = 2 * mpmath.pi * mpmath.besseli(0, kappa) + # return num/den + + @pytest.mark.parametrize('x, kappa, expected_pdf', + [(0.1, 0.01, 0.16074242744907072), + (0.1, 25.0, 1.7515464099118245), + (0.1, 800, 0.2073272544458798), + (2.0, 0.01, 0.15849003875385817), + (2.0, 25.0, 8.356882934278192e-16), + (2.0, 800, 0.0)]) + def test_vonmises_pdf(self, x, kappa, expected_pdf): + pdf = stats.vonmises.pdf(x, kappa) + assert_allclose(pdf, expected_pdf, rtol=1e-15) + + # Expected values of the vonmises entropy were computed using + # mpmath with 50 digits of precision: + # + # def vonmises_entropy(kappa): + # kappa = mpmath.mpf(kappa) + # return (-kappa * mpmath.besseli(1, kappa) / + # mpmath.besseli(0, kappa) + mpmath.log(2 * mpmath.pi * + # mpmath.besseli(0, kappa))) + # >>> float(vonmises_entropy(kappa)) + + @pytest.mark.parametrize('kappa, expected_entropy', + [(1, 1.6274014590199897), + (5, 0.6756431570114528), + (100, -0.8811275441649473), + (1000, -2.03468891852547), + (2000, -2.3813876496587847)]) + def test_vonmises_entropy(self, kappa, expected_entropy): + entropy = stats.vonmises.entropy(kappa) + assert_allclose(entropy, expected_entropy, rtol=1e-13) + + def test_vonmises_rvs_gh4598(self): + # check that random variates wrap around as discussed in gh-4598 + seed = 30899520 + rng1 = np.random.default_rng(seed) + rng2 = np.random.default_rng(seed) + rng3 = np.random.default_rng(seed) + rvs1 
= stats.vonmises(1, loc=0, scale=1).rvs(random_state=rng1) + rvs2 = stats.vonmises(1, loc=2*np.pi, scale=1).rvs(random_state=rng2) + rvs3 = stats.vonmises(1, loc=0, + scale=(2*np.pi/abs(rvs1)+1)).rvs(random_state=rng3) + assert_allclose(rvs1, rvs2, atol=1e-15) + assert_allclose(rvs1, rvs3, atol=1e-15) + + # Expected values of the vonmises LOGPDF were computed + # using wolfram alpha: + # kappa * cos(x) - log(2*pi*I0(kappa)) + @pytest.mark.parametrize('x, kappa, expected_logpdf', + [(0.1, 0.01, -1.8279520246003170), + (0.1, 25.0, 0.5604990605420549), + (0.1, 800, -1.5734567947337514), + (2.0, 0.01, -1.8420635346185686), + (2.0, 25.0, -34.7182759850871489), + (2.0, 800, -1130.4942582548682739)]) + def test_vonmises_logpdf(self, x, kappa, expected_logpdf): + logpdf = stats.vonmises.logpdf(x, kappa) + assert_allclose(logpdf, expected_logpdf, rtol=1e-15) + + def test_vonmises_expect(self): + """ + Test that the vonmises expectation values are + computed correctly. This test checks that the + numeric integration estimates the correct normalization + (1) and mean angle (loc). These expectations are + independent of the chosen 2pi interval. + """ + rng = np.random.default_rng(6762668991392531563) + + loc, kappa, lb = rng.random(3) * 10 + res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: 1) + assert_allclose(res, 1) + assert np.issubdtype(res.dtype, np.floating) + + bounds = lb, lb + 2 * np.pi + res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: 1, *bounds) + assert_allclose(res, 1) + assert np.issubdtype(res.dtype, np.floating) + + bounds = lb, lb + 2 * np.pi + res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: np.exp(1j*x), + *bounds, complex_func=1) + assert_allclose(np.angle(res), loc % (2*np.pi)) + assert np.issubdtype(res.dtype, np.complexfloating) + + @pytest.mark.xslow + @pytest.mark.parametrize("rvs_loc", [0, 2]) + @pytest.mark.parametrize("rvs_shape", [1, 100, 1e8]) + @pytest.mark.parametrize('fix_loc', [True, False]) + @pytest.mark.parametrize('fix_shape', [True, False]) + def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_shape, + fix_loc, fix_shape): + if fix_shape and fix_loc: + pytest.skip("Nothing to fit.") + + rng = np.random.default_rng(6762668991392531563) + data = stats.vonmises.rvs(rvs_shape, size=1000, loc=rvs_loc, + random_state=rng) + + kwds = {'fscale': 1} + if fix_loc: + kwds['floc'] = rvs_loc + if fix_shape: + kwds['f0'] = rvs_shape + + _assert_less_or_close_loglike(stats.vonmises, data, + stats.vonmises.nnlf, **kwds) + + def test_vonmises_fit_bad_floc(self): + data = [-0.92923506, -0.32498224, 0.13054989, -0.97252014, 2.79658071, + -0.89110948, 1.22520295, 1.44398065, 2.49163859, 1.50315096, + 3.05437696, -2.73126329, -3.06272048, 1.64647173, 1.94509247, + -1.14328023, 0.8499056, 2.36714682, -1.6823179, -0.88359996] + data = np.asarray(data) + loc = -0.5 * np.pi + kappa_fit, loc_fit, scale_fit = stats.vonmises.fit(data, floc=loc) + assert kappa_fit == np.finfo(float).tiny + _assert_less_or_close_loglike(stats.vonmises, data, + stats.vonmises.nnlf, fscale=1, floc=loc) + + @pytest.mark.parametrize('sign', [-1, 1]) + def test_vonmises_fit_unwrapped_data(self, sign): + rng = np.random.default_rng(6762668991392531563) + data = stats.vonmises(loc=sign*0.5*np.pi, kappa=10).rvs(100000, + random_state=rng) + shifted_data = data + 4*np.pi + kappa_fit, loc_fit, scale_fit = stats.vonmises.fit(data) + kappa_fit_shifted, loc_fit_shifted, _ = stats.vonmises.fit(shifted_data) + assert_allclose(loc_fit, loc_fit_shifted) + assert_allclose(kappa_fit, 
kappa_fit_shifted) + assert scale_fit == 1 + assert -np.pi < loc_fit < np.pi + + def test_vonmises_kappa_0_gh18166(self): + # Check that kappa = 0 is supported. + dist = stats.vonmises(0) + assert_allclose(dist.pdf(0), 1 / (2 * np.pi), rtol=1e-15) + assert_allclose(dist.cdf(np.pi/2), 0.75, rtol=1e-15) + assert_allclose(dist.sf(-np.pi/2), 0.75, rtol=1e-15) + assert_allclose(dist.ppf(0.9), np.pi*0.8, rtol=1e-15) + assert_allclose(dist.mean(), 0, atol=1e-15) + assert_allclose(dist.expect(), 0, atol=1e-15) + assert np.all(np.abs(dist.rvs(size=10, random_state=1234)) <= np.pi) + + def test_vonmises_fit_equal_data(self): + # When all data are equal, expect kappa = 1e16. + kappa, loc, scale = stats.vonmises.fit([0]) + assert kappa == 1e16 and loc == 0 and scale == 1 + + def test_vonmises_fit_bounds(self): + # For certain input data, the root bracket is violated numerically. + # Test that this situation is handled. The input data below are + # crafted to trigger the bound violation for the current choice of + # bounds and the specific way the bounds and the objective function + # are computed. + + # Test that no exception is raised when the lower bound is violated. + scipy.stats.vonmises.fit([0, 3.7e-08], floc=0) + + # Test that no exception is raised when the upper bound is violated. + scipy.stats.vonmises.fit([np.pi/2*(1-4.86e-9)], floc=0) + + +def _assert_less_or_close_loglike(dist, data, func=None, maybe_identical=False, + **kwds): + """ + This utility function checks that the negative log-likelihood function + (or `func`) of the result computed using dist.fit() is less than or equal + to the result computed using the generic fit method. Because of + normal numerical imprecision, the "equality" check is made using + `np.allclose` with a relative tolerance of 1e-15. + """ + if func is None: + func = dist.nnlf + + mle_analytical = dist.fit(data, **kwds) + numerical_opt = super(type(dist), dist).fit(data, **kwds) + + # Sanity check that the analytical MLE is actually executed. + # Due to floating point arithmetic, the generic MLE is unlikely + # to produce the exact same result as the analytical MLE. + if not maybe_identical: + assert np.any(mle_analytical != numerical_opt) + + ll_mle_analytical = func(mle_analytical, data) + ll_numerical_opt = func(numerical_opt, data) + assert (ll_mle_analytical <= ll_numerical_opt or + np.allclose(ll_mle_analytical, ll_numerical_opt, rtol=1e-15)) + + # Ideally we'd check that shapes are correctly fixed, too, but that is + # complicated by the many ways of fixing them (e.g. f0, fix_a, fa). + if 'floc' in kwds: + assert mle_analytical[-2] == kwds['floc'] + if 'fscale' in kwds: + assert mle_analytical[-1] == kwds['fscale'] + + +def assert_fit_warnings(dist): + param = ['floc', 'fscale'] + if dist.shapes: + nshapes = len(dist.shapes.split(",")) + param += ['f0', 'f1', 'f2'][:nshapes] + all_fixed = dict(zip(param, np.arange(len(param)))) + data = [1, 2, 3] + with pytest.raises(RuntimeError, + match="All parameters fixed. 
There is nothing " + "to optimize."): + dist.fit(data, **all_fixed) + with pytest.raises(ValueError, + match="The data contains non-finite values"): + dist.fit([np.nan]) + with pytest.raises(ValueError, + match="The data contains non-finite values"): + dist.fit([np.inf]) + with pytest.raises(TypeError, match="Unknown keyword arguments:"): + dist.fit(data, extra_keyword=2) + with pytest.raises(TypeError, match="Too many positional arguments."): + dist.fit(data, *[1]*(len(param) - 1)) + + +@pytest.mark.parametrize('dist', + ['alpha', 'betaprime', + 'fatiguelife', 'invgamma', 'invgauss', 'invweibull', + 'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gibrat', + 'powerlognorm', 'rayleigh', 'wald']) +def test_support(dist): + """gh-6235""" + dct = dict(distcont) + args = dct[dist] + + dist = getattr(stats, dist) + + assert_almost_equal(dist.pdf(dist.a, *args), 0) + assert_equal(dist.logpdf(dist.a, *args), -np.inf) + assert_almost_equal(dist.pdf(dist.b, *args), 0) + assert_equal(dist.logpdf(dist.b, *args), -np.inf) + + +class TestRandInt: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.randint.rvs(5, 30, size=100) + assert_(numpy.all(vals < 30) & numpy.all(vals >= 5)) + assert_(len(vals) == 100) + vals = stats.randint.rvs(5, 30, size=(2, 50)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.randint.rvs(15, 46) + assert_((val >= 15) & (val < 46)) + assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val))) + val = stats.randint(15, 46).rvs(3) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_pdf(self): + k = numpy.r_[0:36] + out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0) + vals = stats.randint.pmf(k, 5, 30) + assert_array_almost_equal(vals, out) + + def test_cdf(self): + x = np.linspace(0, 36, 100) + k = numpy.floor(x) + out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0) + vals = stats.randint.cdf(x, 5, 30) + assert_array_almost_equal(vals, out, decimal=12) + + +class TestBinom: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.binom.rvs(10, 0.75, size=(2, 50)) + assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.binom.rvs(10, 0.75) + assert_(isinstance(val, int)) + val = stats.binom(10, 0.75).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_pmf(self): + # regression test for Ticket #1842 + vals1 = stats.binom.pmf(100, 100, 1) + vals2 = stats.binom.pmf(0, 100, 0) + assert_allclose(vals1, 1.0, rtol=1e-15, atol=0) + assert_allclose(vals2, 1.0, rtol=1e-15, atol=0) + + def test_entropy(self): + # Basic entropy tests. 
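+        # For binom(2, 0.5) the pmf over {0, 1, 2} is [0.25, 0.5, 0.25],
+        # so the entropy is -sum(p*log(p)) = 1.5*log(2) nats (editor's note).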
+ b = stats.binom(2, 0.5) + expected_p = np.array([0.25, 0.5, 0.25]) + expected_h = -sum(xlogy(expected_p, expected_p)) + h = b.entropy() + assert_allclose(h, expected_h) + + b = stats.binom(2, 0.0) + h = b.entropy() + assert_equal(h, 0.0) + + b = stats.binom(2, 1.0) + h = b.entropy() + assert_equal(h, 0.0) + + def test_warns_p0(self): + # no spurious warnings are generated for p=0; gh-3817 + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + assert_equal(stats.binom(n=2, p=0).mean(), 0) + assert_equal(stats.binom(n=2, p=0).std(), 0) + + def test_ppf_p1(self): + # Check that gh-17388 is resolved: PPF == n when p = 1 + n = 4 + assert stats.binom.ppf(q=0.3, n=n, p=1.0) == n + + def test_pmf_poisson(self): + # Check that gh-17146 is resolved: binom -> poisson + n = 1541096362225563.0 + p = 1.0477878413173978e-18 + x = np.arange(3) + res = stats.binom.pmf(x, n=n, p=p) + ref = stats.poisson.pmf(x, n * p) + assert_allclose(res, ref, atol=1e-16) + + def test_pmf_cdf(self): + # Check that gh-17809 is resolved: binom.pmf(0) ~ binom.cdf(0) + n = 25.0 * 10 ** 21 + p = 1.0 * 10 ** -21 + r = 0 + res = stats.binom.pmf(r, n, p) + ref = stats.binom.cdf(r, n, p) + assert_allclose(res, ref, atol=1e-16) + + def test_pmf_gh15101(self): + # Check that gh-15101 is resolved (no divide warnings when p~1, n~oo) + res = stats.binom.pmf(3, 2000, 0.999) + assert_allclose(res, 0, atol=1e-16) + + +class TestArcsine: + + def test_endpoints(self): + # Regression test for gh-13697. The following calculation + # should not generate a warning. + p = stats.arcsine.pdf([0, 1]) + assert_equal(p, [np.inf, np.inf]) + + +class TestBernoulli: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.bernoulli.rvs(0.75, size=(2, 50)) + assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.bernoulli.rvs(0.75) + assert_(isinstance(val, int)) + val = stats.bernoulli(0.75).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_entropy(self): + # Simple tests of entropy. 
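+        # bernoulli(p) has the binary entropy
+        # H(p) = -p*log(p) - (1-p)*log(1-p), in nats (editor's note).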
+ b = stats.bernoulli(0.25) + expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75) + h = b.entropy() + assert_allclose(h, expected_h) + + b = stats.bernoulli(0.0) + h = b.entropy() + assert_equal(h, 0.0) + + b = stats.bernoulli(1.0) + h = b.entropy() + assert_equal(h, 0.0) + + +class TestBradford: + # gh-6216 + def test_cdf_ppf(self): + c = 0.1 + x = np.logspace(-20, -4) + q = stats.bradford.cdf(x, c) + xx = stats.bradford.ppf(q, c) + assert_allclose(x, xx) + + +class TestChi: + + # "Exact" value of chi.sf(10, 4), as computed by Wolfram Alpha with + # 1 - CDF[ChiDistribution[4], 10] + CHI_SF_10_4 = 9.83662422461598e-21 + # "Exact" value of chi.mean(df=1000) as computed by Wolfram Alpha with + # Mean[ChiDistribution[1000]] + CHI_MEAN_1000 = 31.614871896980 + + def test_sf(self): + s = stats.chi.sf(10, 4) + assert_allclose(s, self.CHI_SF_10_4, rtol=1e-15) + + def test_isf(self): + x = stats.chi.isf(self.CHI_SF_10_4, 4) + assert_allclose(x, 10, rtol=1e-15) + + # reference value for 1e14 was computed via mpmath + # from mpmath import mp + # mp.dps = 500 + # df = mp.mpf(1e14) + # float(mp.rf(mp.mpf(0.5) * df, mp.mpf(0.5)) * mp.sqrt(2.)) + + @pytest.mark.parametrize('df, ref', + [(1e3, CHI_MEAN_1000), + (1e14, 9999999.999999976)] + ) + def test_mean(self, df, ref): + assert_allclose(stats.chi.mean(df), ref, rtol=1e-12) + + # Entropy references values were computed with the following mpmath code + # from mpmath import mp + # mp.dps = 50 + # def chi_entropy_mpmath(df): + # df = mp.mpf(df) + # half_df = 0.5 * df + # entropy = mp.log(mp.gamma(half_df)) + 0.5 * \ + # (df - mp.log(2) - (df - mp.one) * mp.digamma(half_df)) + # return float(entropy) + + @pytest.mark.parametrize('df, ref', + [(1e-4, -9989.7316027504), + (1, 0.7257913526447274), + (1e3, 1.0721981095025448), + (1e10, 1.0723649429080335), + (1e100, 1.0723649429247002)]) + def test_entropy(self, df, ref): + assert_allclose(stats.chi(df).entropy(), ref, rtol=1e-15) + + +class TestNBinom: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.nbinom.rvs(10, 0.75, size=(2, 50)) + assert_(numpy.all(vals >= 0)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.nbinom.rvs(10, 0.75) + assert_(isinstance(val, int)) + val = stats.nbinom(10, 0.75).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_pmf(self): + # regression test for ticket 1779 + assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)), + stats.nbinom.pmf(700, 721, 0.52)) + # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029) + val = scipy.stats.nbinom.logpmf(0, 1, 1) + assert_equal(val, 0) + + def test_logcdf_gh16159(self): + # check that gh16159 is resolved. 
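+        # logcdf should agree with log(cdf) elementwise, including for
+        # arguments with repeated entries (editor's note on the check below).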
+ vals = stats.nbinom.logcdf([0, 5, 0, 5], n=4.8, p=0.45) + ref = np.log(stats.nbinom.cdf([0, 5, 0, 5], n=4.8, p=0.45)) + assert_allclose(vals, ref) + + +class TestGenInvGauss: + def setup_method(self): + np.random.seed(1234) + + @pytest.mark.slow + def test_rvs_with_mode_shift(self): + # ratio_unif w/ mode shift + gig = stats.geninvgauss(2.3, 1.5) + _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf) + assert_equal(p > 0.05, True) + + @pytest.mark.slow + def test_rvs_without_mode_shift(self): + # ratio_unif w/o mode shift + gig = stats.geninvgauss(0.9, 0.75) + _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf) + assert_equal(p > 0.05, True) + + @pytest.mark.slow + def test_rvs_new_method(self): + # new algorithm of Hoermann / Leydold + gig = stats.geninvgauss(0.1, 0.2) + _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf) + assert_equal(p > 0.05, True) + + @pytest.mark.slow + def test_rvs_p_zero(self): + def my_ks_check(p, b): + gig = stats.geninvgauss(p, b) + rvs = gig.rvs(size=1500, random_state=1234) + return stats.kstest(rvs, gig.cdf)[1] > 0.05 + # boundary cases when p = 0 + assert_equal(my_ks_check(0, 0.2), True) # new algo + assert_equal(my_ks_check(0, 0.9), True) # ratio_unif w/o shift + assert_equal(my_ks_check(0, 1.5), True) # ratio_unif with shift + + def test_rvs_negative_p(self): + # if p negative, return inverse + assert_equal( + stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234), + 1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234)) + + def test_invgauss(self): + # test that invgauss is special case + ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1234) + assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True) + # test pdf and cdf + mu, x = 100, np.linspace(0.01, 1, 10) + pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu) + assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x)) + cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu) + assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x)) + + def test_pdf_R(self): + # test against R package GIGrvg + # x <- seq(0.01, 5, length.out = 10) + # GIGrvg::dgig(x, 0.5, 1, 1) + vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01, + 2.693297528e-01, 1.905637275e-01, 1.351476913e-01, + 9.636538981e-02, 6.909040154e-02, 4.978006801e-02, + 3.602084467e-02]) + x = np.linspace(0.01, 5, 10) + assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1)) + + def test_pdf_zero(self): + # pdf at 0 is 0, needs special treatment to avoid 1/x in pdf + assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0) + # if x is large and p is moderate, make sure that pdf does not + # overflow because of x**(p-1); exp(-b*x) forces pdf to zero + assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0) + + +class TestGenHyperbolic: + def setup_method(self): + np.random.seed(1234) + + def test_pdf_r(self): + # test against R package GeneralizedHyperbolic + # x <- seq(-10, 10, length.out = 10) + # GeneralizedHyperbolic::dghyp( + # x = x, lambda = 2, alpha = 2, beta = 1, delta = 1.5, mu = 0.5 + # ) + vals_R = np.array([ + 2.94895678275316e-13, 1.75746848647696e-10, 9.48149804073045e-08, + 4.17862521692026e-05, 0.0103947630463822, 0.240864958986839, + 0.162833527161649, 0.0374609592899472, 0.00634894847327781, + 0.000941920705790324 + ]) + + lmbda, alpha, beta = 2, 2, 1 + mu, delta = 0.5, 1.5 + args = (lmbda, alpha*delta, beta*delta) + + gh = stats.genhyperbolic(*args, loc=mu, scale=delta) + x = np.linspace(-10, 10, 10) + + assert_allclose(gh.pdf(x), 
vals_R, atol=0, rtol=1e-13) + + def test_cdf_r(self): + # test against R package GeneralizedHyperbolic + # q <- seq(-10, 10, length.out = 10) + # GeneralizedHyperbolic::pghyp( + # q = q, lambda = 2, alpha = 2, beta = 1, delta = 1.5, mu = 0.5 + # ) + vals_R = np.array([ + 1.01881590921421e-13, 6.13697274983578e-11, 3.37504977637992e-08, + 1.55258698166181e-05, 0.00447005453832497, 0.228935323956347, + 0.755759458895243, 0.953061062884484, 0.992598013917513, + 0.998942646586662 + ]) + + lmbda, alpha, beta = 2, 2, 1 + mu, delta = 0.5, 1.5 + args = (lmbda, alpha*delta, beta*delta) + + gh = stats.genhyperbolic(*args, loc=mu, scale=delta) + x = np.linspace(-10, 10, 10) + + assert_allclose(gh.cdf(x), vals_R, atol=0, rtol=1e-6) + + # The reference values were computed by implementing the PDF with mpmath + # and integrating it with mp.quad. The values were computed with + # mp.dps=250, and then again with mp.dps=400 to ensure the full 64 bit + # precision was computed. + @pytest.mark.parametrize( + 'x, p, a, b, loc, scale, ref', + [(-15, 2, 3, 1.5, 0.5, 1.5, 4.770036428808252e-20), + (-15, 10, 1.5, 0.25, 1, 5, 0.03282964575089294), + (-15, 10, 1.5, 1.375, 0, 1, 3.3711159600215594e-23), + (-15, 0.125, 1.5, 1.49995, 0, 1, 4.729401428898605e-23), + (-1, 0.125, 1.5, 1.49995, 0, 1, 0.0003565725914786859), + (5, -0.125, 1.5, 1.49995, 0, 1, 0.2600651974023352), + (5, -0.125, 1000, 999, 0, 1, 5.923270556517253e-28), + (20, -0.125, 1000, 999, 0, 1, 0.23452293711665634), + (40, -0.125, 1000, 999, 0, 1, 0.9999648749561968), + (60, -0.125, 1000, 999, 0, 1, 0.9999999999975475)] + ) + def test_cdf_mpmath(self, x, p, a, b, loc, scale, ref): + cdf = stats.genhyperbolic.cdf(x, p, a, b, loc=loc, scale=scale) + assert_allclose(cdf, ref, rtol=5e-12) + + # The reference values were computed by implementing the PDF with mpmath + # and integrating it with mp.quad. The values were computed with + # mp.dps=250, and then again with mp.dps=400 to ensure the full 64 bit + # precision was computed. 
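+    # A sketch of that reference computation (an editor's illustration; the
+    # mpmath density below follows the standardized genhyperbolic pdf
+    # formula and is not SciPy code):
+    #     from mpmath import mp
+    #     mp.dps = 250
+    #     def pdf_mp(x, p, a, b):
+    #         g = mp.sqrt(a*a - b*b)
+    #         t = mp.sqrt(1 + x*x)
+    #         num = g**p * mp.besselk(p - mp.mpf(1)/2, a*t) * mp.exp(b*x)
+    #         den = (mp.sqrt(2*mp.pi) * a**(p - mp.mpf(1)/2)
+    #                * t**(mp.mpf(1)/2 - p) * mp.besselk(p, g))
+    #         return num / den
+    #     sf = mp.quad(lambda u: pdf_mp(u, p, a, b), [x, mp.inf])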
+ @pytest.mark.parametrize( + 'x, p, a, b, loc, scale, ref', + [(0, 1e-6, 12, -1, 0, 1, 0.38520358671350524), + (-1, 3, 2.5, 2.375, 1, 3, 0.9999901774267577), + (-20, 3, 2.5, 2.375, 1, 3, 1.0), + (25, 2, 3, 1.5, 0.5, 1.5, 8.593419916523976e-10), + (300, 10, 1.5, 0.25, 1, 5, 6.137415609872158e-24), + (60, -0.125, 1000, 999, 0, 1, 2.4524915075944173e-12), + (75, -0.125, 1000, 999, 0, 1, 2.9435194886214633e-18)] + ) + def test_sf_mpmath(self, x, p, a, b, loc, scale, ref): + sf = stats.genhyperbolic.sf(x, p, a, b, loc=loc, scale=scale) + assert_allclose(sf, ref, rtol=5e-12) + + def test_moments_r(self): + # test against R package GeneralizedHyperbolic + # sapply(1:4, + # function(x) GeneralizedHyperbolic::ghypMom( + # order = x, lambda = 2, alpha = 2, + # beta = 1, delta = 1.5, mu = 0.5, + # momType = 'raw') + # ) + + vals_R = [2.36848366948115, 8.4739346779246, + 37.8870502710066, 205.76608511485] + + lmbda, alpha, beta = 2, 2, 1 + mu, delta = 0.5, 1.5 + args = (lmbda, alpha*delta, beta*delta) + + vals_us = [ + stats.genhyperbolic(*args, loc=mu, scale=delta).moment(i) + for i in range(1, 5) + ] + + assert_allclose(vals_us, vals_R, atol=0, rtol=1e-13) + + def test_rvs(self): + # Kolmogorov-Smirnov test to ensure alignment + # of analytical and empirical cdfs + + lmbda, alpha, beta = 2, 2, 1 + mu, delta = 0.5, 1.5 + args = (lmbda, alpha*delta, beta*delta) + + gh = stats.genhyperbolic(*args, loc=mu, scale=delta) + _, p = stats.kstest(gh.rvs(size=1500, random_state=1234), gh.cdf) + + assert_equal(p > 0.05, True) + + def test_pdf_t(self): + # Test Against T-Student with 1 - 30 df + df = np.linspace(1, 30, 10) + + # in principle alpha should be zero in practice for big lmbdas + # alpha cannot be too small else pdf does not integrate + alpha, beta = np.float_power(df, 2)*np.finfo(np.float32).eps, 0 + mu, delta = 0, np.sqrt(df) + args = (-df/2, alpha, beta) + + gh = stats.genhyperbolic(*args, loc=mu, scale=delta) + x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis] + + assert_allclose( + gh.pdf(x), stats.t.pdf(x, df), + atol=0, rtol=1e-6 + ) + + def test_pdf_cauchy(self): + # Test Against Cauchy distribution + + # in principle alpha should be zero in practice for big lmbdas + # alpha cannot be too small else pdf does not integrate + lmbda, alpha, beta = -0.5, np.finfo(np.float32).eps, 0 + mu, delta = 0, 1 + args = (lmbda, alpha, beta) + + gh = stats.genhyperbolic(*args, loc=mu, scale=delta) + x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis] + + assert_allclose( + gh.pdf(x), stats.cauchy.pdf(x), + atol=0, rtol=1e-6 + ) + + def test_pdf_laplace(self): + # Test Against Laplace with location param [-10, 10] + loc = np.linspace(-10, 10, 10) + + # in principle delta should be zero in practice for big loc delta + # cannot be too small else pdf does not integrate + delta = np.finfo(np.float32).eps + + lmbda, alpha, beta = 1, 1, 0 + args = (lmbda, alpha*delta, beta*delta) + + # ppf does not integrate for scale < 5e-4 + # therefore using simple linspace to define the support + gh = stats.genhyperbolic(*args, loc=loc, scale=delta) + x = np.linspace(-20, 20, 50)[:, np.newaxis] + + assert_allclose( + gh.pdf(x), stats.laplace.pdf(x, loc=loc, scale=1), + atol=0, rtol=1e-11 + ) + + def test_pdf_norminvgauss(self): + # Test Against NIG with varying alpha/beta/delta/mu + + alpha, beta, delta, mu = ( + np.linspace(1, 20, 10), + np.linspace(0, 19, 10)*np.float_power(-1, range(10)), + np.linspace(1, 1, 10), + np.linspace(-100, 100, 10) + ) + + lmbda = - 0.5 + args = (lmbda, alpha * delta, beta * 
delta) + + gh = stats.genhyperbolic(*args, loc=mu, scale=delta) + x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis] + + assert_allclose( + gh.pdf(x), stats.norminvgauss.pdf( + x, a=alpha, b=beta, loc=mu, scale=delta), + atol=0, rtol=1e-13 + ) + + +class TestHypSecant: + + # Reference values were computed with the mpmath expression + # float((2/mp.pi)*mp.atan(mp.exp(-x))) + # and mp.dps = 50. + @pytest.mark.parametrize('x, reference', + [(30, 5.957247804324683e-14), + (50, 1.2278802891647964e-22)]) + def test_sf(self, x, reference): + sf = stats.hypsecant.sf(x) + assert_allclose(sf, reference, rtol=5e-15) + + # Reference values were computed with the mpmath expression + # float(-mp.log(mp.tan((mp.pi/2)*p))) + # and mp.dps = 50. + @pytest.mark.parametrize('p, reference', + [(1e-6, 13.363927852673998), + (1e-12, 27.179438410639094)]) + def test_isf(self, p, reference): + x = stats.hypsecant.isf(p) + assert_allclose(x, reference, rtol=5e-15) + + +class TestNormInvGauss: + def setup_method(self): + np.random.seed(1234) + + def test_cdf_R(self): + # test pdf and cdf vals against R + # require("GeneralizedHyperbolic") + # x_test <- c(-7, -5, 0, 8, 15) + # r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5) + # r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5) + r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01, + 9.988650664e-01, 9.999848769e-01]) + x_test = np.array([-7, -5, 0, 8, 15]) + vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5) + assert_allclose(vals_cdf, r_cdf, atol=1e-9) + + def test_pdf_R(self): + # values from R as defined in test_cdf_R + r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01, + 7.450485342e-04, 8.917889931e-06]) + x_test = np.array([-7, -5, 0, 8, 15]) + vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5) + assert_allclose(vals_pdf, r_pdf, atol=1e-9) + + @pytest.mark.parametrize('x, a, b, sf, rtol', + [(-1, 1, 0, 0.8759652211005315, 1e-13), + (25, 1, 0, 1.1318690184042579e-13, 1e-4), + (1, 5, -1.5, 0.002066711134653577, 1e-12), + (10, 5, -1.5, 2.308435233930669e-29, 1e-9)]) + def test_sf_isf_mpmath(self, x, a, b, sf, rtol): + # Reference data generated with `reference_distributions.NormInvGauss`, + # e.g. `NormInvGauss(alpha=1, beta=0).sf(-1)` with mp.dps = 50 + s = stats.norminvgauss.sf(x, a, b) + assert_allclose(s, sf, rtol=rtol) + i = stats.norminvgauss.isf(sf, a, b) + assert_allclose(i, x, rtol=rtol) + + def test_sf_isf_mpmath_vectorized(self): + x = [-1, 25] + a = [1, 1] + b = 0 + sf = [0.8759652211005315, 1.1318690184042579e-13] # see previous test + s = stats.norminvgauss.sf(x, a, b) + assert_allclose(s, sf, rtol=1e-13, atol=1e-16) + i = stats.norminvgauss.isf(sf, a, b) + # Not perfect, but better than it was. See gh-13338. 
+ assert_allclose(i, x, rtol=1e-6) + + def test_gh8718(self): + # Add test that gh-13338 resolved gh-8718 + dst = stats.norminvgauss(1, 0) + x = np.arange(0, 20, 2) + sf = dst.sf(x) + isf = dst.isf(sf) + assert_allclose(isf, x) + + def test_stats(self): + a, b = 1, 0.5 + gamma = np.sqrt(a**2 - b**2) + v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)), + 3.0 * (1 + 4 * b**2 / a**2) / gamma) + assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk')) + + def test_ppf(self): + a, b = 1, 0.5 + x_test = np.array([0.001, 0.5, 0.999]) + vals = stats.norminvgauss.ppf(x_test, a, b) + assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b)) + + +class TestGeom: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.geom.rvs(0.75, size=(2, 50)) + assert_(numpy.all(vals >= 0)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.geom.rvs(0.75) + assert_(isinstance(val, int)) + val = stats.geom(0.75).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_rvs_9313(self): + # previously, RVS were converted to `np.int32` on some platforms, + # causing overflow for moderately large integer output (gh-9313). + # Check that this is resolved to the extent possible w/ `np.int64`. + rng = np.random.default_rng(649496242618848) + rvs = stats.geom.rvs(np.exp(-35), size=5, random_state=rng) + assert rvs.dtype == np.int64 + assert np.all(rvs > np.iinfo(np.int32).max) + + def test_pmf(self): + vals = stats.geom.pmf([1, 2, 3], 0.5) + assert_array_almost_equal(vals, [0.5, 0.25, 0.125]) + + def test_logpmf(self): + # regression test for ticket 1793 + vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5)) + vals2 = stats.geom.logpmf([1, 2, 3], 0.5) + assert_allclose(vals1, vals2, rtol=1e-15, atol=0) + + # regression test for gh-4028 + val = stats.geom.logpmf(1, 1) + assert_equal(val, 0.0) + + def test_cdf_sf(self): + vals = stats.geom.cdf([1, 2, 3], 0.5) + vals_sf = stats.geom.sf([1, 2, 3], 0.5) + expected = array([0.5, 0.75, 0.875]) + assert_array_almost_equal(vals, expected) + assert_array_almost_equal(vals_sf, 1-expected) + + def test_logcdf_logsf(self): + vals = stats.geom.logcdf([1, 2, 3], 0.5) + vals_sf = stats.geom.logsf([1, 2, 3], 0.5) + expected = array([0.5, 0.75, 0.875]) + assert_array_almost_equal(vals, np.log(expected)) + assert_array_almost_equal(vals_sf, np.log1p(-expected)) + + def test_ppf(self): + vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5) + expected = array([1.0, 2.0, 3.0]) + assert_array_almost_equal(vals, expected) + + def test_ppf_underflow(self): + # this should not underflow + assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14) + + def test_entropy_gh18226(self): + # gh-18226 reported that `geom.entropy` produced a warning and + # inaccurate output for small p. Check that this is resolved. + h = stats.geom(0.0146).entropy() + assert_allclose(h, 5.219397961962308, rtol=1e-15) + + +class TestPlanck: + def setup_method(self): + np.random.seed(1234) + + def test_sf(self): + vals = stats.planck.sf([1, 2, 3], 5.) + expected = array([4.5399929762484854e-05, + 3.0590232050182579e-07, + 2.0611536224385579e-09]) + assert_array_almost_equal(vals, expected) + + def test_logsf(self): + vals = stats.planck.logsf([1000., 2000., 3000.], 1000.) 
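+        # planck.sf(k, lam) = exp(-lam*(k + 1)), so e.g. logsf(1000., 1000.)
+        # is exactly -1000.*(1000. + 1.) = -1001000. (editor's note).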
+ expected = array([-1001000., -2001000., -3001000.]) + assert_array_almost_equal(vals, expected) + + +class TestGennorm: + def test_laplace(self): + # test against Laplace (special case for beta=1) + points = [1, 2, 3] + pdf1 = stats.gennorm.pdf(points, 1) + pdf2 = stats.laplace.pdf(points) + assert_almost_equal(pdf1, pdf2) + + def test_norm(self): + # test against normal (special case for beta=2) + points = [1, 2, 3] + pdf1 = stats.gennorm.pdf(points, 2) + pdf2 = stats.norm.pdf(points, scale=2**-.5) + assert_almost_equal(pdf1, pdf2) + + def test_rvs(self): + np.random.seed(0) + # 0 < beta < 1 + dist = stats.gennorm(0.5) + rvs = dist.rvs(size=1000) + assert stats.kstest(rvs, dist.cdf).pvalue > 0.1 + # beta = 1 + dist = stats.gennorm(1) + rvs = dist.rvs(size=1000) + rvs_laplace = stats.laplace.rvs(size=1000) + assert stats.ks_2samp(rvs, rvs_laplace).pvalue > 0.1 + # beta = 2 + dist = stats.gennorm(2) + rvs = dist.rvs(size=1000) + rvs_norm = stats.norm.rvs(scale=1/2**0.5, size=1000) + assert stats.ks_2samp(rvs, rvs_norm).pvalue > 0.1 + + def test_rvs_broadcasting(self): + np.random.seed(0) + dist = stats.gennorm([[0.5, 1.], [2., 5.]]) + rvs = dist.rvs(size=[1000, 2, 2]) + assert stats.kstest(rvs[:, 0, 0], stats.gennorm(0.5).cdf)[1] > 0.1 + assert stats.kstest(rvs[:, 0, 1], stats.gennorm(1.0).cdf)[1] > 0.1 + assert stats.kstest(rvs[:, 1, 0], stats.gennorm(2.0).cdf)[1] > 0.1 + assert stats.kstest(rvs[:, 1, 1], stats.gennorm(5.0).cdf)[1] > 0.1 + + +class TestGibrat: + + # sfx is sf(x). The values were computed with mpmath: + # + # from mpmath import mp + # mp.dps = 100 + # def gibrat_sf(x): + # return 1 - mp.ncdf(mp.log(x)) + # + # E.g. + # + # >>> float(gibrat_sf(1.5)) + # 0.3425678305148459 + # + @pytest.mark.parametrize('x, sfx', [(1.5, 0.3425678305148459), + (5000, 8.173334352522493e-18)]) + def test_sf_isf(self, x, sfx): + assert_allclose(stats.gibrat.sf(x), sfx, rtol=2e-14) + assert_allclose(stats.gibrat.isf(sfx), x, rtol=2e-14) + + +class TestGompertz: + + def test_gompertz_accuracy(self): + # Regression test for gh-4031 + p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1) + assert_allclose(p, 1e-100) + + # sfx is sf(x). The values were computed with mpmath: + # + # from mpmath import mp + # mp.dps = 100 + # def gompertz_sf(x, c): + # return mp.exp(-c*mp.expm1(x)) + # + # E.g. 
+ # + # >>> float(gompertz_sf(1, 2.5)) + # 0.013626967146253437 + # + @pytest.mark.parametrize('x, c, sfx', [(1, 2.5, 0.013626967146253437), + (3, 2.5, 1.8973243273704087e-21), + (0.05, 5, 0.7738668242570479), + (2.25, 5, 3.707795833465481e-19)]) + def test_sf_isf(self, x, c, sfx): + assert_allclose(stats.gompertz.sf(x, c), sfx, rtol=1e-14) + assert_allclose(stats.gompertz.isf(sfx, c), x, rtol=1e-14) + + # reference values were computed with mpmath + # from mpmath import mp + # mp.dps = 100 + # def gompertz_entropy(c): + # c = mp.mpf(c) + # return float(mp.one - mp.log(c) - mp.exp(c)*mp.e1(c)) + + @pytest.mark.parametrize('c, ref', [(1e-4, 1.5762523017634573), + (1, 0.4036526376768059), + (1000, -5.908754280976161), + (1e10, -22.025850930040455)]) + def test_entropy(self, c, ref): + assert_allclose(stats.gompertz.entropy(c), ref, rtol=1e-14) + + + class TestFoldNorm: + + # reference values were computed with mpmath with 50 digits of precision + # from mpmath import mp + # mp.dps = 50 + # mp.mpf(0.5) * (mp.erf((x - c)/mp.sqrt(2)) + mp.erf((x + c)/mp.sqrt(2))) + + @pytest.mark.parametrize('x, c, ref', [(1e-4, 1e-8, 7.978845594730578e-05), + (1e-4, 1e-4, 7.97884555483635e-05)]) + def test_cdf(self, x, c, ref): + assert_allclose(stats.foldnorm.cdf(x, c), ref, rtol=1e-15) + + + class TestHalfNorm: + + # sfx is sf(x). The values were computed with mpmath: + # + # from mpmath import mp + # mp.dps = 100 + # def halfnorm_sf(x): + # return 2*(1 - mp.ncdf(x)) + # + # E.g. + # + # >>> float(halfnorm_sf(1)) + # 0.3173105078629141 + # + @pytest.mark.parametrize('x, sfx', [(1, 0.3173105078629141), + (10, 1.523970604832105e-23)]) + def test_sf_isf(self, x, sfx): + assert_allclose(stats.halfnorm.sf(x), sfx, rtol=1e-14) + assert_allclose(stats.halfnorm.isf(sfx), x, rtol=1e-14) + + # reference values were computed via mpmath + # from mpmath import mp + # mp.dps = 100 + # def halfnorm_cdf_mpmath(x): + # x = mp.mpf(x) + # return float(mp.erf(x/mp.sqrt(2.))) + + @pytest.mark.parametrize('x, ref', [(1e-40, 7.978845608028653e-41), + (1e-18, 7.978845608028654e-19), + (8, 0.9999999999999988)]) + def test_cdf(self, x, ref): + assert_allclose(stats.halfnorm.cdf(x), ref, rtol=1e-15) + + @pytest.mark.parametrize("rvs_loc", [1e-5, 1e10]) + @pytest.mark.parametrize("rvs_scale", [1e-2, 100, 1e8]) + @pytest.mark.parametrize('fix_loc', [True, False]) + @pytest.mark.parametrize('fix_scale', [True, False]) + def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale, + fix_loc, fix_scale): + + rng = np.random.default_rng(6762668991392531563) + data = stats.halfnorm.rvs(loc=rvs_loc, scale=rvs_scale, size=1000, + random_state=rng) + + if fix_loc and fix_scale: + error_msg = ("All parameters fixed. There is nothing to " + "optimize.") + with pytest.raises(RuntimeError, match=error_msg): + stats.halfnorm.fit(data, floc=rvs_loc, fscale=rvs_scale) + return + + kwds = {} + if fix_loc: + kwds['floc'] = rvs_loc + if fix_scale: + kwds['fscale'] = rvs_scale + + # Numerical result may equal analytical result if the initial guess + # computed from moment condition is already optimal. 
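+ # With `floc` fixed, the halfnorm scale MLE is sqrt(mean((data - loc)**2)); + # with `fscale` fixed, the likelihood is maximized at loc = data.min().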
+ _assert_less_or_close_loglike(stats.halfnorm, data, **kwds, + maybe_identical=True) + + def test_fit_error(self): + # `floc` bigger than the minimal data point + with pytest.raises(FitDataError): + stats.halfnorm.fit([1, 2, 3], floc=2) + + + class TestHalfCauchy: + + @pytest.mark.parametrize("rvs_loc", [1e-5, 1e10]) + @pytest.mark.parametrize("rvs_scale", [1e-2, 1e8]) + @pytest.mark.parametrize('fix_loc', [True, False]) + @pytest.mark.parametrize('fix_scale', [True, False]) + def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale, + fix_loc, fix_scale): + + rng = np.random.default_rng(6762668991392531563) + data = stats.halfcauchy.rvs(loc=rvs_loc, scale=rvs_scale, size=1000, + random_state=rng) + + if fix_loc and fix_scale: + error_msg = ("All parameters fixed. There is nothing to " + "optimize.") + with pytest.raises(RuntimeError, match=error_msg): + stats.halfcauchy.fit(data, floc=rvs_loc, fscale=rvs_scale) + return + + kwds = {} + if fix_loc: + kwds['floc'] = rvs_loc + if fix_scale: + kwds['fscale'] = rvs_scale + + _assert_less_or_close_loglike(stats.halfcauchy, data, **kwds) + + def test_fit_error(self): + # `floc` bigger than the minimal data point + with pytest.raises(FitDataError): + stats.halfcauchy.fit([1, 2, 3], floc=2) + + + class TestHalfLogistic: + # survival function reference values were computed with mpmath + # from mpmath import mp + # mp.dps = 50 + # def sf_mpmath(x): + # x = mp.mpf(x) + # return float(mp.mpf(2.)/(mp.exp(x) + mp.one)) + + @pytest.mark.parametrize('x, ref', [(100, 7.440151952041672e-44), + (200, 2.767793053473475e-87)]) + def test_sf(self, x, ref): + assert_allclose(stats.halflogistic.sf(x), ref, rtol=1e-15) + + # inverse survival function reference values were computed with mpmath + # from mpmath import mp + # mp.dps = 200 + # def isf_mpmath(x): + # halfx = mp.mpf(x)/2 + # return float(-mp.log(halfx/(mp.one - halfx))) + + @pytest.mark.parametrize('q, ref', [(7.440151952041672e-44, 100), + (2.767793053473475e-87, 200), + (1-1e-9, 1.999999943436137e-09), + (1-1e-15, 1.9984014443252818e-15)]) + def test_isf(self, q, ref): + assert_allclose(stats.halflogistic.isf(q), ref, rtol=1e-15) + + @pytest.mark.parametrize("rvs_loc", [1e-5, 1e10]) + @pytest.mark.parametrize("rvs_scale", [1e-2, 100, 1e8]) + @pytest.mark.parametrize('fix_loc', [True, False]) + @pytest.mark.parametrize('fix_scale', [True, False]) + def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale, + fix_loc, fix_scale): + + rng = np.random.default_rng(6762668991392531563) + data = stats.halflogistic.rvs(loc=rvs_loc, scale=rvs_scale, size=1000, + random_state=rng) + + kwds = {} + if fix_loc and fix_scale: + error_msg = ("All parameters fixed. There is nothing to " + "optimize.") + with pytest.raises(RuntimeError, match=error_msg): + stats.halflogistic.fit(data, floc=rvs_loc, fscale=rvs_scale) + return + + if fix_loc: + kwds['floc'] = rvs_loc + if fix_scale: + kwds['fscale'] = rvs_scale + + # Numerical result may equal analytical result if the initial guess + # computed from moment condition is already optimal. 
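+ # With loc fixed, the halflogistic scale MLE has no closed form: it solves + # n*s = sum((x - loc)*tanh((x - loc)/(2*s))), hence the optimizer comparison.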
+ _assert_less_or_close_loglike(stats.halflogistic, data, **kwds, + maybe_identical=True) + + def test_fit_bad_floc(self): + msg = r" Maximum likelihood estimation with 'halflogistic' requires" + with assert_raises(FitDataError, match=msg): + stats.halflogistic.fit([0, 2, 4], floc=1) + + +class TestHalfgennorm: + def test_expon(self): + # test against exponential (special case for beta=1) + points = [1, 2, 3] + pdf1 = stats.halfgennorm.pdf(points, 1) + pdf2 = stats.expon.pdf(points) + assert_almost_equal(pdf1, pdf2) + + def test_halfnorm(self): + # test against half normal (special case for beta=2) + points = [1, 2, 3] + pdf1 = stats.halfgennorm.pdf(points, 2) + pdf2 = stats.halfnorm.pdf(points, scale=2**-.5) + assert_almost_equal(pdf1, pdf2) + + def test_gennorm(self): + # test against generalized normal + points = [1, 2, 3] + pdf1 = stats.halfgennorm.pdf(points, .497324) + pdf2 = stats.gennorm.pdf(points, .497324) + assert_almost_equal(pdf1, 2*pdf2) + + +class TestLaplaceasymmetric: + def test_laplace(self): + # test against Laplace (special case for kappa=1) + points = np.array([1, 2, 3]) + pdf1 = stats.laplace_asymmetric.pdf(points, 1) + pdf2 = stats.laplace.pdf(points) + assert_allclose(pdf1, pdf2) + + def test_asymmetric_laplace_pdf(self): + # test asymmetric Laplace + points = np.array([1, 2, 3]) + kappa = 2 + kapinv = 1/kappa + pdf1 = stats.laplace_asymmetric.pdf(points, kappa) + pdf2 = stats.laplace_asymmetric.pdf(points*(kappa**2), kapinv) + assert_allclose(pdf1, pdf2) + + def test_asymmetric_laplace_log_10_16(self): + # test asymmetric Laplace + points = np.array([-np.log(16), np.log(10)]) + kappa = 2 + pdf1 = stats.laplace_asymmetric.pdf(points, kappa) + cdf1 = stats.laplace_asymmetric.cdf(points, kappa) + sf1 = stats.laplace_asymmetric.sf(points, kappa) + pdf2 = np.array([1/10, 1/250]) + cdf2 = np.array([1/5, 1 - 1/500]) + sf2 = np.array([4/5, 1/500]) + ppf1 = stats.laplace_asymmetric.ppf(cdf2, kappa) + ppf2 = points + isf1 = stats.laplace_asymmetric.isf(sf2, kappa) + isf2 = points + assert_allclose(np.concatenate((pdf1, cdf1, sf1, ppf1, isf1)), + np.concatenate((pdf2, cdf2, sf2, ppf2, isf2))) + + +class TestTruncnorm: + def setup_method(self): + np.random.seed(1234) + + @pytest.mark.parametrize("a, b, ref", + [(0, 100, 0.7257913526447274), + (0.6, 0.7, -2.3027610681852573), + (1e-06, 2e-06, -13.815510557964274)]) + def test_entropy(self, a, b, ref): + # All reference values were calculated with mpmath: + # import numpy as np + # from mpmath import mp + # mp.dps = 50 + # def entropy_trun(a, b): + # a, b = mp.mpf(a), mp.mpf(b) + # Z = mp.ncdf(b) - mp.ncdf(a) + # + # def pdf(x): + # return mp.npdf(x) / Z + # + # res = -mp.quad(lambda t: pdf(t) * mp.log(pdf(t)), [a, b]) + # return np.float64(res) + assert_allclose(stats.truncnorm.entropy(a, b), ref, rtol=1e-10) + + @pytest.mark.parametrize("a, b, ref", + [(1e-11, 10000000000.0, 0.725791352640738), + (1e-100, 1e+100, 0.7257913526447274), + (-1e-100, 1e+100, 0.7257913526447274), + (-1e+100, 1e+100, 1.4189385332046727)]) + def test_extreme_entropy(self, a, b, ref): + # The reference values were calculated with mpmath + # import numpy as np + # from mpmath import mp + # mp.dps = 50 + # def trunc_norm_entropy(a, b): + # a, b = mp.mpf(a), mp.mpf(b) + # Z = mp.ncdf(b) - mp.ncdf(a) + # A = mp.log(mp.sqrt(2 * mp.pi * mp.e) * Z) + # B = (a * mp.npdf(a) - b * mp.npdf(b)) / (2 * Z) + # return np.float64(A + B) + assert_allclose(stats.truncnorm.entropy(a, b), ref, rtol=1e-14) + + def test_ppf_ticket1131(self): + vals = 
stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1., + loc=[3]*7, scale=2) + expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan]) + assert_array_almost_equal(vals, expected) + + def test_isf_ticket1131(self): + vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1., + loc=[3]*7, scale=2) + expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan]) + assert_array_almost_equal(vals, expected) + + def test_gh_2477_small_values(self): + # Check a case that worked in the original issue. + low, high = -11, -10 + x = stats.truncnorm.rvs(low, high, 0, 1, size=10) + assert_(low < x.min() < x.max() < high) + # Check a case that failed in the original issue. + low, high = 10, 11 + x = stats.truncnorm.rvs(low, high, 0, 1, size=10) + assert_(low < x.min() < x.max() < high) + + def test_gh_2477_large_values(self): + # Check a case that used to fail because of extreme tailness. + low, high = 100, 101 + x = stats.truncnorm.rvs(low, high, 0, 1, size=10) + assert_(low <= x.min() <= x.max() <= high), str([low, high, x]) + + # Check some additional extreme tails + low, high = 1000, 1001 + x = stats.truncnorm.rvs(low, high, 0, 1, size=10) + assert_(low < x.min() < x.max() < high) + + low, high = 10000, 10001 + x = stats.truncnorm.rvs(low, high, 0, 1, size=10) + assert_(low < x.min() < x.max() < high) + + low, high = -10001, -10000 + x = stats.truncnorm.rvs(low, high, 0, 1, size=10) + assert_(low < x.min() < x.max() < high) + + def test_gh_9403_nontail_values(self): + for low, high in [[3, 4], [-4, -3]]: + xvals = np.array([-np.inf, low, high, np.inf]) + xmid = (high+low)/2.0 + cdfs = stats.truncnorm.cdf(xvals, low, high) + sfs = stats.truncnorm.sf(xvals, low, high) + pdfs = stats.truncnorm.pdf(xvals, low, high) + expected_cdfs = np.array([0, 0, 1, 1]) + expected_sfs = np.array([1.0, 1.0, 0.0, 0.0]) + expected_pdfs = np.array([0, 3.3619772, 0.1015229, 0]) + if low < 0: + expected_pdfs = np.array([0, 0.1015229, 3.3619772, 0]) + assert_almost_equal(cdfs, expected_cdfs) + assert_almost_equal(sfs, expected_sfs) + assert_almost_equal(pdfs, expected_pdfs) + assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]), + low + 0.5) + pvals = np.array([0, 0.5, 1.0]) + ppfs = stats.truncnorm.ppf(pvals, low, high) + expected_ppfs = np.array([low, np.sign(low)*3.1984741, high]) + assert_almost_equal(ppfs, expected_ppfs) + + if low < 0: + assert_almost_equal(stats.truncnorm.sf(xmid, low, high), + 0.8475544278436675) + assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), + 0.1524455721563326) + else: + assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), + 0.8475544278436675) + assert_almost_equal(stats.truncnorm.sf(xmid, low, high), + 0.1524455721563326) + pdf = stats.truncnorm.pdf(xmid, low, high) + assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2) + + def test_gh_9403_medium_tail_values(self): + for low, high in [[39, 40], [-40, -39]]: + xvals = np.array([-np.inf, low, high, np.inf]) + xmid = (high+low)/2.0 + cdfs = stats.truncnorm.cdf(xvals, low, high) + sfs = stats.truncnorm.sf(xvals, low, high) + pdfs = stats.truncnorm.pdf(xvals, low, high) + expected_cdfs = np.array([0, 0, 1, 1]) + expected_sfs = np.array([1.0, 1.0, 0.0, 0.0]) + expected_pdfs = np.array([0, 3.90256074e+01, 2.73349092e-16, 0]) + if low < 0: + expected_pdfs = np.array([0, 2.73349092e-16, + 3.90256074e+01, 0]) + assert_almost_equal(cdfs, expected_cdfs) + assert_almost_equal(sfs, expected_sfs) + assert_almost_equal(pdfs, expected_pdfs) + 
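# Since high - low = 1, log(pdf(low)/pdf(high)) = (high**2 - low**2)/2, + # i.e. low + 0.5, independent of the truncation normalization. +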
assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]), + low + 0.5) + pvals = np.array([0, 0.5, 1.0]) + ppfs = stats.truncnorm.ppf(pvals, low, high) + expected_ppfs = np.array([low, np.sign(low)*39.01775731, high]) + assert_almost_equal(ppfs, expected_ppfs) + cdfs = stats.truncnorm.cdf(ppfs, low, high) + assert_almost_equal(cdfs, pvals) + + if low < 0: + assert_almost_equal(stats.truncnorm.sf(xmid, low, high), + 0.9999999970389126) + assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), + 2.961048103554866e-09) + else: + assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), + 0.9999999970389126) + assert_almost_equal(stats.truncnorm.sf(xmid, low, high), + 2.961048103554866e-09) + pdf = stats.truncnorm.pdf(xmid, low, high) + assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2) + + xvals = np.linspace(low, high, 11) + xvals2 = -xvals[::-1] + assert_almost_equal(stats.truncnorm.cdf(xvals, low, high), + stats.truncnorm.sf(xvals2, -high, -low)[::-1]) + assert_almost_equal(stats.truncnorm.sf(xvals, low, high), + stats.truncnorm.cdf(xvals2, -high, -low)[::-1]) + assert_almost_equal(stats.truncnorm.pdf(xvals, low, high), + stats.truncnorm.pdf(xvals2, -high, -low)[::-1]) + + def test_cdf_tail_15110_14753(self): + # Check accuracy issues reported in gh-14753 and gh-15110 + # Ground truth values calculated using Wolfram Alpha, e.g. + # (CDF[NormalDistribution[0,1],83/10]-CDF[NormalDistribution[0,1],8])/ + # (1 - CDF[NormalDistribution[0,1],8]) + assert_allclose(stats.truncnorm(13., 15.).cdf(14.), + 0.9999987259565643) + assert_allclose(stats.truncnorm(8, np.inf).cdf(8.3), + 0.9163220907327540) + + # Test data for the truncnorm stats() method. + # The data in each row is: + # a, b, mean, variance, skewness, excess kurtosis. Generated using + # https://gist.github.com/WarrenWeckesser/636b537ee889679227d53543d333a720 + _truncnorm_stats_data = [ + [-30, 30, + 0.0, 1.0, 0.0, 0.0], + [-10, 10, + 0.0, 1.0, 0.0, -1.4927521335810455e-19], + [-3, 3, + 0.0, 0.9733369246625415, 0.0, -0.17111443639774404], + [-2, 2, + 0.0, 0.7737413035499232, 0.0, -0.6344632828703505], + [0, np.inf, + 0.7978845608028654, + 0.3633802276324187, + 0.995271746431156, + 0.8691773036059741], + [-np.inf, 0, + -0.7978845608028654, + 0.3633802276324187, + -0.995271746431156, + 0.8691773036059741], + [-1, 3, + 0.282786110727154, + 0.6161417353578293, + 0.5393018494027877, + -0.20582065135274694], + [-3, 1, + -0.282786110727154, + 0.6161417353578293, + -0.5393018494027877, + -0.20582065135274694], + [-10, -9, + -9.108456288012409, + 0.011448805821636248, + -1.8985607290949496, + 5.0733461105025075], + ] + _truncnorm_stats_data = np.array(_truncnorm_stats_data) + + @pytest.mark.parametrize("case", _truncnorm_stats_data) + def test_moments(self, case): + a, b, m0, v0, s0, k0 = case + m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk') + assert_allclose([m, v, s, k], [m0, v0, s0, k0], atol=1e-17) + + def test_9902_moments(self): + m, v = stats.truncnorm.stats(0, np.inf, moments='mv') + assert_almost_equal(m, 0.79788456) + assert_almost_equal(v, 0.36338023) + + def test_gh_1489_trac_962_rvs(self): + # Check the original example. + low, high = 10, 15 + x = stats.truncnorm.rvs(low, high, 0, 1, size=10) + assert_(low < x.min() < x.max() < high) + + def test_gh_11299_rvs(self): + # Arose from investigating gh-11299 + # Test multiple shape parameters simultaneously. 
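+ # The bounds mix finite and infinite endpoints on both sides so that one + # call exercises many of the truncation branches at once.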
+ low = [-10, 10, -np.inf, -5, -np.inf, -np.inf, -45, -45, 40, -10, 40] + high = [-5, 11, 5, np.inf, 40, -40, 40, -40, 45, np.inf, np.inf] + x = stats.truncnorm.rvs(low, high, size=(5, len(low))) + assert np.shape(x) == (5, len(low)) + assert_(np.all(low <= x.min(axis=0))) + assert_(np.all(x.max(axis=0) <= high)) + + def test_rvs_Generator(self): + # check that rvs can use a Generator + if hasattr(np.random, "default_rng"): + stats.truncnorm.rvs(-10, -5, size=5, + random_state=np.random.default_rng()) + + def test_logcdf_gh17064(self): + # regression test for gh-17064 - avoid roundoff error for logcdfs ~0 + a = np.array([-np.inf, -np.inf, -8, -np.inf, 10]) + b = np.array([np.inf, np.inf, 8, 10, np.inf]) + x = np.array([10, 7.5, 7.5, 9, 20]) + expected = [-7.619853024160525e-24, -3.190891672910947e-14, + -3.128682067168231e-14, -1.1285122074235991e-19, + -3.61374964828753e-66] + assert_allclose(stats.truncnorm(a, b).logcdf(x), expected) + assert_allclose(stats.truncnorm(-b, -a).logsf(-x), expected) + + def test_moments_gh18634(self): + # gh-18634 reported that moments 5 and higher didn't work; check that + # this is resolved + res = stats.truncnorm(-2, 3).moment(5) + # From Mathematica: + # Moment[TruncatedDistribution[{-2, 3}, NormalDistribution[]], 5] + ref = 1.645309620208361 + assert_allclose(res, ref) + + + class TestGenLogistic: + + # Expected values computed with mpmath with 50 digits of precision. + @pytest.mark.parametrize('x, expected', [(-1000, -1499.5945348918917), + (-125, -187.09453489189184), + (0, -1.3274028432916989), + (100, -99.59453489189184), + (1000, -999.5945348918918)]) + def test_logpdf(self, x, expected): + c = 1.5 + logp = stats.genlogistic.logpdf(x, c) + assert_allclose(logp, expected, rtol=1e-13) + + # Expected values computed with mpmath with 50 digits of precision + # from mpmath import mp + # mp.dps = 50 + # def entropy_mp(c): + # c = mp.mpf(c) + # return float(-mp.log(c)+mp.one+mp.digamma(c + mp.one) + mp.euler) + + @pytest.mark.parametrize('c, ref', [(1e-100, 231.25850929940458), + (1e-4, 10.21050485336338), + (1e8, 1.577215669901533), + (1e100, 1.5772156649015328)]) + def test_entropy(self, c, ref): + assert_allclose(stats.genlogistic.entropy(c), ref, rtol=5e-15) + + # Expected values computed with mpmath with 50 digits of precision + # from mpmath import mp + # mp.dps = 1000 + # + # def genlogistic_cdf_mp(x, c): + # x = mp.mpf(x) + # c = mp.mpf(c) + # return (mp.one + mp.exp(-x)) ** (-c) + # + # def genlogistic_sf_mp(x, c): + # return mp.one - genlogistic_cdf_mp(x, c) + # + # x, c, ref = 100, 0.02, -7.440151952041672e-46 + # print(float(mp.log(genlogistic_cdf_mp(x, c)))) + # ppf/isf reference values generated by passing in `ref` (`q` is produced) + + @pytest.mark.parametrize('x, c, ref', [(200, 10, 1.3838965267367375e-86), + (500, 20, 1.424915281348257e-216)]) + def test_sf(self, x, c, ref): + assert_allclose(stats.genlogistic.sf(x, c), ref, rtol=1e-14) + + @pytest.mark.parametrize('q, c, ref', [(0.01, 200, 9.898441467379765), + (0.001, 2, 7.600152115573173)]) + def test_isf(self, q, c, ref): + assert_allclose(stats.genlogistic.isf(q, c), ref, rtol=5e-16) + + @pytest.mark.parametrize('q, c, ref', [(0.5, 200, 5.6630969187064615), + (0.99, 20, 7.595630231412436)]) + def test_ppf(self, q, c, ref): + assert_allclose(stats.genlogistic.ppf(q, c), ref, rtol=5e-16) + + @pytest.mark.parametrize('x, c, ref', [(100, 0.02, -7.440151952041672e-46), + (50, 20, -3.857499695927835e-21)]) + def test_logcdf(self, x, c, ref): + assert_allclose(stats.genlogistic.logcdf(x, 
c), ref, rtol=1e-15) + + + class TestHypergeom: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50)) + assert_(numpy.all(vals >= 0) & + numpy.all(vals <= 3)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.hypergeom.rvs(20, 3, 10) + assert_(isinstance(val, int)) + val = stats.hypergeom(20, 3, 10).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_precision(self): + # comparison number from mpmath + M = 2500 + n = 50 + N = 500 + tot = M + good = n + hgpmf = stats.hypergeom.pmf(2, tot, good, N) + assert_almost_equal(hgpmf, 0.0010114963068932233, 11) + + def test_args(self): + # test correct output for corner cases of arguments + # see gh-2325 + assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11) + assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11) + + assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11) + assert_almost_equal(stats.hypergeom.pmf(1, 2, 0, 2), 0.0, 11) + + def test_cdf_above_one(self): + # for some values of parameters, hypergeom cdf was >1, see gh-2238 + assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0) + + def test_precision2(self): + # Test hypergeom precision for large numbers. See #1218. + # Results compared with those from R. + oranges = 9.9e4 + pears = 1.1e5 + fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4 + quantile = 2e4 + res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten) + for eaten in fruits_eaten] + expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32, + 8.265601e-11, 0.1237904, 1]) + assert_allclose(res, expected, atol=0, rtol=5e-7) + + # Test with array_like first argument + quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4] + res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4) + expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69] + assert_allclose(res2, expected2, atol=0, rtol=5e-7) + + def test_entropy(self): + # Simple tests of entropy. + hg = stats.hypergeom(4, 1, 1) + h = hg.entropy() + expected_p = np.array([0.75, 0.25]) + expected_h = -np.sum(xlogy(expected_p, expected_p)) + assert_allclose(h, expected_h) + + hg = stats.hypergeom(1, 1, 1) + h = hg.entropy() + assert_equal(h, 0.0) + + def test_logsf(self): + # Test logsf for very large numbers. See issue #4982 + # Results compare with those from R (v3.2.0): + # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE) + # -2239.771 + + k = 1e4 + M = 1e7 + n = 1e6 + N = 5e4 + + result = stats.hypergeom.logsf(k, M, n, N) + expected = -2239.771 # From R + assert_almost_equal(result, expected, decimal=3) + + k = 1 + M = 1600 + n = 600 + N = 300 + + result = stats.hypergeom.logsf(k, M, n, N) + expected = -2.566567e-68 # From R + assert_almost_equal(result, expected, decimal=15) + + def test_logcdf(self): + # Test logcdf for very large numbers. 
See issue #8692 + # Results compare with those from R (v3.3.2): + # phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE) + # -5273.335 + + k = 1 + M = 1e7 + n = 1e6 + N = 5e4 + + result = stats.hypergeom.logcdf(k, M, n, N) + expected = -5273.335 # From R + assert_almost_equal(result, expected, decimal=3) + + # Same example as in issue #8692 + k = 40 + M = 1600 + n = 50 + N = 300 + + result = stats.hypergeom.logcdf(k, M, n, N) + expected = -7.565148879229e-23 # From R + assert_almost_equal(result, expected, decimal=15) + + k = 125 + M = 1600 + n = 250 + N = 500 + + result = stats.hypergeom.logcdf(k, M, n, N) + expected = -4.242688e-12 # From R + assert_almost_equal(result, expected, decimal=15) + + # test broadcasting robustness based on reviewer + # concerns in PR 9603; using an array version of + # the example from issue #8692 + k = np.array([40, 40, 40]) + M = 1600 + n = 50 + N = 300 + + result = stats.hypergeom.logcdf(k, M, n, N) + expected = np.full(3, -7.565148879229e-23) # filled from R result + assert_almost_equal(result, expected, decimal=15) + + def test_mean_gh18511(self): + # gh-18511 reported that the `mean` was incorrect for large arguments; + # check that this is resolved + M = 390_000 + n = 370_000 + N = 12_000 + + hm = stats.hypergeom.mean(M, n, N) + rm = n / M * N + assert_allclose(hm, rm) + + def test_sf_gh18506(self): + # gh-18506 reported that `sf` was incorrect for large population; + # check that this is resolved + n = 10 + N = 10**5 + i = np.arange(5, 15) + population_size = 10.**i + p = stats.hypergeom.sf(n - 1, population_size, N, n) + assert np.all(p > 0) + assert np.all(np.diff(p) < 0) + + +class TestLoggamma: + + # Expected cdf values were computed with mpmath. For given x and c, + # x = mpmath.mpf(x) + # c = mpmath.mpf(c) + # cdf = mpmath.gammainc(c, 0, mpmath.exp(x), + # regularized=True) + @pytest.mark.parametrize('x, c, cdf', + [(1, 2, 0.7546378854206702), + (-1, 14, 6.768116452566383e-18), + (-745.1, 0.001, 0.4749605142005238), + (-800, 0.001, 0.44958802911019136), + (-725, 0.1, 3.4301205868273265e-32), + (-740, 0.75, 1.0074360436599631e-241)]) + def test_cdf_ppf(self, x, c, cdf): + p = stats.loggamma.cdf(x, c) + assert_allclose(p, cdf, rtol=1e-13) + y = stats.loggamma.ppf(cdf, c) + assert_allclose(y, x, rtol=1e-13) + + # Expected sf values were computed with mpmath. For given x and c, + # x = mpmath.mpf(x) + # c = mpmath.mpf(c) + # sf = mpmath.gammainc(c, mpmath.exp(x), mpmath.inf, + # regularized=True) + @pytest.mark.parametrize('x, c, sf', + [(4, 1.5, 1.6341528919488565e-23), + (6, 100, 8.23836829202024e-74), + (-800, 0.001, 0.5504119708898086), + (-743, 0.0025, 0.8437131370024089)]) + def test_sf_isf(self, x, c, sf): + s = stats.loggamma.sf(x, c) + assert_allclose(s, sf, rtol=1e-13) + y = stats.loggamma.isf(sf, c) + assert_allclose(y, x, rtol=1e-13) + + def test_logpdf(self): + # Test logpdf with x=-500, c=2. ln(gamma(2)) = 0, and + # exp(-500) ~= 7e-218, which is far smaller than the ULP + # of c*x=-1000, so logpdf(-500, 2) = c*x - exp(x) - ln(gamma(2)) + # should give -1000.0. + lp = stats.loggamma.logpdf(-500, 2) + assert_allclose(lp, -1000.0, rtol=1e-14) + + def test_stats(self): + # The following precomputed values are from the table in section 2.2 + # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing + # Chan (thesis, McMaster University, 1993). + table = np.array([ + # c, mean, var, skew, exc. kurt. 
+ 0.5, -1.9635, 4.9348, -1.5351, 4.0000, + 1.0, -0.5772, 1.6449, -1.1395, 2.4000, + 12.0, 2.4427, 0.0869, -0.2946, 0.1735, + ]).reshape(-1, 5) + for c, mean, var, skew, kurt in table: + computed = stats.loggamma.stats(c, moments='msvk') + assert_array_almost_equal(computed, [mean, var, skew, kurt], + decimal=4) + + @pytest.mark.parametrize('c', [0.1, 0.001]) + def test_rvs(self, c): + # Regression test for gh-11094. + x = stats.loggamma.rvs(c, size=100000) + # Before gh-11094 was fixed, the case with c=0.001 would + # generate many -inf values. + assert np.isfinite(x).all() + # Crude statistical test. About half the values should be + # less than the median and half greater than the median. + med = stats.loggamma.median(c) + btest = stats.binomtest(np.count_nonzero(x < med), len(x)) + ci = btest.proportion_ci(confidence_level=0.999) + assert ci.low < 0.5 < ci.high + + @pytest.mark.parametrize("c, ref", + [(1e-8, 19.420680753952364), + (1, 1.5772156649015328), + (1e4, -3.186214986116763), + (1e10, -10.093986931748889), + (1e100, -113.71031611649761)]) + def test_entropy(self, c, ref): + + # Reference values were calculated with mpmath + # from mpmath import mp + # mp.dps = 500 + # def loggamma_entropy_mpmath(c): + # c = mp.mpf(c) + # return float(mp.log(mp.gamma(c)) + c * (mp.one - mp.digamma(c))) + + assert_allclose(stats.loggamma.entropy(c), ref, rtol=1e-14) + + +class TestJohnsonsu: + # reference values were computed via mpmath + # from mpmath import mp + # mp.dps = 50 + # def johnsonsu_sf(x, a, b): + # x = mp.mpf(x) + # a = mp.mpf(a) + # b = mp.mpf(b) + # return float(mp.ncdf(-(a + b * mp.log(x + mp.sqrt(x*x + 1))))) + # Order is x, a, b, sf, isf tol + # (Can't expect full precision when the ISF input is very nearly 1) + cases = [(-500, 1, 1, 0.9999999982660072, 1e-8), + (2000, 1, 1, 7.426351000595343e-21, 5e-14), + (100000, 1, 1, 4.046923979269977e-40, 5e-14)] + + @pytest.mark.parametrize("case", cases) + def test_sf_isf(self, case): + x, a, b, sf, tol = case + assert_allclose(stats.johnsonsu.sf(x, a, b), sf, rtol=5e-14) + assert_allclose(stats.johnsonsu.isf(sf, a, b), x, rtol=tol) + + +class TestJohnsonb: + # reference values were computed via mpmath + # from mpmath import mp + # mp.dps = 50 + # def johnsonb_sf(x, a, b): + # x = mp.mpf(x) + # a = mp.mpf(a) + # b = mp.mpf(b) + # return float(mp.ncdf(-(a + b * mp.log(x/(mp.one - x))))) + # Order is x, a, b, sf, isf atol + # (Can't expect full precision when the ISF input is very nearly 1) + cases = [(1e-4, 1, 1, 0.9999999999999999, 1e-7), + (0.9999, 1, 1, 8.921114313932308e-25, 5e-14), + (0.999999, 1, 1, 5.815197487181902e-50, 5e-14)] + + @pytest.mark.parametrize("case", cases) + def test_sf_isf(self, case): + x, a, b, sf, tol = case + assert_allclose(stats.johnsonsb.sf(x, a, b), sf, rtol=5e-14) + assert_allclose(stats.johnsonsb.isf(sf, a, b), x, atol=tol) + + +class TestLogistic: + # gh-6226 + def test_cdf_ppf(self): + x = np.linspace(-20, 20) + y = stats.logistic.cdf(x) + xx = stats.logistic.ppf(y) + assert_allclose(x, xx) + + def test_sf_isf(self): + x = np.linspace(-20, 20) + y = stats.logistic.sf(x) + xx = stats.logistic.isf(y) + assert_allclose(x, xx) + + def test_extreme_values(self): + # p is chosen so that 1 - (1 - p) == p in double precision + p = 9.992007221626409e-16 + desired = 34.53957599234088 + assert_allclose(stats.logistic.ppf(1 - p), desired) + assert_allclose(stats.logistic.isf(p), desired) + + def test_logpdf_basic(self): + logp = stats.logistic.logpdf([-15, 0, 10]) + # Expected values computed with mpmath with 50 
digits of precision. + expected = [-15.000000611804547, + -1.3862943611198906, + -10.000090797798434] + assert_allclose(logp, expected, rtol=1e-13) + + def test_logpdf_extreme_values(self): + logp = stats.logistic.logpdf([800, -800]) + # For such large arguments, logpdf(x) = -abs(x) when computed + # with 64 bit floating point. + assert_equal(logp, [-800, -800]) + + @pytest.mark.parametrize("loc_rvs,scale_rvs", [(0.4484955, 0.10216821), + (0.62918191, 0.74367064)]) + def test_fit(self, loc_rvs, scale_rvs): + data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs) + + # test that result of fit method is the same as optimization + def func(input, data): + a, b = input + n = len(data) + x1 = np.sum(np.exp((data - a) / b) / + (1 + np.exp((data - a) / b))) - n / 2 + x2 = np.sum(((data - a) / b) * + ((np.exp((data - a) / b) - 1) / + (np.exp((data - a) / b) + 1))) - n + return x1, x2 + + expected_solution = root(func, stats.logistic._fitstart(data), args=( + data,)).x + fit_method = stats.logistic.fit(data) + + # other than computational variances, the fit method and the solution + # to this system of equations are equal + assert_allclose(fit_method, expected_solution, atol=1e-30) + + def test_fit_comp_optimizer(self): + data = stats.logistic.rvs(size=100, loc=0.5, scale=2) + _assert_less_or_close_loglike(stats.logistic, data) + _assert_less_or_close_loglike(stats.logistic, data, floc=1) + _assert_less_or_close_loglike(stats.logistic, data, fscale=1) + + @pytest.mark.parametrize('testlogcdf', [True, False]) + def test_logcdfsf_tails(self, testlogcdf): + # Test either logcdf or logsf. By symmetry, we can use the same + # expected values for both by switching the sign of x for logsf. + x = np.array([-10000, -800, 17, 50, 500]) + if testlogcdf: + y = stats.logistic.logcdf(x) + else: + y = stats.logistic.logsf(-x) + # The expected values were computed with mpmath. + expected = [-10000.0, -800.0, -4.139937633089748e-08, + -1.9287498479639178e-22, -7.124576406741286e-218] + assert_allclose(y, expected, rtol=2e-15) + + def test_fit_gh_18176(self): + # logistic.fit returned `scale < 0` for this data. Check that this has + # been fixed. 
+ data = np.array([-459, 37, 43, 45, 45, 48, 54, 55, 58] + + [59] * 3 + [61] * 9) + # If scale were negative, NLLF would be infinite, so this would fail + _assert_less_or_close_loglike(stats.logistic, data) + + +class TestLogser: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.logser.rvs(0.75, size=(2, 50)) + assert_(numpy.all(vals >= 1)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.logser.rvs(0.75) + assert_(isinstance(val, int)) + val = stats.logser(0.75).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_pmf_small_p(self): + m = stats.logser.pmf(4, 1e-20) + # The expected value was computed using mpmath: + # >>> import mpmath + # >>> mpmath.mp.dps = 64 + # >>> k = 4 + # >>> p = mpmath.mpf('1e-20') + # >>> float(-(p**k)/k/mpmath.log(1-p)) + # 2.5e-61 + # It is also clear from noticing that for very small p, + # log(1-p) is approximately -p, and the formula becomes + # p**(k-1) / k + assert_allclose(m, 2.5e-61) + + def test_mean_small_p(self): + m = stats.logser.mean(1e-8) + # The expected mean was computed using mpmath: + # >>> import mpmath + # >>> mpmath.dps = 60 + # >>> p = mpmath.mpf('1e-8') + # >>> float(-p / ((1 - p)*mpmath.log(1 - p))) + # 1.000000005 + assert_allclose(m, 1.000000005) + + +class TestGumbel_r_l: + @pytest.fixture(scope='function') + def rng(self): + return np.random.default_rng(1234) + + @pytest.mark.parametrize("dist", [stats.gumbel_r, stats.gumbel_l]) + @pytest.mark.parametrize("loc_rvs", [-1, 0, 1]) + @pytest.mark.parametrize("scale_rvs", [.1, 1, 5]) + @pytest.mark.parametrize('fix_loc, fix_scale', + ([True, False], [False, True])) + def test_fit_comp_optimizer(self, dist, loc_rvs, scale_rvs, + fix_loc, fix_scale, rng): + data = dist.rvs(size=100, loc=loc_rvs, scale=scale_rvs, + random_state=rng) + + kwds = dict() + # the fixed location and scales are arbitrarily modified to not be + # close to the true value. + if fix_loc: + kwds['floc'] = loc_rvs * 2 + if fix_scale: + kwds['fscale'] = scale_rvs * 2 + + # test that the gumbel_* fit method is better than super method + _assert_less_or_close_loglike(dist, data, **kwds) + + @pytest.mark.parametrize("dist, sgn", [(stats.gumbel_r, 1), + (stats.gumbel_l, -1)]) + def test_fit(self, dist, sgn): + z = sgn*np.array([3, 3, 3, 3, 3, 3, 3, 3.00000001]) + loc, scale = dist.fit(z) + # The expected values were computed with mpmath with 60 digits + # of precision. + assert_allclose(loc, sgn*3.0000000001667906) + assert_allclose(scale, 1.2495222465145514e-09, rtol=1e-6) + + +class TestPareto: + def test_stats(self): + # Check the stats() method with some simple values. Also check + # that the calculations do not trigger RuntimeWarnings. 
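+ # For pareto with shape b, the mean is finite only for b > 1, the variance + # for b > 2, the skewness for b > 3 and the kurtosis for b > 4; outside + # those ranges the moments below are inf or nan.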
+ with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + + m, v, s, k = stats.pareto.stats(0.5, moments='mvsk') + assert_equal(m, np.inf) + assert_equal(v, np.inf) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(1.0, moments='mvsk') + assert_equal(m, np.inf) + assert_equal(v, np.inf) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(1.5, moments='mvsk') + assert_equal(m, 3.0) + assert_equal(v, np.inf) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(2.0, moments='mvsk') + assert_equal(m, 2.0) + assert_equal(v, np.inf) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(2.5, moments='mvsk') + assert_allclose(m, 2.5 / 1.5) + assert_allclose(v, 2.5 / (1.5*1.5*0.5)) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(3.0, moments='mvsk') + assert_allclose(m, 1.5) + assert_allclose(v, 0.75) + assert_equal(s, np.nan) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(3.5, moments='mvsk') + assert_allclose(m, 3.5 / 2.5) + assert_allclose(v, 3.5 / (2.5*2.5*1.5)) + assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5)) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(4.0, moments='mvsk') + assert_allclose(m, 4.0 / 3.0) + assert_allclose(v, 4.0 / 18.0) + assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0)) + assert_equal(k, np.nan) + + m, v, s, k = stats.pareto.stats(4.5, moments='mvsk') + assert_allclose(m, 4.5 / 3.5) + assert_allclose(v, 4.5 / (3.5*3.5*2.5)) + assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5)) + assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5)) + + def test_sf(self): + x = 1e9 + b = 2 + scale = 1.5 + p = stats.pareto.sf(x, b, loc=0, scale=scale) + expected = (scale/x)**b # 2.25e-18 + assert_allclose(p, expected) + + @pytest.fixture(scope='function') + def rng(self): + return np.random.default_rng(1234) + + @pytest.mark.filterwarnings("ignore:invalid value encountered in " + "double_scalars") + @pytest.mark.parametrize("rvs_shape", [1, 2]) + @pytest.mark.parametrize("rvs_loc", [0, 2]) + @pytest.mark.parametrize("rvs_scale", [1, 5]) + def test_fit(self, rvs_shape, rvs_loc, rvs_scale, rng): + data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale, + loc=rvs_loc, random_state=rng) + + # shape can still be fixed with multiple names + shape_mle_analytical1 = stats.pareto.fit(data, floc=0, f0=1.04)[0] + shape_mle_analytical2 = stats.pareto.fit(data, floc=0, fix_b=1.04)[0] + shape_mle_analytical3 = stats.pareto.fit(data, floc=0, fb=1.04)[0] + assert (shape_mle_analytical1 == shape_mle_analytical2 == + shape_mle_analytical3 == 1.04) + + # data can be shifted with changes to `loc` + data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale, + loc=(rvs_loc + 2), random_state=rng) + shape_mle_a, loc_mle_a, scale_mle_a = stats.pareto.fit(data, floc=2) + assert_equal(scale_mle_a + 2, data.min()) + + data_shift = data - 2 + ndata = data_shift.shape[0] + assert_equal(shape_mle_a, + ndata / np.sum(np.log(data_shift/data_shift.min()))) + assert_equal(loc_mle_a, 2) + + @pytest.mark.parametrize("rvs_shape", [.1, 2]) + @pytest.mark.parametrize("rvs_loc", [0, 2]) + @pytest.mark.parametrize("rvs_scale", [1, 5]) + @pytest.mark.parametrize('fix_shape, fix_loc, fix_scale', + [p for p in product([True, False], repeat=3) + if False in p]) + @np.errstate(invalid="ignore") + def test_fit_MLE_comp_optimizer(self, rvs_shape, rvs_loc, rvs_scale, 
+ fix_shape, fix_loc, fix_scale, rng): + data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale, + loc=rvs_loc, random_state=rng) + + kwds = {} + if fix_shape: + kwds['f0'] = rvs_shape + if fix_loc: + kwds['floc'] = rvs_loc + if fix_scale: + kwds['fscale'] = rvs_scale + + _assert_less_or_close_loglike(stats.pareto, data, **kwds) + + @np.errstate(invalid="ignore") + def test_fit_known_bad_seed(self): + # Tests a known seed and set of parameters that would produce a result + # that would violate the support of Pareto if the fit method did not + # check the constraint `fscale + floc < min(data)`. + shape, location, scale = 1, 0, 1 + data = stats.pareto.rvs(shape, location, scale, size=100, + random_state=np.random.default_rng(2535619)) + _assert_less_or_close_loglike(stats.pareto, data) + + def test_fit_warnings(self): + assert_fit_warnings(stats.pareto) + # `floc` that causes invalid negative data + assert_raises(FitDataError, stats.pareto.fit, [1, 2, 3], floc=2) + # `floc` and `fscale` combination causes invalid data + assert_raises(FitDataError, stats.pareto.fit, [5, 2, 3], floc=1, + fscale=3) + + def test_negative_data(self, rng): + data = stats.pareto.rvs(loc=-130, b=1, size=100, random_state=rng) + assert_array_less(data, 0) + # The purpose of this test is to make sure that no runtime warnings are + # raised for all-negative data, not to check the output of the fit + # method. Other methods test the output but have to silence warnings + # from the super method. + _ = stats.pareto.fit(data) + + + class TestGenpareto: + def test_ab(self): + # c >= 0: a, b = [0, inf] + for c in [1., 0.]: + c = np.asarray(c) + a, b = stats.genpareto._get_support(c) + assert_equal(a, 0.) + assert_(np.isposinf(b)) + + # c < 0: a=0, b=1/|c| + c = np.asarray(-2.) + a, b = stats.genpareto._get_support(c) + assert_allclose([a, b], [0., 0.5]) + + def test_c0(self): + # with c=0, genpareto reduces to the exponential distribution + rv = stats.genpareto(c=0.) + x = np.linspace(0, 10., 30) + assert_allclose(rv.pdf(x), stats.expon.pdf(x)) + assert_allclose(rv.cdf(x), stats.expon.cdf(x)) + assert_allclose(rv.sf(x), stats.expon.sf(x)) + + q = np.linspace(0., 1., 10) + assert_allclose(rv.ppf(q), stats.expon.ppf(q)) + + def test_cm1(self): + # with c=-1, genpareto reduces to the uniform distr on [0, 1] + rv = stats.genpareto(c=-1.) + x = np.linspace(0, 10., 30) + assert_allclose(rv.pdf(x), stats.uniform.pdf(x)) + assert_allclose(rv.cdf(x), stats.uniform.cdf(x)) + assert_allclose(rv.sf(x), stats.uniform.sf(x)) + + q = np.linspace(0., 1., 10) + assert_allclose(rv.ppf(q), stats.uniform.ppf(q)) + + # logpdf(1., c=-1) should be zero + assert_allclose(rv.logpdf(1), 0) + + def test_x_inf(self): + # make sure x=inf is handled gracefully + rv = stats.genpareto(c=0.1) + assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.]) + assert_(np.isneginf(rv.logpdf(np.inf))) + + rv = stats.genpareto(c=0.) + assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.]) + assert_(np.isneginf(rv.logpdf(np.inf))) + + rv = stats.genpareto(c=-1.) 
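+ # For c = -1 the support is the finite interval [0, 1], so x = inf again + # lies outside the support.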
+ assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.]) + assert_(np.isneginf(rv.logpdf(np.inf))) + + def test_c_continuity(self): + # pdf is continuous at c=0, -1 + x = np.linspace(0, 10, 30) + for c in [0, -1]: + pdf0 = stats.genpareto.pdf(x, c) + for dc in [1e-14, -1e-14]: + pdfc = stats.genpareto.pdf(x, c + dc) + assert_allclose(pdf0, pdfc, atol=1e-12) + + cdf0 = stats.genpareto.cdf(x, c) + for dc in [1e-14, -1e-14]: + cdfc = stats.genpareto.cdf(x, c + dc) + assert_allclose(cdf0, cdfc, atol=1e-12) + + def test_c_continuity_ppf(self): + q = np.r_[np.logspace(1e-12, 0.01, base=0.1), + np.linspace(0.01, 1, 30, endpoint=False), + 1. - np.logspace(1e-12, 0.01, base=0.1)] + for c in [0., -1.]: + ppf0 = stats.genpareto.ppf(q, c) + for dc in [1e-14, -1e-14]: + ppfc = stats.genpareto.ppf(q, c + dc) + assert_allclose(ppf0, ppfc, atol=1e-12) + + def test_c_continuity_isf(self): + q = np.r_[np.logspace(1e-12, 0.01, base=0.1), + np.linspace(0.01, 1, 30, endpoint=False), + 1. - np.logspace(1e-12, 0.01, base=0.1)] + for c in [0., -1.]: + isf0 = stats.genpareto.isf(q, c) + for dc in [1e-14, -1e-14]: + isfc = stats.genpareto.isf(q, c + dc) + assert_allclose(isf0, isfc, atol=1e-12) + + def test_cdf_ppf_roundtrip(self): + # this should pass with machine precision. hat tip @pbrod + q = np.r_[np.logspace(1e-12, 0.01, base=0.1), + np.linspace(0.01, 1, 30, endpoint=False), + 1. - np.logspace(1e-12, 0.01, base=0.1)] + for c in [1e-8, -1e-18, 1e-15, -1e-15]: + assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c), + q, atol=1e-15) + + def test_logsf(self): + logp = stats.genpareto.logsf(1e10, .01, 0, 1) + assert_allclose(logp, -1842.0680753952365) + + # Values in 'expected_stats' are + # [mean, variance, skewness, excess kurtosis]. + @pytest.mark.parametrize( + 'c, expected_stats', + [(0, [1, 1, 2, 6]), + (1/4, [4/3, 32/9, 10/np.sqrt(2), np.nan]), + (1/9, [9/8, (81/64)*(9/7), (10/9)*np.sqrt(7), 754/45]), + (-1, [1/2, 1/12, 0, -6/5])]) + def test_stats(self, c, expected_stats): + result = stats.genpareto.stats(c, moments='mvsk') + assert_allclose(result, expected_stats, rtol=1e-13, atol=1e-15) + + def test_var(self): + # Regression test for gh-11168. 
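+ # var(c) = 1/((1 - c)**2 * (1 - 2*c)), which is approximately 1 + 4*c + # for small c.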
+ v = stats.genpareto.var(1e-8) + assert_allclose(v, 1.000000040000001, rtol=1e-13) + + +class TestPearson3: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.pearson3.rvs(0.1, size=(2, 50)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllFloat']) + val = stats.pearson3.rvs(0.5) + assert_(isinstance(val, float)) + val = stats.pearson3(0.5).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllFloat']) + assert_(len(val) == 3) + + def test_pdf(self): + vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2]) + assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]), + atol=1e-6) + vals = stats.pearson3.pdf(-3, 0.1) + assert_allclose(vals, np.array([0.00313791]), atol=1e-6) + vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1) + assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092, + 0.39885918, 0.23413173]), atol=1e-6) + + def test_cdf(self): + vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2]) + assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]), + atol=1e-6) + vals = stats.pearson3.cdf(-3, 0.1) + assert_allclose(vals, [0.00082256], atol=1e-6) + vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1) + assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01, + 5.06649130e-01, 8.41442111e-01], atol=1e-6) + + def test_negative_cdf_bug_11186(self): + # incorrect CDFs for negative skews in gh-11186; fixed in gh-12640 + # Also check vectorization w/ negative, zero, and positive skews + skews = [-3, -1, 0, 0.5] + x_eval = 0.5 + neg_inf = -30 # avoid RuntimeWarning caused by np.log(0) + cdfs = stats.pearson3.cdf(x_eval, skews) + int_pdfs = [quad(stats.pearson3(skew).pdf, neg_inf, x_eval)[0] + for skew in skews] + assert_allclose(cdfs, int_pdfs) + + def test_return_array_bug_11746(self): + # pearson3.moment was returning size 0 or 1 array instead of float + # The first moment is equal to the loc, which defaults to zero + moment = stats.pearson3.moment(1, 2) + assert_equal(moment, 0) + assert isinstance(moment, np.number) + + moment = stats.pearson3.moment(1, 0.000001) + assert_equal(moment, 0) + assert isinstance(moment, np.number) + + def test_ppf_bug_17050(self): + # incorrect PPF for negative skews were reported in gh-17050 + # Check that this is fixed (even in the array case) + skews = [-3, -1, 0, 0.5] + x_eval = 0.5 + res = stats.pearson3.ppf(stats.pearson3.cdf(x_eval, skews), skews) + assert_allclose(res, x_eval) + + # Negation of the skew flips the distribution about the origin, so + # the following should hold + skew = np.array([[-0.5], [1.5]]) + x = np.linspace(-2, 2) + assert_allclose(stats.pearson3.pdf(x, skew), + stats.pearson3.pdf(-x, -skew)) + assert_allclose(stats.pearson3.cdf(x, skew), + stats.pearson3.sf(-x, -skew)) + assert_allclose(stats.pearson3.ppf(x, skew), + -stats.pearson3.isf(x, -skew)) + + def test_sf(self): + # reference values were computed via the reference distribution, e.g. + # mp.dps = 50; Pearson3(skew=skew).sf(x). Check positive, negative, + # and zero skew due to branching. 
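+ # skew = 0 reduces pearson3 to the standard normal, which the final + # assertion checks against stats.norm directly.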
+ skew = [0.1, 0.5, 1.0, -0.1] + x = [5.0, 10.0, 50.0, 8.0] + ref = [1.64721926440872e-06, 8.271911573556123e-11, + 1.3149506021756343e-40, 2.763057937820296e-21] + assert_allclose(stats.pearson3.sf(x, skew), ref, rtol=2e-14) + assert_allclose(stats.pearson3.sf(x, 0), stats.norm.sf(x), rtol=2e-14) + + +class TestKappa4: + def test_cdf_genpareto(self): + # h = 1 and k != 0 is generalized Pareto + x = [0.0, 0.1, 0.2, 0.5] + h = 1.0 + for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0, + 1.9]: + vals = stats.kappa4.cdf(x, h, k) + # shape parameter is opposite what is expected + vals_comp = stats.genpareto.cdf(x, -k) + assert_allclose(vals, vals_comp) + + def test_cdf_genextreme(self): + # h = 0 and k != 0 is generalized extreme value + x = np.linspace(-5, 5, 10) + h = 0.0 + k = np.linspace(-3, 3, 10) + vals = stats.kappa4.cdf(x, h, k) + vals_comp = stats.genextreme.cdf(x, k) + assert_allclose(vals, vals_comp) + + def test_cdf_expon(self): + # h = 1 and k = 0 is exponential + x = np.linspace(0, 10, 10) + h = 1.0 + k = 0.0 + vals = stats.kappa4.cdf(x, h, k) + vals_comp = stats.expon.cdf(x) + assert_allclose(vals, vals_comp) + + def test_cdf_gumbel_r(self): + # h = 0 and k = 0 is gumbel_r + x = np.linspace(-5, 5, 10) + h = 0.0 + k = 0.0 + vals = stats.kappa4.cdf(x, h, k) + vals_comp = stats.gumbel_r.cdf(x) + assert_allclose(vals, vals_comp) + + def test_cdf_logistic(self): + # h = -1 and k = 0 is logistic + x = np.linspace(-5, 5, 10) + h = -1.0 + k = 0.0 + vals = stats.kappa4.cdf(x, h, k) + vals_comp = stats.logistic.cdf(x) + assert_allclose(vals, vals_comp) + + def test_cdf_uniform(self): + # h = 1 and k = 1 is uniform + x = np.linspace(-5, 5, 10) + h = 1.0 + k = 1.0 + vals = stats.kappa4.cdf(x, h, k) + vals_comp = stats.uniform.cdf(x) + assert_allclose(vals, vals_comp) + + def test_integers_ctor(self): + # regression test for gh-7416: _argcheck fails for integer h and k + # in numpy 1.12 + stats.kappa4(1, 2) + + +class TestPoisson: + def setup_method(self): + np.random.seed(1234) + + def test_pmf_basic(self): + # Basic case + ln2 = np.log(2) + vals = stats.poisson.pmf([0, 1, 2], ln2) + expected = [0.5, ln2/2, ln2**2/4] + assert_allclose(vals, expected) + + def test_mu0(self): + # Edge case: mu=0 + vals = stats.poisson.pmf([0, 1, 2], 0) + expected = [1, 0, 0] + assert_array_equal(vals, expected) + + interval = stats.poisson.interval(0.95, 0) + assert_equal(interval, (0, 0)) + + def test_rvs(self): + vals = stats.poisson.rvs(0.5, size=(2, 50)) + assert_(numpy.all(vals >= 0)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.poisson.rvs(0.5) + assert_(isinstance(val, int)) + val = stats.poisson(0.5).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_stats(self): + mu = 16.0 + result = stats.poisson.stats(mu, moments='mvsk') + assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu]) + + mu = np.array([0.0, 1.0, 2.0]) + result = stats.poisson.stats(mu, moments='mvsk') + expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5]) + assert_allclose(result, expected) + + +class TestKSTwo: + def setup_method(self): + np.random.seed(1234) + + def test_cdf(self): + for n in [1, 2, 3, 10, 100, 1000]: + # Test x-values: + # 0, 1/2n, where the cdf should be 0 + # 1/n, where the cdf should be n!/n^n + # 0.5, where the cdf should match ksone.cdf + # 1-1/n, where cdf = 1-2/n^n + # 1, where cdf == 1 + # (E.g. 
Exact values given by Eqn 1 in Simard / L'Ecuyer) + x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1]) + v1 = (1.0/n)**n + lg = scipy.special.gammaln(n+1) + elg = (np.exp(lg) if v1 != 0 else 0) + expected = np.array([0, 0, v1 * elg, + 1 - 2*stats.ksone.sf(0.5, n), + max(1 - 2*v1, 0.0), + 1.0]) + vals_cdf = stats.kstwo.cdf(x, n) + assert_allclose(vals_cdf, expected) + + def test_sf(self): + x = np.linspace(0, 1, 11) + for n in [1, 2, 3, 10, 100, 1000]: + # Same x values as in test_cdf, and use sf = 1 - cdf + x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1]) + v1 = (1.0/n)**n + lg = scipy.special.gammaln(n+1) + elg = (np.exp(lg) if v1 != 0 else 0) + expected = np.array([1.0, 1.0, + 1 - v1 * elg, + 2*stats.ksone.sf(0.5, n), + min(2*v1, 1.0), 0]) + vals_sf = stats.kstwo.sf(x, n) + assert_allclose(vals_sf, expected) + + def test_cdf_sqrtn(self): + # For fixed a, cdf(a/sqrt(n), n) -> kstwobign(a) as n->infinity + # cdf(a/sqrt(n), n) is an increasing function of n (and a) + # Check that the function is indeed increasing (allowing for some + # small floating point and algorithm differences.) + x = np.linspace(0, 2, 11)[1:] + ns = [50, 100, 200, 400, 1000, 2000] + for _x in x: + xn = _x / np.sqrt(ns) + probs = stats.kstwo.cdf(xn, ns) + diffs = np.diff(probs) + assert_array_less(diffs, 1e-8) + + def test_cdf_sf(self): + x = np.linspace(0, 1, 11) + for n in [1, 2, 3, 10, 100, 1000]: + vals_cdf = stats.kstwo.cdf(x, n) + vals_sf = stats.kstwo.sf(x, n) + assert_array_almost_equal(vals_cdf, 1 - vals_sf) + + def test_cdf_sf_sqrtn(self): + x = np.linspace(0, 1, 11) + for n in [1, 2, 3, 10, 100, 1000]: + xn = x / np.sqrt(n) + vals_cdf = stats.kstwo.cdf(xn, n) + vals_sf = stats.kstwo.sf(xn, n) + assert_array_almost_equal(vals_cdf, 1 - vals_sf) + + def test_ppf_of_cdf(self): + x = np.linspace(0, 1, 11) + for n in [1, 2, 3, 10, 100, 1000]: + xn = x[x > 0.5/n] + vals_cdf = stats.kstwo.cdf(xn, n) + # CDFs close to 1 are better dealt with using the SF + cond = (0 < vals_cdf) & (vals_cdf < 0.99) + vals = stats.kstwo.ppf(vals_cdf, n) + assert_allclose(vals[cond], xn[cond], rtol=1e-4) + + def test_isf_of_sf(self): + x = np.linspace(0, 1, 11) + for n in [1, 2, 3, 10, 100, 1000]: + xn = x[x > 0.5/n] + vals_isf = stats.kstwo.isf(xn, n) + cond = (0 < vals_isf) & (vals_isf < 1.0) + vals = stats.kstwo.sf(vals_isf, n) + assert_allclose(vals[cond], xn[cond], rtol=1e-4) + + def test_ppf_of_cdf_sqrtn(self): + x = np.linspace(0, 1, 11) + for n in [1, 2, 3, 10, 100, 1000]: + xn = (x / np.sqrt(n))[x > 0.5/n] + vals_cdf = stats.kstwo.cdf(xn, n) + cond = (0 < vals_cdf) & (vals_cdf < 1.0) + vals = stats.kstwo.ppf(vals_cdf, n) + assert_allclose(vals[cond], xn[cond]) + + def test_isf_of_sf_sqrtn(self): + x = np.linspace(0, 1, 11) + for n in [1, 2, 3, 10, 100, 1000]: + xn = (x / np.sqrt(n))[x > 0.5/n] + vals_sf = stats.kstwo.sf(xn, n) + # SFs close to 1 are better dealt with using the CDF + cond = (0 < vals_sf) & (vals_sf < 0.95) + vals = stats.kstwo.isf(vals_sf, n) + assert_allclose(vals[cond], xn[cond]) + + def test_ppf(self): + probs = np.linspace(0, 1, 11)[1:] + for n in [1, 2, 3, 10, 100, 1000]: + xn = stats.kstwo.ppf(probs, n) + vals_cdf = stats.kstwo.cdf(xn, n) + assert_allclose(vals_cdf, probs) + + def test_simard_lecuyer_table1(self): + # Compute the cdf for values near the mean of the distribution. + # The mean u ~ log(2)*sqrt(pi/(2n)) + # Compute for x in [u/4, u/3, u/2, u, 2u, 3u] + # This is the computation of Table 1 of Simard, R., L'Ecuyer, P. (2011) + # "Computing the Two-Sided Kolmogorov-Smirnov Distribution". 
+ # Except that the values below are not from the published table, but + # were generated using an independent SageMath implementation of + # Durbin's algorithm (with the exponentiation and scaling of + # Marsaglia/Tsang/Wang's version) using 500 bit arithmetic. + # Some of the values in the published table have relative + # errors greater than 1e-4. + ns = [10, 50, 100, 200, 500, 1000] + ratios = np.array([1.0/4, 1.0/3, 1.0/2, 1, 2, 3]) + expected = np.array([ + [1.92155292e-08, 5.72933228e-05, 2.15233226e-02, 6.31566589e-01, + 9.97685592e-01, 9.99999942e-01], + [2.28096224e-09, 1.99142563e-05, 1.42617934e-02, 5.95345542e-01, + 9.96177701e-01, 9.99998662e-01], + [1.00201886e-09, 1.32673079e-05, 1.24608594e-02, 5.86163220e-01, + 9.95866877e-01, 9.99998240e-01], + [4.93313022e-10, 9.52658029e-06, 1.12123138e-02, 5.79486872e-01, + 9.95661824e-01, 9.99997964e-01], + [2.37049293e-10, 6.85002458e-06, 1.01309221e-02, 5.73427224e-01, + 9.95491207e-01, 9.99997750e-01], + [1.56990874e-10, 5.71738276e-06, 9.59725430e-03, 5.70322692e-01, + 9.95409545e-01, 9.99997657e-01] + ]) + for idx, n in enumerate(ns): + x = ratios * np.log(2) * np.sqrt(np.pi/2/n) + vals_cdf = stats.kstwo.cdf(x, n) + assert_allclose(vals_cdf, expected[idx], rtol=1e-5) + + +class TestZipf: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.zipf.rvs(1.5, size=(2, 50)) + assert_(numpy.all(vals >= 1)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.zipf.rvs(1.5) + assert_(isinstance(val, int)) + val = stats.zipf(1.5).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + + def test_moments(self): + # n-th moment is finite iff a > n + 1 + m, v = stats.zipf.stats(a=2.8) + assert_(np.isfinite(m)) + assert_equal(v, np.inf) + + s, k = stats.zipf.stats(a=4.8, moments='sk') + assert_(not np.isfinite([s, k]).all()) + + +class TestDLaplace: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + vals = stats.dlaplace.rvs(1.5, size=(2, 50)) + assert_(numpy.shape(vals) == (2, 50)) + assert_(vals.dtype.char in typecodes['AllInteger']) + val = stats.dlaplace.rvs(1.5) + assert_(isinstance(val, int)) + val = stats.dlaplace(1.5).rvs(3) + assert_(isinstance(val, numpy.ndarray)) + assert_(val.dtype.char in typecodes['AllInteger']) + assert_(stats.dlaplace.rvs(0.8) is not None) + + def test_stats(self): + # compare the explicit formulas w/ direct summation using pmf + a = 1. + dl = stats.dlaplace(a) + m, v, s, k = dl.stats('mvsk') + + N = 37 + xx = np.arange(-N, N+1) + pp = dl.pmf(xx) + m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4) + assert_equal((m, s), (0, 0)) + assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8) + + def test_stats2(self): + a = np.log(2.) 
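+ # With a = log(2) the pmf is proportional to 2**(-abs(k)); the closed-form + # variance 2*exp(-a)/(1 - exp(-a))**2 then evaluates to exactly 4.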
+ dl = stats.dlaplace(a) + m, v, s, k = dl.stats('mvsk') + assert_equal((m, s), (0., 0.)) + assert_allclose((v, k), (4., 3.25)) + + + class TestInvgauss: + def setup_method(self): + np.random.seed(1234) + + @pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale", + [(2, 0, 1), (4.635, 4.362, 6.303)]) + def test_fit(self, rvs_mu, rvs_loc, rvs_scale): + data = stats.invgauss.rvs(size=100, mu=rvs_mu, + loc=rvs_loc, scale=rvs_scale) + # Analytical MLEs are calculated with a formula when `floc` is fixed + mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc) + + data = data - rvs_loc + mu_temp = np.mean(data) + scale_mle = len(data) / (np.sum(data**(-1) - mu_temp**(-1))) + mu_mle = mu_temp/scale_mle + + # `mu` and `scale` match analytical formula + assert_allclose(mu_mle, mu, atol=1e-15, rtol=1e-15) + assert_allclose(scale_mle, scale, atol=1e-15, rtol=1e-15) + assert_equal(loc, rvs_loc) + data = stats.invgauss.rvs(size=100, mu=rvs_mu, + loc=rvs_loc, scale=rvs_scale) + # fixed parameters are returned + mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc - 1, + fscale=rvs_scale + 1) + assert_equal(rvs_scale + 1, scale) + assert_equal(rvs_loc - 1, loc) + + # shape can still be fixed with multiple names + shape_mle1 = stats.invgauss.fit(data, fmu=1.04)[0] + shape_mle2 = stats.invgauss.fit(data, fix_mu=1.04)[0] + shape_mle3 = stats.invgauss.fit(data, f0=1.04)[0] + assert shape_mle1 == shape_mle2 == shape_mle3 == 1.04 + + @pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale", + [(2, 0, 1), (6.311, 3.225, 4.520)]) + def test_fit_MLE_comp_optimizer(self, rvs_mu, rvs_loc, rvs_scale): + rng = np.random.RandomState(1234) + data = stats.invgauss.rvs(size=100, mu=rvs_mu, + loc=rvs_loc, scale=rvs_scale, random_state=rng) + + super_fit = super(type(stats.invgauss), stats.invgauss).fit + # fitting without `floc` uses superclass fit method + super_fitted = super_fit(data) + invgauss_fit = stats.invgauss.fit(data) + assert_equal(super_fitted, invgauss_fit) + + # fitting with `fmu` uses the superclass fit method + super_fitted = super_fit(data, floc=0, fmu=2) + invgauss_fit = stats.invgauss.fit(data, floc=0, fmu=2) + assert_equal(super_fitted, invgauss_fit) + + # fixed `floc` uses analytical formula and provides better fit than + # super method + _assert_less_or_close_loglike(stats.invgauss, data, floc=rvs_loc) + + # fixed `floc` not resulting in invalid data < 0 uses analytical + # formulas and provides a better fit than the super method + assert np.all((data - (rvs_loc - 1)) > 0) + _assert_less_or_close_loglike(stats.invgauss, data, floc=rvs_loc - 1) + + # fixed `floc` to an arbitrary number, 0, still provides a better fit + # than the super method + _assert_less_or_close_loglike(stats.invgauss, data, floc=0) + + # fixed `fscale` to an arbitrary number still provides a better fit + # than the super method + _assert_less_or_close_loglike(stats.invgauss, data, floc=rvs_loc, + fscale=np.random.rand(1)[0]) + + def test_fit_raise_errors(self): + assert_fit_warnings(stats.invgauss) + # FitDataError is raised for invalid negative data + with pytest.raises(FitDataError): + stats.invgauss.fit([1, 2, 3], floc=2) + + def test_cdf_sf(self): + # Regression tests for gh-13614. + # Ground truth from R's statmod library (pinvgauss), e.g. + # library(statmod) + # options(digits=15) + # mu = c(4.17022005e-04, 7.20324493e-03, 1.14374817e-06, + # 3.02332573e-03, 1.46755891e-03) + # print(pinvgauss(5, mu, 1)) + + # make sure a finite value is returned when mu is very small. 
see + # GH-13614 + mu = [4.17022005e-04, 7.20324493e-03, 1.14374817e-06, + 3.02332573e-03, 1.46755891e-03] + expected = [1, 1, 1, 1, 1] + actual = stats.invgauss.cdf(0.4, mu=mu) + assert_equal(expected, actual) + + # test if the function can distinguish small left/right tail + # probabilities from zero. + cdf_actual = stats.invgauss.cdf(0.001, mu=1.05) + assert_allclose(cdf_actual, 4.65246506892667e-219) + sf_actual = stats.invgauss.sf(110, mu=1.05) + assert_allclose(sf_actual, 4.12851625944048e-25) + + # test if x does not cause numerical issues when mu is very small + # and x is close to mu in value. + + # slightly smaller than mu + actual = stats.invgauss.cdf(0.00009, 0.0001) + assert_allclose(actual, 2.9458022894924e-26) + + # slightly bigger than mu + actual = stats.invgauss.cdf(0.000102, 0.0001) + assert_allclose(actual, 0.976445540507925) + + def test_logcdf_logsf(self): + # Regression tests for improvements made in gh-13616. + # Ground truth from R's statmod library (pinvgauss), e.g. + # library(statmod) + # options(digits=15) + # print(pinvgauss(0.001, 1.05, 1, log.p=TRUE, lower.tail=FALSE)) + + # test if logcdf and logsf can compute values too small to + # be represented on the unlogged scale. See: gh-13616 + logcdf = stats.invgauss.logcdf(0.0001, mu=1.05) + assert_allclose(logcdf, -5003.87872590367) + logcdf = stats.invgauss.logcdf(110, 1.05) + assert_allclose(logcdf, -4.12851625944087e-25) + logsf = stats.invgauss.logsf(0.001, mu=1.05) + assert_allclose(logsf, -4.65246506892676e-219) + logsf = stats.invgauss.logsf(110, 1.05) + assert_allclose(logsf, -56.1467092416426) + + # from mpmath import mp + # mp.dps = 100 + # mu = mp.mpf(1e-2) + # ref = (1/2 * mp.log(2 * mp.pi * mp.e * mu**3) + # - 3/2* mp.exp(2/mu) * mp.e1(2/mu)) + @pytest.mark.parametrize("mu, ref", [(2e-8, -25.172361826883957), + (1e-3, -8.943444010642972), + (1e-2, -5.4962796152622335), + (1e8, 3.3244822568873476), + (1e100, 3.32448280139689)]) + def test_entropy(self, mu, ref): + assert_allclose(stats.invgauss.entropy(mu), ref, rtol=5e-14) + + +class TestLaplace: + @pytest.mark.parametrize("rvs_loc", [-5, 0, 1, 2]) + @pytest.mark.parametrize("rvs_scale", [1, 2, 3, 10]) + def test_fit(self, rvs_loc, rvs_scale): + # tests that various inputs follow expected behavior + # for a variety of `loc` and `scale`. + rng = np.random.RandomState(1234) + data = stats.laplace.rvs(size=100, loc=rvs_loc, scale=rvs_scale, + random_state=rng) + + # MLE estimates are given by + loc_mle = np.median(data) + scale_mle = np.sum(np.abs(data - loc_mle)) / len(data) + + # standard outputs should match analytical MLE formulas + loc, scale = stats.laplace.fit(data) + assert_allclose(loc, loc_mle, atol=1e-15, rtol=1e-15) + assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15) + + # fixed parameter should use analytical formula for other + loc, scale = stats.laplace.fit(data, floc=loc_mle) + assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15) + loc, scale = stats.laplace.fit(data, fscale=scale_mle) + assert_allclose(loc, loc_mle) + + # test with non-mle fixed parameter + # create scale with non-median loc + loc = rvs_loc * 2 + scale_mle = np.sum(np.abs(data - loc)) / len(data) + + # fixed loc to non median, scale should match + # scale calculation with modified loc + loc, scale = stats.laplace.fit(data, floc=loc) + assert_equal(scale_mle, scale) + + # fixed scale created with non median loc, + # loc output should still be the data median. 
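+        # (The median minimizes sum(|x - loc|) regardless of the scale, so
+        # fixing `fscale` should leave the location MLE unchanged.)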
+ loc, scale = stats.laplace.fit(data, fscale=scale_mle) + assert_equal(loc_mle, loc) + + # error raised when both `floc` and `fscale` are fixed + assert_raises(RuntimeError, stats.laplace.fit, data, floc=loc_mle, + fscale=scale_mle) + + # error is raised with non-finite values + assert_raises(ValueError, stats.laplace.fit, [np.nan]) + assert_raises(ValueError, stats.laplace.fit, [np.inf]) + + @pytest.mark.parametrize("rvs_loc,rvs_scale", [(-5, 10), + (10, 5), + (0.5, 0.2)]) + def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale): + rng = np.random.RandomState(1234) + data = stats.laplace.rvs(size=1000, loc=rvs_loc, scale=rvs_scale, + random_state=rng) + + # the log-likelihood function for laplace is given by + def ll(loc, scale, data): + return -1 * (- (len(data)) * np.log(2*scale) - + (1/scale)*np.sum(np.abs(data - loc))) + + # test that the objective function result of the analytical MLEs is + # less than or equal to that of the numerically optimized estimate + loc, scale = stats.laplace.fit(data) + loc_opt, scale_opt = super(type(stats.laplace), + stats.laplace).fit(data) + ll_mle = ll(loc, scale, data) + ll_opt = ll(loc_opt, scale_opt, data) + assert ll_mle < ll_opt or np.allclose(ll_mle, ll_opt, + atol=1e-15, rtol=1e-15) + + def test_fit_simple_non_random_data(self): + data = np.array([1.0, 1.0, 3.0, 5.0, 8.0, 14.0]) + # with `floc` fixed to 6, scale should be 4. + loc, scale = stats.laplace.fit(data, floc=6) + assert_allclose(scale, 4, atol=1e-15, rtol=1e-15) + # with `fscale` fixed to 6, loc should be 4. + loc, scale = stats.laplace.fit(data, fscale=6) + assert_allclose(loc, 4, atol=1e-15, rtol=1e-15) + + def test_sf_cdf_extremes(self): + # These calculations should not generate warnings. + x = 1000 + p0 = stats.laplace.cdf(-x) + # The exact value is smaller than can be represented with + # 64 bit floating point, so the expected result is 0. + assert p0 == 0.0 + # The closest 64 bit floating point representation of the + # exact value is 1.0. + p1 = stats.laplace.cdf(x) + assert p1 == 1.0 + + p0 = stats.laplace.sf(x) + # The exact value is smaller than can be represented with + # 64 bit floating point, so the expected result is 0. + assert p0 == 0.0 + # The closest 64 bit floating point representation of the + # exact value is 1.0. + p1 = stats.laplace.sf(-x) + assert p1 == 1.0 + + def test_sf(self): + x = 200 + p = stats.laplace.sf(x) + assert_allclose(p, np.exp(-x)/2, rtol=1e-13) + + def test_isf(self): + p = 1e-25 + x = stats.laplace.isf(p) + assert_allclose(x, -np.log(2*p), rtol=1e-13) + + +class TestLogLaplace: + + def test_sf(self): + # reference values were computed via the reference distribution, e.g. + # mp.dps = 100; LogLaplace(c=c).sf(x). + c = np.array([2.0, 3.0, 5.0]) + x = np.array([1e-5, 1e10, 1e15]) + ref = [0.99999999995, 5e-31, 5e-76] + assert_allclose(stats.loglaplace.sf(x, c), ref, rtol=1e-15) + + def test_isf(self): + # reference values were computed via the reference distribution, e.g. + # mp.dps = 100; LogLaplace(c=c).isf(q). + c = 3.25 + q = [0.8, 0.1, 1e-10, 1e-20, 1e-40] + ref = [0.7543222539245642, 1.6408455124660906, 964.4916294395846, + 1151387.578354072, 1640845512466.0906] + assert_allclose(stats.loglaplace.isf(q, c), ref, rtol=1e-14) + + @pytest.mark.parametrize('r', [1, 2, 3, 4]) + def test_moments_stats(self, r): + mom = 'mvsk'[r - 1] + c = np.arange(0.5, r + 0.5, 0.5) + + # r-th non-central moment is infinite if |r| >= c. 
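+        # For reference, the standard log-Laplace moment result gives
+        # E[X**r] = c**2 / (c**2 - r**2) for |r| < c, with a pole at r = c;
+        # every c used here satisfies c <= r, so the moment diverges.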
+        assert_allclose(stats.loglaplace.moment(r, c), np.inf)
+
+        # r-th non-central moment is non-finite (inf or nan) if r >= c.
+        assert not np.any(np.isfinite(stats.loglaplace.stats(c, moments=mom)))
+
+    @pytest.mark.parametrize("c", [0.5, 1.0, 2.0])
+    @pytest.mark.parametrize("loc, scale", [(-1.2, 3.45)])
+    @pytest.mark.parametrize("fix_c", [True, False])
+    @pytest.mark.parametrize("fix_scale", [True, False])
+    def test_fit_analytic_mle(self, c, loc, scale, fix_c, fix_scale):
+        # Test that the analytical MLE produces no worse result than the
+        # generic (numerical) MLE.
+
+        rng = np.random.default_rng(6762668991392531563)
+        data = stats.loglaplace.rvs(c, loc=loc, scale=scale, size=100,
+                                    random_state=rng)
+
+        kwds = {'floc': loc}
+        if fix_c:
+            kwds['fc'] = c
+        if fix_scale:
+            kwds['fscale'] = scale
+        nfree = 3 - len(kwds)
+
+        if nfree == 0:
+            error_msg = "All parameters fixed. There is nothing to optimize."
+            with pytest.raises((RuntimeError, ValueError), match=error_msg):
+                stats.loglaplace.fit(data, **kwds)
+            return
+
+        _assert_less_or_close_loglike(stats.loglaplace, data, **kwds)
+
+
+class TestPowerlaw:
+
+    # In the following data, `sf` was computed with mpmath.
+    @pytest.mark.parametrize('x, a, sf',
+                             [(0.25, 2.0, 0.9375),
+                              (0.99609375, 1/256, 1.528855235208108e-05)])
+    def test_sf(self, x, a, sf):
+        assert_allclose(stats.powerlaw.sf(x, a), sf, rtol=1e-15)
+
+    @pytest.fixture(scope='function')
+    def rng(self):
+        return np.random.default_rng(1234)
+
+    @pytest.mark.parametrize("rvs_shape", [.1, .5, .75, 1, 2])
+    @pytest.mark.parametrize("rvs_loc", [-1, 0, 1])
+    @pytest.mark.parametrize("rvs_scale", [.1, 1, 5])
+    @pytest.mark.parametrize('fix_shape, fix_loc, fix_scale',
+                             [p for p in product([True, False], repeat=3)
+                              if False in p])
+    def test_fit_MLE_comp_optimizer(self, rvs_shape, rvs_loc, rvs_scale,
+                                    fix_shape, fix_loc, fix_scale, rng):
+        data = stats.powerlaw.rvs(size=250, a=rvs_shape, loc=rvs_loc,
+                                  scale=rvs_scale, random_state=rng)
+
+        kwds = dict()
+        if fix_shape:
+            kwds['f0'] = rvs_shape
+        if fix_loc:
+            kwds['floc'] = np.nextafter(data.min(), -np.inf)
+        if fix_scale:
+            kwds['fscale'] = rvs_scale
+
+        # Numerical result may equal analytical result if some code path
+        # of the analytical routine makes use of numerical optimization.
+        _assert_less_or_close_loglike(stats.powerlaw, data, **kwds,
+                                      maybe_identical=True)
+
+    def test_problem_case(self):
+        # An observed problem with the test method indicated that some fixed
+        # scale values could cause bad results; this is now corrected.
+        a = 2.50002862645130604506
+        location = 0.0
+        scale = 35.249023299873095
+
+        data = stats.powerlaw.rvs(a=a, loc=location, scale=scale, size=100,
+                                  random_state=np.random.default_rng(5))
+
+        kwds = {'fscale': np.ptp(data) * 2}
+
+        _assert_less_or_close_loglike(stats.powerlaw, data, **kwds)
+
+    def test_fit_warnings(self):
+        assert_fit_warnings(stats.powerlaw)
+        # test for error when `fscale + floc <= np.max(data)` is not satisfied
+        msg = r" Maximum likelihood estimation with 'powerlaw' requires"
+        with assert_raises(FitDataError, match=msg):
+            stats.powerlaw.fit([1, 2, 4], floc=0, fscale=3)
+
+        # test for error when `data - floc >= 0` is not satisfied
+        msg = r" Maximum likelihood estimation with 'powerlaw' requires"
+        with assert_raises(FitDataError, match=msg):
+            stats.powerlaw.fit([1, 2, 4], floc=2)
+
+        # test for error when the fixed location is not less than `min(data)`.
+ msg = r" Maximum likelihood estimation with 'powerlaw' requires" + with assert_raises(FitDataError, match=msg): + stats.powerlaw.fit([1, 2, 4], floc=1) + + # test for when fixed scale is less than or equal to range of data + msg = r"Negative or zero `fscale` is outside" + with assert_raises(ValueError, match=msg): + stats.powerlaw.fit([1, 2, 4], fscale=-3) + + # test for when fixed scale is less than or equal to range of data + msg = r"`fscale` must be greater than the range of data." + with assert_raises(ValueError, match=msg): + stats.powerlaw.fit([1, 2, 4], fscale=3) + + def test_minimum_data_zero_gh17801(self): + # gh-17801 reported an overflow error when the minimum value of the + # data is zero. Check that this problem is resolved. + data = [0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6] + dist = stats.powerlaw + with np.errstate(over='ignore'): + _assert_less_or_close_loglike(dist, data) + + +class TestPowerLogNorm: + + # reference values were computed via mpmath + # from mpmath import mp + # mp.dps = 80 + # def powerlognorm_sf_mp(x, c, s): + # x = mp.mpf(x) + # c = mp.mpf(c) + # s = mp.mpf(s) + # return mp.ncdf(-mp.log(x) / s)**c + # + # def powerlognormal_cdf_mp(x, c, s): + # return mp.one - powerlognorm_sf_mp(x, c, s) + # + # x, c, s = 100, 20, 1 + # print(float(powerlognorm_sf_mp(x, c, s))) + + @pytest.mark.parametrize("x, c, s, ref", + [(100, 20, 1, 1.9057100820561928e-114), + (1e-3, 20, 1, 0.9999999999507617), + (1e-3, 0.02, 1, 0.9999999999999508), + (1e22, 0.02, 1, 6.50744044621611e-12)]) + def test_sf(self, x, c, s, ref): + assert_allclose(stats.powerlognorm.sf(x, c, s), ref, rtol=1e-13) + + # reference values were computed via mpmath using the survival + # function above (passing in `ref` and getting `q`). + @pytest.mark.parametrize("q, c, s, ref", + [(0.9999999587870905, 0.02, 1, 0.01), + (6.690376686108851e-233, 20, 1, 1000)]) + def test_isf(self, q, c, s, ref): + assert_allclose(stats.powerlognorm.isf(q, c, s), ref, rtol=5e-11) + + @pytest.mark.parametrize("x, c, s, ref", + [(1e25, 0.02, 1, 0.9999999999999963), + (1e-6, 0.02, 1, 2.054921078040843e-45), + (1e-6, 200, 1, 2.0549210780408428e-41), + (0.3, 200, 1, 0.9999999999713368)]) + def test_cdf(self, x, c, s, ref): + assert_allclose(stats.powerlognorm.cdf(x, c, s), ref, rtol=3e-14) + + # reference values were computed via mpmath + # from mpmath import mp + # mp.dps = 50 + # def powerlognorm_pdf_mpmath(x, c, s): + # x = mp.mpf(x) + # c = mp.mpf(c) + # s = mp.mpf(s) + # res = (c/(x * s) * mp.npdf(mp.log(x)/s) * + # mp.ncdf(-mp.log(x)/s)**(c - mp.one)) + # return float(res) + + @pytest.mark.parametrize("x, c, s, ref", + [(1e22, 0.02, 1, 6.5954987852335016e-34), + (1e20, 1e-3, 1, 1.588073750563988e-22), + (1e40, 1e-3, 1, 1.3179391812506349e-43)]) + def test_pdf(self, x, c, s, ref): + assert_allclose(stats.powerlognorm.pdf(x, c, s), ref, rtol=3e-12) + + +class TestPowerNorm: + + # survival function references were computed with mpmath via + # from mpmath import mp + # x = mp.mpf(x) + # c = mp.mpf(x) + # float(mp.ncdf(-x)**c) + + @pytest.mark.parametrize("x, c, ref", + [(9, 1, 1.1285884059538405e-19), + (20, 2, 7.582445786569958e-178), + (100, 0.02, 3.330957891903866e-44), + (200, 0.01, 1.3004759092324774e-87)]) + def test_sf(self, x, c, ref): + assert_allclose(stats.powernorm.sf(x, c), ref, rtol=1e-13) + + # inverse survival function references were computed with mpmath via + # from mpmath import mp + # def isf_mp(q, c): + # q = mp.mpf(q) + # c = mp.mpf(c) + # arg = q**(mp.one / c) + # return float(-mp.sqrt(2) * mp.erfinv(mp.mpf(2.) 
* arg - mp.one)) + + @pytest.mark.parametrize("q, c, ref", + [(1e-5, 20, -0.15690800666514138), + (0.99999, 100, -5.19933666203545), + (0.9999, 0.02, -2.576676052143387), + (5e-2, 0.02, 17.089518110222244), + (1e-18, 2, 5.9978070150076865), + (1e-50, 5, 6.361340902404057)]) + def test_isf(self, q, c, ref): + assert_allclose(stats.powernorm.isf(q, c), ref, rtol=5e-12) + + # CDF reference values were computed with mpmath via + # from mpmath import mp + # def cdf_mp(x, c): + # x = mp.mpf(x) + # c = mp.mpf(c) + # return float(mp.one - mp.ncdf(-x)**c) + + @pytest.mark.parametrize("x, c, ref", + [(-12, 9, 1.598833900869911e-32), + (2, 9, 0.9999999999999983), + (-20, 9, 2.4782617067456103e-88), + (-5, 0.02, 5.733032242841443e-09), + (-20, 0.02, 5.507248237212467e-91)]) + def test_cdf(self, x, c, ref): + assert_allclose(stats.powernorm.cdf(x, c), ref, rtol=5e-14) + + +class TestInvGamma: + def test_invgamma_inf_gh_1866(self): + # invgamma's moments are only finite for a>n + # specific numbers checked w/ boost 1.54 + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) + mvsk = stats.invgamma.stats(a=19.31, moments='mvsk') + expected = [0.05461496450, 0.0001723162534, 1.020362676, + 2.055616582] + assert_allclose(mvsk, expected) + + a = [1.1, 3.1, 5.6] + mvsk = stats.invgamma.stats(a=a, moments='mvsk') + expected = ([10., 0.476190476, 0.2173913043], # mmm + [np.inf, 0.2061430632, 0.01312749422], # vvv + [np.nan, 41.95235392, 2.919025532], # sss + [np.nan, np.nan, 24.51923076]) # kkk + for x, y in zip(mvsk, expected): + assert_almost_equal(x, y) + + def test_cdf_ppf(self): + # gh-6245 + x = np.logspace(-2.6, 0) + y = stats.invgamma.cdf(x, 1) + xx = stats.invgamma.ppf(y, 1) + assert_allclose(x, xx) + + def test_sf_isf(self): + # gh-6245 + if sys.maxsize > 2**32: + x = np.logspace(2, 100) + else: + # Invgamme roundtrip on 32-bit systems has relative accuracy + # ~1e-15 until x=1e+15, and becomes inf above x=1e+18 + x = np.logspace(2, 18) + + y = stats.invgamma.sf(x, 1) + xx = stats.invgamma.isf(y, 1) + assert_allclose(x, xx, rtol=1.0) + + @pytest.mark.parametrize("a, ref", + [(100000000.0, -26.21208257605721), + (1e+100, -343.9688254159022)]) + def test_large_entropy(self, a, ref): + # The reference values were calculated with mpmath: + # from mpmath import mp + # mp.dps = 500 + + # def invgamma_entropy(a): + # a = mp.mpf(a) + # h = a + mp.loggamma(a) - (mp.one + a) * mp.digamma(a) + # return float(h) + assert_allclose(stats.invgamma.entropy(a), ref, rtol=1e-15) + + +class TestF: + def test_endpoints(self): + # Compute the pdf at the left endpoint dst.a. 
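+        # (A hand check of the expected value: for dfn = 2 the pdf reduces
+        # to (dfd / (2*x + dfd))**((dfd + 2)/2), which is exactly 1.0 at
+        # x = 0 for any dfd.)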
+        data = [[stats.f, (2, 1), 1.0]]
+        ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
+        correct = [_correct_ for _f, _args, _correct_ in data]
+        assert_array_almost_equal(ans, correct)
+
+    def test_f_moments(self):
+        # n-th moment of F distributions is only finite for n < dfd / 2
+        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
+        assert_(np.isfinite(m))
+        assert_(np.isfinite(v))
+        assert_(np.isfinite(s))
+        assert_(not np.isfinite(k))
+
+    def test_moments_warnings(self):
+        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
+        with warnings.catch_warnings():
+            warnings.simplefilter('error', RuntimeWarning)
+            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
+
+    def test_stats_broadcast(self):
+        dfn = np.array([[3], [11]])
+        dfd = np.array([11, 12])
+        m, v, s, k = stats.f.stats(dfn=dfn, dfd=dfd, moments='mvsk')
+        m2 = [dfd / (dfd - 2)]*2
+        assert_allclose(m, m2)
+        v2 = 2 * dfd**2 * (dfn + dfd - 2) / dfn / (dfd - 2)**2 / (dfd - 4)
+        assert_allclose(v, v2)
+        s2 = ((2*dfn + dfd - 2) * np.sqrt(8*(dfd - 4)) /
+              ((dfd - 6) * np.sqrt(dfn*(dfn + dfd - 2))))
+        assert_allclose(s, s2)
+        k2num = 12 * (dfn * (5*dfd - 22) * (dfn + dfd - 2) +
+                      (dfd - 4) * (dfd - 2)**2)
+        k2den = dfn * (dfd - 6) * (dfd - 8) * (dfn + dfd - 2)
+        k2 = k2num / k2den
+        assert_allclose(k, k2)
+
+
+class TestStudentT:
+    def test_rvgeneric_std(self):
+        # Regression test for #1191
+        assert_array_almost_equal(stats.t.std([5, 6]),
+                                  [1.29099445, 1.22474487])
+
+    def test_moments_t(self):
+        # regression test for #8786
+        assert_equal(stats.t.stats(df=1, moments='mvsk'),
+                     (np.inf, np.nan, np.nan, np.nan))
+        assert_equal(stats.t.stats(df=1.01, moments='mvsk'),
+                     (0.0, np.inf, np.nan, np.nan))
+        assert_equal(stats.t.stats(df=2, moments='mvsk'),
+                     (0.0, np.inf, np.nan, np.nan))
+        assert_equal(stats.t.stats(df=2.01, moments='mvsk'),
+                     (0.0, 2.01/(2.01-2.0), np.nan, np.inf))
+        assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))
+        assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))
+        assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))
+        assert_equal(stats.t.stats(df=4.01, moments='sk'),
+                     (0.0, 6.0/(4.01 - 4.0)))
+
+    def test_t_entropy(self):
+        df = [1, 2, 25, 100]
+        # Expected values were computed with mpmath.
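+        # The same computation, spelled out (this mirrors the mpmath script
+        # quoted in test_t_extreme_entropy below):
+        # from mpmath import mp
+        # mp.dps = 50
+        # def t_entropy(v):
+        #     v = mp.mpf(v)
+        #     C = (v + mp.one) / 2
+        #     A = C * (mp.digamma(C) - mp.digamma(v / 2))
+        #     B = 0.5 * mp.log(v) + mp.log(mp.beta(v / 2, mp.one / 2))
+        #     return float(A + B)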
+ expected = [2.5310242469692907, 1.9602792291600821, + 1.459327578078393, 1.4289633653182439] + assert_allclose(stats.t.entropy(df), expected, rtol=1e-13) + + @pytest.mark.parametrize("v, ref", + [(100, 1.4289633653182439), + (1e+100, 1.4189385332046727)]) + def test_t_extreme_entropy(self, v, ref): + # Reference values were calculated with mpmath: + # from mpmath import mp + # mp.dps = 500 + # + # def t_entropy(v): + # v = mp.mpf(v) + # C = (v + mp.one) / 2 + # A = C * (mp.digamma(C) - mp.digamma(v / 2)) + # B = 0.5 * mp.log(v) + mp.log(mp.beta(v / 2, mp.one / 2)) + # h = A + B + # return float(h) + assert_allclose(stats.t.entropy(v), ref, rtol=1e-14) + + @pytest.mark.parametrize("methname", ["pdf", "logpdf", "cdf", + "ppf", "sf", "isf"]) + @pytest.mark.parametrize("df_infmask", [[0, 0], [1, 1], [0, 1], + [[0, 1, 0], [1, 1, 1]], + [[1, 0], [0, 1]], + [[0], [1]]]) + def test_t_inf_df(self, methname, df_infmask): + np.random.seed(0) + df_infmask = np.asarray(df_infmask, dtype=bool) + df = np.random.uniform(0, 10, size=df_infmask.shape) + x = np.random.randn(*df_infmask.shape) + df[df_infmask] = np.inf + t_dist = stats.t(df=df, loc=3, scale=1) + t_dist_ref = stats.t(df=df[~df_infmask], loc=3, scale=1) + norm_dist = stats.norm(loc=3, scale=1) + t_meth = getattr(t_dist, methname) + t_meth_ref = getattr(t_dist_ref, methname) + norm_meth = getattr(norm_dist, methname) + res = t_meth(x) + assert_equal(res[df_infmask], norm_meth(x[df_infmask])) + assert_equal(res[~df_infmask], t_meth_ref(x[~df_infmask])) + + @pytest.mark.parametrize("df_infmask", [[0, 0], [1, 1], [0, 1], + [[0, 1, 0], [1, 1, 1]], + [[1, 0], [0, 1]], + [[0], [1]]]) + def test_t_inf_df_stats_entropy(self, df_infmask): + np.random.seed(0) + df_infmask = np.asarray(df_infmask, dtype=bool) + df = np.random.uniform(0, 10, size=df_infmask.shape) + df[df_infmask] = np.inf + res = stats.t.stats(df=df, loc=3, scale=1, moments='mvsk') + res_ex_inf = stats.norm.stats(loc=3, scale=1, moments='mvsk') + res_ex_noinf = stats.t.stats(df=df[~df_infmask], loc=3, scale=1, + moments='mvsk') + for i in range(4): + assert_equal(res[i][df_infmask], res_ex_inf[i]) + assert_equal(res[i][~df_infmask], res_ex_noinf[i]) + + res = stats.t.entropy(df=df, loc=3, scale=1) + res_ex_inf = stats.norm.entropy(loc=3, scale=1) + res_ex_noinf = stats.t.entropy(df=df[~df_infmask], loc=3, scale=1) + assert_equal(res[df_infmask], res_ex_inf) + assert_equal(res[~df_infmask], res_ex_noinf) + + def test_logpdf_pdf(self): + # reference values were computed via the reference distribution, e.g. + # mp.dps = 500; StudentT(df=df).logpdf(x), StudentT(df=df).pdf(x) + x = [1, 1e3, 10, 1] + df = [1e100, 1e50, 1e20, 1] + logpdf_ref = [-1.4189385332046727, -500000.9189385332, + -50.918938533204674, -1.8378770664093456] + pdf_ref = [0.24197072451914334, 0, + 7.69459862670642e-23, 0.15915494309189535] + assert_allclose(stats.t.logpdf(x, df), logpdf_ref, rtol=1e-14) + assert_allclose(stats.t.pdf(x, df), pdf_ref, rtol=1e-14) + + +class TestRvDiscrete: + def setup_method(self): + np.random.seed(1234) + + def test_rvs(self): + states = [-1, 0, 1, 2, 3, 4] + probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0] + samples = 1000 + r = stats.rv_discrete(name='sample', values=(states, probability)) + x = r.rvs(size=samples) + assert_(isinstance(x, numpy.ndarray)) + + for s, p in zip(states, probability): + assert_(abs(sum(x == s)/float(samples) - p) < 0.05) + + x = r.rvs() + assert np.issubdtype(type(x), np.integer) + + def test_entropy(self): + # Basic tests of entropy. 
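+        # The discrete entropy is -sum(p * log(p)); xlogy is used below so
+        # that any p == 0 term contributes 0 rather than nan.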
+ pvals = np.array([0.25, 0.45, 0.3]) + p = stats.rv_discrete(values=([0, 1, 2], pvals)) + expected_h = -sum(xlogy(pvals, pvals)) + h = p.entropy() + assert_allclose(h, expected_h) + + p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0])) + h = p.entropy() + assert_equal(h, 0.0) + + def test_pmf(self): + xk = [1, 2, 4] + pk = [0.5, 0.3, 0.2] + rv = stats.rv_discrete(values=(xk, pk)) + + x = [[1., 4.], + [3., 2]] + assert_allclose(rv.pmf(x), + [[0.5, 0.2], + [0., 0.3]], atol=1e-14) + + def test_cdf(self): + xk = [1, 2, 4] + pk = [0.5, 0.3, 0.2] + rv = stats.rv_discrete(values=(xk, pk)) + + x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5] + expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1] + assert_allclose(rv.cdf(x_values), expected, atol=1e-14) + + # also check scalar arguments + assert_allclose([rv.cdf(xx) for xx in x_values], + expected, atol=1e-14) + + def test_ppf(self): + xk = [1, 2, 4] + pk = [0.5, 0.3, 0.2] + rv = stats.rv_discrete(values=(xk, pk)) + + q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.] + expected = [1, 1, 2, 2, 4, 4] + assert_allclose(rv.ppf(q_values), expected, atol=1e-14) + + # also check scalar arguments + assert_allclose([rv.ppf(q) for q in q_values], + expected, atol=1e-14) + + def test_cdf_ppf_next(self): + # copied and special cased from test_discrete_basic + vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1]) + rv = stats.rv_discrete(values=vals) + + assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8), + rv.xk[1:]) + + def test_multidimension(self): + xk = np.arange(12).reshape((3, 4)) + pk = np.array([[0.1, 0.1, 0.15, 0.05], + [0.1, 0.1, 0.05, 0.05], + [0.1, 0.1, 0.05, 0.05]]) + rv = stats.rv_discrete(values=(xk, pk)) + + assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14) + + def test_bad_input(self): + xk = [1, 2, 3] + pk = [0.5, 0.5] + assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) + + pk = [1, 2, 3] + assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) + + xk = [1, 2, 3] + pk = [0.5, 1.2, -0.7] + assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) + + xk = [1, 2, 3, 4, 5] + pk = [0.3, 0.3, 0.3, 0.3, -0.2] + assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) + + xk = [1, 1] + pk = [0.5, 0.5] + assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) + + def test_shape_rv_sample(self): + # tests added for gh-9565 + + # mismatch of 2d inputs + xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6) + assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) + + # same number of elements, but shapes not compatible + xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6) + assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) + + # same shapes => no error + xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6) + assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6) + + def test_expect1(self): + xk = [1, 2, 4, 6, 7, 11] + pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1] + rv = stats.rv_discrete(values=(xk, pk)) + + assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14) + + def test_expect2(self): + # rv_sample should override _expect. 
Bug report from + # https://stackoverflow.com/questions/63199792 + y = [200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0, + 1100.0, 1200.0, 1300.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0, + 1900.0, 2000.0, 2100.0, 2200.0, 2300.0, 2400.0, 2500.0, 2600.0, + 2700.0, 2800.0, 2900.0, 3000.0, 3100.0, 3200.0, 3300.0, 3400.0, + 3500.0, 3600.0, 3700.0, 3800.0, 3900.0, 4000.0, 4100.0, 4200.0, + 4300.0, 4400.0, 4500.0, 4600.0, 4700.0, 4800.0] + + py = [0.0004, 0.0, 0.0033, 0.006500000000000001, 0.0, 0.0, + 0.004399999999999999, 0.6862, 0.0, 0.0, 0.0, + 0.00019999999999997797, 0.0006000000000000449, + 0.024499999999999966, 0.006400000000000072, + 0.0043999999999999595, 0.019499999999999962, + 0.03770000000000007, 0.01759999999999995, 0.015199999999999991, + 0.018100000000000005, 0.04500000000000004, 0.0025999999999999357, + 0.0, 0.0041000000000001036, 0.005999999999999894, + 0.0042000000000000925, 0.0050000000000000044, + 0.0041999999999999815, 0.0004999999999999449, + 0.009199999999999986, 0.008200000000000096, + 0.0, 0.0, 0.0046999999999999265, 0.0019000000000000128, + 0.0006000000000000449, 0.02510000000000001, 0.0, + 0.007199999999999984, 0.0, 0.012699999999999934, 0.0, 0.0, + 0.008199999999999985, 0.005600000000000049, 0.0] + + rv = stats.rv_discrete(values=(y, py)) + + # check the mean + assert_allclose(rv.expect(), rv.mean(), atol=1e-14) + assert_allclose(rv.expect(), + sum(v * w for v, w in zip(y, py)), atol=1e-14) + + # also check the second moment + assert_allclose(rv.expect(lambda x: x**2), + sum(v**2 * w for v, w in zip(y, py)), atol=1e-14) + + +class TestSkewCauchy: + def test_cauchy(self): + x = np.linspace(-5, 5, 100) + assert_array_almost_equal(stats.skewcauchy.pdf(x, a=0), + stats.cauchy.pdf(x)) + assert_array_almost_equal(stats.skewcauchy.cdf(x, a=0), + stats.cauchy.cdf(x)) + assert_array_almost_equal(stats.skewcauchy.ppf(x, a=0), + stats.cauchy.ppf(x)) + + def test_skewcauchy_R(self): + # options(digits=16) + # library(sgt) + # # lmbda, x contain the values generated for a, x below + # lmbda <- c(0.0976270078546495, 0.430378732744839, 0.2055267521432877, + # 0.0897663659937937, -0.15269040132219, 0.2917882261333122, + # -0.12482557747462, 0.7835460015641595, 0.9273255210020589, + # -0.2331169623484446) + # x <- c(2.917250380826646, 0.2889491975290444, 0.6804456109393229, + # 4.25596638292661, -4.289639418021131, -4.1287070029845925, + # -4.797816025596743, 3.32619845547938, 2.7815675094985046, + # 3.700121482468191) + # pdf = dsgt(x, mu=0, lambda=lambda, sigma=1, q=1/2, mean.cent=FALSE, + # var.adj = sqrt(2)) + # cdf = psgt(x, mu=0, lambda=lambda, sigma=1, q=1/2, mean.cent=FALSE, + # var.adj = sqrt(2)) + # qsgt(cdf, mu=0, lambda=lambda, sigma=1, q=1/2, mean.cent=FALSE, + # var.adj = sqrt(2)) + + np.random.seed(0) + a = np.random.rand(10) * 2 - 1 + x = np.random.rand(10) * 10 - 5 + pdf = [0.039473975217333909, 0.305829714049903223, 0.24140158118994162, + 0.019585772402693054, 0.021436553695989482, 0.00909817103867518, + 0.01658423410016873, 0.071083288030394126, 0.103250045941454524, + 0.013110230778426242] + cdf = [0.87426677718213752, 0.37556468910780882, 0.59442096496538066, + 0.91304659850890202, 0.09631964100300605, 0.03829624330921733, + 0.08245240578402535, 0.72057062945510386, 0.62826415852515449, + 0.95011308463898292] + assert_allclose(stats.skewcauchy.pdf(x, a), pdf) + assert_allclose(stats.skewcauchy.cdf(x, a), cdf) + assert_allclose(stats.skewcauchy.ppf(cdf, a), x) + + +class TestJFSkewT: + def test_compare_t(self): + # Verify that jf_skew_t with a=b 
recovers the t distribution with 2a + # degrees of freedom + a = b = 5 + df = a * 2 + x = [-1.0, 0.0, 1.0, 2.0] + q = [0.0, 0.1, 0.25, 0.75, 0.90, 1.0] + + jf = stats.jf_skew_t(a, b) + t = stats.t(df) + + assert_allclose(jf.pdf(x), t.pdf(x)) + assert_allclose(jf.cdf(x), t.cdf(x)) + assert_allclose(jf.ppf(q), t.ppf(q)) + assert_allclose(jf.stats('mvsk'), t.stats('mvsk')) + + @pytest.fixture + def gamlss_pdf_data(self): + """Sample data points computed using the `ST5` distribution from the + GAMLSS package in R. The pdf has been calculated for (a,b)=(2,3), + (a,b)=(8,4), and (a,b)=(12,13) for x in `np.linspace(-10, 10, 41)`. + + N.B. the `ST5` distribution in R uses an alternative parameterization + in terms of nu and tau, where: + - nu = (a - b) / (a * b * (a + b)) ** 0.5 + - tau = 2 / (a + b) + """ + data = np.load( + Path(__file__).parent / "data/jf_skew_t_gamlss_pdf_data.npy" + ) + return np.rec.fromarrays(data, names="x,pdf,a,b") + + @pytest.mark.parametrize("a,b", [(2, 3), (8, 4), (12, 13)]) + def test_compare_with_gamlss_r(self, gamlss_pdf_data, a, b): + """Compare the pdf with a table of reference values. The table of + reference values was produced using R, where the Jones and Faddy skew + t distribution is available in the GAMLSS package as `ST5`. + """ + data = gamlss_pdf_data[ + (gamlss_pdf_data["a"] == a) & (gamlss_pdf_data["b"] == b) + ] + x, pdf = data["x"], data["pdf"] + assert_allclose(pdf, stats.jf_skew_t(a, b).pdf(x), rtol=1e-12) + +# Test data for TestSkewNorm.test_noncentral_moments() +# The expected noncentral moments were computed by Wolfram Alpha. +# In Wolfram Alpha, enter +# SkewNormalDistribution[0, 1, a] moment +# with `a` replaced by the desired shape parameter. In the results, there +# should be a table of the first four moments. Click on "More" to get more +# moments. The expected moments start with the first moment (order = 1). 
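+# A hypothetical numerical cross-check of the closed forms below (not part
+# of the original table): integrate x**order against the pdf by quadrature.
+# from scipy import integrate
+# def skewnorm_noncentral_moment(order, a):
+#     return integrate.quad(lambda x: x**order * stats.skewnorm.pdf(x, a),
+#                           -np.inf, np.inf)[0]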
+_skewnorm_noncentral_moments = [ + (2, [2*np.sqrt(2/(5*np.pi)), + 1, + 22/5*np.sqrt(2/(5*np.pi)), + 3, + 446/25*np.sqrt(2/(5*np.pi)), + 15, + 2682/25*np.sqrt(2/(5*np.pi)), + 105, + 107322/125*np.sqrt(2/(5*np.pi))]), + (0.1, [np.sqrt(2/(101*np.pi)), + 1, + 302/101*np.sqrt(2/(101*np.pi)), + 3, + (152008*np.sqrt(2/(101*np.pi)))/10201, + 15, + (107116848*np.sqrt(2/(101*np.pi)))/1030301, + 105, + (97050413184*np.sqrt(2/(101*np.pi)))/104060401]), + (-3, [-3/np.sqrt(5*np.pi), + 1, + -63/(10*np.sqrt(5*np.pi)), + 3, + -2529/(100*np.sqrt(5*np.pi)), + 15, + -30357/(200*np.sqrt(5*np.pi)), + 105, + -2428623/(2000*np.sqrt(5*np.pi)), + 945, + -242862867/(20000*np.sqrt(5*np.pi)), + 10395, + -29143550277/(200000*np.sqrt(5*np.pi)), + 135135]), +] + + +class TestSkewNorm: + def setup_method(self): + self.rng = check_random_state(1234) + + def test_normal(self): + # When the skewness is 0 the distribution is normal + x = np.linspace(-5, 5, 100) + assert_array_almost_equal(stats.skewnorm.pdf(x, a=0), + stats.norm.pdf(x)) + + def test_rvs(self): + shape = (3, 4, 5) + x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=self.rng) + assert_equal(shape, x.shape) + + x = stats.skewnorm.rvs(a=-3, size=shape, random_state=self.rng) + assert_equal(shape, x.shape) + + def test_moments(self): + X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2, + random_state=self.rng) + expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)] + computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk') + assert_array_almost_equal(computed, expected, decimal=2) + + X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2, + random_state=self.rng) + expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)] + computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk') + assert_array_almost_equal(computed, expected, decimal=2) + + def test_pdf_large_x(self): + # Triples are [x, a, logpdf(x, a)]. These values were computed + # using Log[PDF[SkewNormalDistribution[0, 1, a], x]] in Wolfram Alpha. + logpdfvals = [ + [40, -1, -1604.834233366398515598970], + [40, -1/2, -1004.142946723741991369168], + [40, 0, -800.9189385332046727417803], + [40, 1/2, -800.2257913526447274323631], + [-40, -1/2, -800.2257913526447274323631], + [-2, 1e7, -2.000000000000199559727173e14], + [2, -1e7, -2.000000000000199559727173e14], + ] + for x, a, logpdfval in logpdfvals: + logp = stats.skewnorm.logpdf(x, a) + assert_allclose(logp, logpdfval, rtol=1e-8) + + def test_cdf_large_x(self): + # Regression test for gh-7746. + # The x values are large enough that the closest 64 bit floating + # point representation of the exact CDF is 1.0. + p = stats.skewnorm.cdf([10, 20, 30], -1) + assert_allclose(p, np.ones(3), rtol=1e-14) + p = stats.skewnorm.cdf(25, 2.5) + assert_allclose(p, 1.0, rtol=1e-14) + + def test_cdf_sf_small_values(self): + # Triples are [x, a, cdf(x, a)]. These values were computed + # using CDF[SkewNormalDistribution[0, 1, a], x] in Wolfram Alpha. + cdfvals = [ + [-8, 1, 3.870035046664392611e-31], + [-4, 2, 8.1298399188811398e-21], + [-2, 5, 1.55326826787106273e-26], + [-9, -1, 2.257176811907681295e-19], + [-10, -4, 1.523970604832105213e-23], + ] + for x, a, cdfval in cdfvals: + p = stats.skewnorm.cdf(x, a) + assert_allclose(p, cdfval, rtol=1e-8) + # For the skew normal distribution, sf(-x, -a) = cdf(x, a). 
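+        # (This identity follows from reflection symmetry: if X is
+        # skewnorm(a), then -X is skewnorm(-a).)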
+        p = stats.skewnorm.sf(-x, -a)
+        assert_allclose(p, cdfval, rtol=1e-8)
+
+    @pytest.mark.parametrize('a, moments', _skewnorm_noncentral_moments)
+    def test_noncentral_moments(self, a, moments):
+        for order, expected in enumerate(moments, start=1):
+            mom = stats.skewnorm.moment(order, a)
+            assert_allclose(mom, expected, rtol=1e-14)
+
+    def test_fit(self):
+        rng = np.random.default_rng(4609813989115202851)
+
+        a, loc, scale = -2, 3.5, 0.5  # arbitrary, valid parameters
+        dist = stats.skewnorm(a, loc, scale)
+        rvs = dist.rvs(size=100, random_state=rng)
+
+        # test that MLE still honors guesses and fixed parameters
+        a2, loc2, scale2 = stats.skewnorm.fit(rvs, -1.5, floc=3)
+        a3, loc3, scale3 = stats.skewnorm.fit(rvs, -1.6, floc=3)
+        assert loc2 == loc3 == 3  # fixed parameter is respected
+        assert a2 != a3  # different guess -> (slightly) different outcome
+        # quality of fit is tested elsewhere
+
+        # test that MoM honors fixed parameters, accepts (but ignores) guesses
+        a4, loc4, scale4 = stats.skewnorm.fit(rvs, 3, fscale=3, method='mm')
+        assert scale4 == 3
+        # because scale was fixed, only the mean and skewness will be matched
+        dist4 = stats.skewnorm(a4, loc4, scale4)
+        res = dist4.stats(moments='ms')
+        ref = np.mean(rvs), stats.skew(rvs)
+        assert_allclose(res, ref)
+
+        # Test behavior when the skew of the data is beyond the maximum
+        # of skewnorm
+        rvs2 = stats.pareto.rvs(1, size=100, random_state=rng)
+
+        # MLE still works
+        res = stats.skewnorm.fit(rvs2)
+        assert np.all(np.isfinite(res))
+
+        # MoM fits variance and skewness
+        a5, loc5, scale5 = stats.skewnorm.fit(rvs2, method='mm')
+        assert np.isinf(a5)
+        # distribution infrastructure doesn't allow infinite shape parameters
+        # into _stats; it just bypasses it and produces NaNs. Calculate
+        # moments manually.
+        m, v = np.mean(rvs2), np.var(rvs2)
+        assert_allclose(m, loc5 + scale5 * np.sqrt(2/np.pi))
+        assert_allclose(v, scale5**2 * (1 - 2 / np.pi))
+
+        # test that MLE and MoM behave as expected under sign changes
+        a6p, loc6p, scale6p = stats.skewnorm.fit(rvs, method='mle')
+        a6m, loc6m, scale6m = stats.skewnorm.fit(-rvs, method='mle')
+        assert_allclose([a6m, loc6m, scale6m], [-a6p, -loc6p, scale6p])
+        a7p, loc7p, scale7p = stats.skewnorm.fit(rvs, method='mm')
+        a7m, loc7m, scale7m = stats.skewnorm.fit(-rvs, method='mm')
+        assert_allclose([a7m, loc7m, scale7m], [-a7p, -loc7p, scale7p])
+
+    def test_fit_gh19332(self):
+        # When the skewness of the data was high, `skewnorm.fit` fell back on
+        # generic `fit` behavior with a bad guess of the skewness parameter.
+        # Test that this is improved; `skewnorm.fit` is now better at finding
+        # the global optimum when the sample is highly skewed. See gh-19332.
+        x = np.array([-5, -1, 1 / 100_000] + 12 * [1] + [5])
+
+        params = stats.skewnorm.fit(x)
+        res = stats.skewnorm.nnlf(params, x)
+
+        # Compare overridden fit against generic fit.
+        # res should be about 32.01, and the generic fit is worse at 32.64.
+        # In case the generic fit improves, remove this assertion
+        # (see gh-19333).
+ params_super = stats.skewnorm.fit(x, superfit=True) + ref = stats.skewnorm.nnlf(params_super, x) + assert res < ref - 0.5 + + # Compare overridden fit against stats.fit + rng = np.random.default_rng(9842356982345693637) + bounds = {'a': (-5, 5), 'loc': (-10, 10), 'scale': (1e-16, 10)} + def optimizer(fun, bounds): + return differential_evolution(fun, bounds, seed=rng) + + fit_result = stats.fit(stats.skewnorm, x, bounds, optimizer=optimizer) + np.testing.assert_allclose(params, fit_result.params, rtol=1e-4) + + +class TestExpon: + def test_zero(self): + assert_equal(stats.expon.pdf(0), 1) + + def test_tail(self): # Regression test for ticket 807 + assert_equal(stats.expon.cdf(1e-18), 1e-18) + assert_equal(stats.expon.isf(stats.expon.sf(40)), 40) + + def test_nan_raises_error(self): + # see gh-issue 10300 + x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan]) + assert_raises(ValueError, stats.expon.fit, x) + + def test_inf_raises_error(self): + # see gh-issue 10300 + x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf]) + assert_raises(ValueError, stats.expon.fit, x) + + +class TestNorm: + def test_nan_raises_error(self): + # see gh-issue 10300 + x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan]) + assert_raises(ValueError, stats.norm.fit, x) + + def test_inf_raises_error(self): + # see gh-issue 10300 + x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf]) + assert_raises(ValueError, stats.norm.fit, x) + + def test_bad_keyword_arg(self): + x = [1, 2, 3] + assert_raises(TypeError, stats.norm.fit, x, plate="shrimp") + + @pytest.mark.parametrize('loc', [0, 1]) + def test_delta_cdf(self, loc): + # The expected value is computed with mpmath: + # >>> import mpmath + # >>> mpmath.mp.dps = 60 + # >>> float(mpmath.ncdf(12) - mpmath.ncdf(11)) + # 1.910641809677555e-28 + expected = 1.910641809677555e-28 + delta = stats.norm._delta_cdf(11+loc, 12+loc, loc=loc) + assert_allclose(delta, expected, rtol=1e-13) + delta = stats.norm._delta_cdf(-(12+loc), -(11+loc), loc=-loc) + assert_allclose(delta, expected, rtol=1e-13) + + +class TestUniform: + """gh-10300""" + def test_nan_raises_error(self): + # see gh-issue 10300 + x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan]) + assert_raises(ValueError, stats.uniform.fit, x) + + def test_inf_raises_error(self): + # see gh-issue 10300 + x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf]) + assert_raises(ValueError, stats.uniform.fit, x) + + +class TestExponNorm: + def test_moments(self): + # Some moment test cases based on non-loc/scaled formula + def get_moms(lam, sig, mu): + # See wikipedia for these formulae + # where it is listed as an exponentially modified gaussian + opK2 = 1.0 + 1 / (lam*sig)**2 + exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5) + exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2) + return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt] + + mu, sig, lam = 0, 1, 1 + K = 1.0 / (lam * sig) + sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') + assert_almost_equal(sts, get_moms(lam, sig, mu)) + mu, sig, lam = -3, 2, 0.1 + K = 1.0 / (lam * sig) + sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') + assert_almost_equal(sts, get_moms(lam, sig, mu)) + mu, sig, lam = 0, 3, 1 + K = 1.0 / (lam * sig) + sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') + assert_almost_equal(sts, get_moms(lam, sig, mu)) + mu, sig, lam = -5, 11, 3.5 + K = 1.0 / (lam * sig) + sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') + 
assert_almost_equal(sts, get_moms(lam, sig, mu)) + + def test_nan_raises_error(self): + # see gh-issue 10300 + x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan]) + assert_raises(ValueError, stats.exponnorm.fit, x, floc=0, fscale=1) + + def test_inf_raises_error(self): + # see gh-issue 10300 + x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf]) + assert_raises(ValueError, stats.exponnorm.fit, x, floc=0, fscale=1) + + def test_extremes_x(self): + # Test for extreme values against overflows + assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0) + assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0) + assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0) + assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0) + + # Expected values for the PDF were computed with mpmath, with + # the following function, and with mpmath.mp.dps = 50. + # + # def exponnorm_stdpdf(x, K): + # x = mpmath.mpf(x) + # K = mpmath.mpf(K) + # t1 = mpmath.exp(1/(2*K**2) - x/K) + # erfcarg = -(x - 1/K)/mpmath.sqrt(2) + # t2 = mpmath.erfc(erfcarg) + # return t1 * t2 / (2*K) + # + @pytest.mark.parametrize('x, K, expected', + [(20, 0.01, 6.90010764753618e-88), + (1, 0.01, 0.24438994313247364), + (-1, 0.01, 0.23955149623472075), + (-20, 0.01, 4.6004708690125477e-88), + (10, 1, 7.48518298877006e-05), + (10, 10000, 9.990005048283775e-05)]) + def test_std_pdf(self, x, K, expected): + assert_allclose(stats.exponnorm.pdf(x, K), expected, rtol=5e-12) + + # Expected values for the CDF were computed with mpmath using + # the following function and with mpmath.mp.dps = 60: + # + # def mp_exponnorm_cdf(x, K, loc=0, scale=1): + # x = mpmath.mpf(x) + # K = mpmath.mpf(K) + # loc = mpmath.mpf(loc) + # scale = mpmath.mpf(scale) + # z = (x - loc)/scale + # return (mpmath.ncdf(z) + # - mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K)) + # + @pytest.mark.parametrize('x, K, scale, expected', + [[0, 0.01, 1, 0.4960109760186432], + [-5, 0.005, 1, 2.7939945412195734e-07], + [-1e4, 0.01, 100, 0.0], + [-1e4, 0.01, 1000, 6.920401854427357e-24], + [5, 0.001, 1, 0.9999997118542392]]) + def test_cdf_small_K(self, x, K, scale, expected): + p = stats.exponnorm.cdf(x, K, scale=scale) + if expected == 0.0: + assert p == 0.0 + else: + assert_allclose(p, expected, rtol=1e-13) + + # Expected values for the SF were computed with mpmath using + # the following function and with mpmath.mp.dps = 60: + # + # def mp_exponnorm_sf(x, K, loc=0, scale=1): + # x = mpmath.mpf(x) + # K = mpmath.mpf(K) + # loc = mpmath.mpf(loc) + # scale = mpmath.mpf(scale) + # z = (x - loc)/scale + # return (mpmath.ncdf(-z) + # + mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K)) + # + @pytest.mark.parametrize('x, K, scale, expected', + [[10, 0.01, 1, 8.474702916146657e-24], + [2, 0.005, 1, 0.02302280664231312], + [5, 0.005, 0.5, 8.024820681931086e-24], + [10, 0.005, 0.5, 3.0603340062892486e-89], + [20, 0.005, 0.5, 0.0], + [-3, 0.001, 1, 0.9986545205566117]]) + def test_sf_small_K(self, x, K, scale, expected): + p = stats.exponnorm.sf(x, K, scale=scale) + if expected == 0.0: + assert p == 0.0 + else: + assert_allclose(p, expected, rtol=5e-13) + + +class TestGenExpon: + def test_pdf_unity_area(self): + from scipy.integrate import simpson + # PDF should integrate to one + p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0) + assert_almost_equal(simpson(p, dx=0.01), 1, 1) + + def test_cdf_bounds(self): + # CDF should always be positive + cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0) + assert_(numpy.all((0 <= cdf) & (cdf <= 1))) + 
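+    # For reference, the closed form evaluated by the mpmath script in the
+    # next comment is the genexpon survival function
+    # sf(x) = exp(-(a + b)*x + (b/c)*(1 - exp(-c*x))).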
+ # The values of p in the following data were computed with mpmath. + # E.g. the script + # from mpmath import mp + # mp.dps = 80 + # x = mp.mpf('15.0') + # a = mp.mpf('1.0') + # b = mp.mpf('2.0') + # c = mp.mpf('1.5') + # print(float(mp.exp((-a-b)*x + (b/c)*-mp.expm1(-c*x)))) + # prints + # 1.0859444834514553e-19 + @pytest.mark.parametrize('x, p, a, b, c', + [(15, 1.0859444834514553e-19, 1, 2, 1.5), + (0.25, 0.7609068232534623, 0.5, 2, 3), + (0.25, 0.09026661397565876, 9.5, 2, 0.5), + (0.01, 0.9753038265071597, 2.5, 0.25, 0.5), + (3.25, 0.0001962824553094492, 2.5, 0.25, 0.5), + (0.125, 0.9508674287164001, 0.25, 5, 0.5)]) + def test_sf_isf(self, x, p, a, b, c): + sf = stats.genexpon.sf(x, a, b, c) + assert_allclose(sf, p, rtol=2e-14) + isf = stats.genexpon.isf(p, a, b, c) + assert_allclose(isf, x, rtol=2e-14) + + # The values of p in the following data were computed with mpmath. + @pytest.mark.parametrize('x, p, a, b, c', + [(0.25, 0.2390931767465377, 0.5, 2, 3), + (0.25, 0.9097333860243412, 9.5, 2, 0.5), + (0.01, 0.0246961734928403, 2.5, 0.25, 0.5), + (3.25, 0.9998037175446906, 2.5, 0.25, 0.5), + (0.125, 0.04913257128359998, 0.25, 5, 0.5)]) + def test_cdf_ppf(self, x, p, a, b, c): + cdf = stats.genexpon.cdf(x, a, b, c) + assert_allclose(cdf, p, rtol=2e-14) + ppf = stats.genexpon.ppf(p, a, b, c) + assert_allclose(ppf, x, rtol=2e-14) + + +class TestTruncexpon: + + def test_sf_isf(self): + # reference values were computed via the reference distribution, e.g. + # mp.dps = 50; TruncExpon(b=b).sf(x) + b = [20, 100] + x = [19.999999, 99.999999] + ref = [2.0611546593828472e-15, 3.7200778266671455e-50] + assert_allclose(stats.truncexpon.sf(x, b), ref, rtol=1.5e-10) + assert_allclose(stats.truncexpon.isf(ref, b), x, rtol=1e-12) + + +class TestExponpow: + def test_tail(self): + assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20) + assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8), + 5) + + +class TestSkellam: + def test_pmf(self): + # comparison to R + k = numpy.arange(-10, 15) + mu1, mu2 = 10, 5 + skpmfR = numpy.array( + [4.2254582961926893e-005, 1.1404838449648488e-004, + 2.8979625801752660e-004, 6.9177078182101231e-004, + 1.5480716105844708e-003, 3.2412274963433889e-003, + 6.3373707175123292e-003, 1.1552351566696643e-002, + 1.9606152375042644e-002, 3.0947164083410337e-002, + 4.5401737566767360e-002, 6.1894328166820688e-002, + 7.8424609500170578e-002, 9.2418812533573133e-002, + 1.0139793148019728e-001, 1.0371927988298846e-001, + 9.9076583077406091e-002, 8.8546660073089561e-002, + 7.4187842052486810e-002, 5.8392772862200251e-002, + 4.3268692953013159e-002, 3.0248159818374226e-002, + 1.9991434305603021e-002, 1.2516877303301180e-002, + 7.4389876226229707e-003]) + + assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15) + + def test_cdf(self): + # comparison to R, only 5 decimals + k = numpy.arange(-10, 15) + mu1, mu2 = 10, 5 + skcdfR = numpy.array( + [6.4061475386192104e-005, 1.7810985988267694e-004, + 4.6790611790020336e-004, 1.1596768997212152e-003, + 2.7077485103056847e-003, 5.9489760066490718e-003, + 1.2286346724161398e-002, 2.3838698290858034e-002, + 4.3444850665900668e-002, 7.4392014749310995e-002, + 1.1979375231607835e-001, 1.8168808048289900e-001, + 2.6011268998306952e-001, 3.5253150251664261e-001, + 4.5392943399683988e-001, 5.5764871387982828e-001, + 6.5672529695723436e-001, 7.4527195703032389e-001, + 8.1945979908281064e-001, 8.7785257194501087e-001, + 9.2112126489802404e-001, 9.5136942471639818e-001, + 9.7136085902200120e-001, 
9.8387773632530240e-001,
+             9.9131672394792536e-001])
+
+        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
+
+    def test_extreme_mu2(self):
+        # check that the crash reported in gh-17916 for large mu2 is resolved
+        x, mu1, mu2 = 0, 1, 4820232647677555.0
+        assert_allclose(stats.skellam.pmf(x, mu1, mu2), 0, atol=1e-16)
+        assert_allclose(stats.skellam.cdf(x, mu1, mu2), 1, atol=1e-16)
+
+
+class TestLognorm:
+    def test_pdf(self):
+        # Regression test for Ticket #1471: avoid nan with 0/0 situation
+        # Also make sure there are no warnings at x=0, cf gh-5202
+        with warnings.catch_warnings():
+            warnings.simplefilter('error', RuntimeWarning)
+            pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
+            assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
+
+    def test_logcdf(self):
+        # Regression test for gh-5940: sf et al. would underflow too early
+        x2, mu, sigma = 201.68, 195, 0.149
+        assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
+                        stats.norm.sf(np.log(x2-mu)/sigma))
+        assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
+                        stats.norm.logsf(np.log(x2-mu)/sigma))
+
+    @pytest.fixture(scope='function')
+    def rng(self):
+        return np.random.default_rng(1234)
+
+    @pytest.mark.parametrize("rvs_shape", [.1, 2])
+    @pytest.mark.parametrize("rvs_loc", [-2, 0, 2])
+    @pytest.mark.parametrize("rvs_scale", [.2, 1, 5])
+    @pytest.mark.parametrize('fix_shape, fix_loc, fix_scale',
+                             [e for e in product((False, True), repeat=3)
+                              if False in e])
+    @np.errstate(invalid="ignore")
+    def test_fit_MLE_comp_optimizer(self, rvs_shape, rvs_loc, rvs_scale,
+                                    fix_shape, fix_loc, fix_scale, rng):
+        data = stats.lognorm.rvs(size=100, s=rvs_shape, scale=rvs_scale,
+                                 loc=rvs_loc, random_state=rng)
+
+        kwds = {}
+        if fix_shape:
+            kwds['f0'] = rvs_shape
+        if fix_loc:
+            kwds['floc'] = rvs_loc
+        if fix_scale:
+            kwds['fscale'] = rvs_scale
+
+        # Numerical result may equal analytical result if some code path
+        # of the analytical routine makes use of numerical optimization.
+        _assert_less_or_close_loglike(stats.lognorm, data, **kwds,
+                                      maybe_identical=True)
+
+    def test_isf(self):
+        # reference values were computed via the reference distribution, e.g.
+        # mp.dps = 100;
+        # LogNormal(s=s).isf(q=0.1, guess=0)
+        # LogNormal(s=s).isf(q=2e-10, guess=100)
+        s = 0.954
+        q = [0.1, 2e-10, 5e-20, 6e-40]
+        ref = [3.3960065375794937, 390.07632793595974, 5830.5020828128445,
+               287872.84087457904]
+        assert_allclose(stats.lognorm.isf(q, s), ref, rtol=1e-14)
+
+
+class TestBeta:
+    def test_logpdf(self):
+        # Regression test for Ticket #1326: avoid nan with 0*log(0) situation
+        logpdf = stats.beta.logpdf(0, 1, 0.5)
+        assert_almost_equal(logpdf, -0.69314718056)
+        logpdf = stats.beta.logpdf(0, 0.5, 1)
+        assert_almost_equal(logpdf, np.inf)
+
+    def test_logpdf_ticket_1866(self):
+        alpha, beta = 267, 1472
+        x = np.array([0.2, 0.5, 0.6])
+        b = stats.beta(alpha, beta)
+        assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
+        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
+
+    def test_fit_bad_keyword_args(self):
+        x = [0.1, 0.5, 0.6]
+        assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,
+                      plate="shrimp")
+
+    def test_fit_duplicated_fixed_parameter(self):
+        # At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.
+        # More than one raises a ValueError.
+        x = [0.1, 0.5, 0.6]
+        assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)
+
+    @pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901")
+    def test_issue_12635(self):
+        # Confirm that Boost's beta distribution resolves gh-12635.
+ # Check against R: + # options(digits=16) + # p = 0.9999999999997369 + # a = 75.0 + # b = 66334470.0 + # print(qbeta(p, a, b)) + p, a, b = 0.9999999999997369, 75.0, 66334470.0 + assert_allclose(stats.beta.ppf(p, a, b), 2.343620802982393e-06) + + @pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901") + def test_issue_12794(self): + # Confirm that Boost's beta distribution resolves gh-12794. + # Check against R. + # options(digits=16) + # p = 1e-11 + # count_list = c(10,100,1000) + # print(qbeta(1-p, count_list + 1, 100000 - count_list)) + inv_R = np.array([0.0004944464889611935, + 0.0018360586912635726, + 0.0122663919942518351]) + count_list = np.array([10, 100, 1000]) + p = 1e-11 + inv = stats.beta.isf(p, count_list + 1, 100000 - count_list) + assert_allclose(inv, inv_R) + res = stats.beta.sf(inv, count_list + 1, 100000 - count_list) + assert_allclose(res, p) + + @pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901") + def test_issue_12796(self): + # Confirm that Boost's beta distribution succeeds in the case + # of gh-12796 + alpha_2 = 5e-6 + count_ = np.arange(1, 20) + nobs = 100000 + q, a, b = 1 - alpha_2, count_ + 1, nobs - count_ + inv = stats.beta.ppf(q, a, b) + res = stats.beta.cdf(inv, a, b) + assert_allclose(res, 1 - alpha_2) + + def test_endpoints(self): + # Confirm that boost's beta distribution returns inf at x=1 + # when b<1 + a, b = 1, 0.5 + assert_equal(stats.beta.pdf(1, a, b), np.inf) + + # Confirm that boost's beta distribution returns inf at x=0 + # when a<1 + a, b = 0.2, 3 + assert_equal(stats.beta.pdf(0, a, b), np.inf) + + # Confirm that boost's beta distribution returns 5 at x=0 + # when a=1, b=5 + a, b = 1, 5 + assert_equal(stats.beta.pdf(0, a, b), 5) + assert_equal(stats.beta.pdf(1e-310, a, b), 5) + + # Confirm that boost's beta distribution returns 5 at x=1 + # when a=5, b=1 + a, b = 5, 1 + assert_equal(stats.beta.pdf(1, a, b), 5) + assert_equal(stats.beta.pdf(1-1e-310, a, b), 5) + + @pytest.mark.xfail(IS_PYPY, reason="Does not convert boost warning") + def test_boost_eval_issue_14606(self): + q, a, b = 0.995, 1.0e11, 1.0e13 + with pytest.warns(RuntimeWarning): + stats.beta.ppf(q, a, b) + + @pytest.mark.parametrize('method', [stats.beta.ppf, stats.beta.isf]) + @pytest.mark.parametrize('a, b', [(1e-310, 12.5), (12.5, 1e-310)]) + def test_beta_ppf_with_subnormal_a_b(self, method, a, b): + # Regression test for gh-17444: beta.ppf(p, a, b) and beta.isf(p, a, b) + # would result in a segmentation fault if either a or b was subnormal. + p = 0.9 + # Depending on the version of Boost that we have vendored and + # our setting of the Boost double promotion policy, the call + # `stats.beta.ppf(p, a, b)` might raise an OverflowError or + # return a value. We'll accept either behavior (and not care about + # the value), because our goal here is to verify that the call does + # not trigger a segmentation fault. + try: + method(p, a, b) + except OverflowError: + # The OverflowError exception occurs with Boost 1.80 or earlier + # when Boost's double promotion policy is false; see + # https://github.com/boostorg/math/issues/882 + # and + # https://github.com/boostorg/math/pull/883 + # Once we have vendored the fixed version of Boost, we can drop + # this try-except wrapper and just call the function. 
+ pass + + # entropy accuracy was confirmed using the following mpmath function + # from mpmath import mp + # mp.dps = 50 + # def beta_entropy_mpmath(a, b): + # a = mp.mpf(a) + # b = mp.mpf(b) + # entropy = mp.log(mp.beta(a, b)) - (a - 1) * mp.digamma(a) -\ + # (b - 1) * mp.digamma(b) + (a + b -2) * mp.digamma(a + b) + # return float(entropy) + + @pytest.mark.parametrize('a, b, ref', + [(0.5, 0.5, -0.24156447527049044), + (0.001, 1, -992.0922447210179), + (1, 10000, -8.210440371976183), + (100000, 100000, -5.377247470132859)]) + def test_entropy(self, a, b, ref): + assert_allclose(stats.beta(a, b).entropy(), ref) + + @pytest.mark.parametrize( + "a, b, ref, tol", + [ + (1, 10, -1.4025850929940458, 1e-14), + (10, 20, -1.0567887388936708, 1e-13), + (4e6, 4e6+20, -7.221686009678741, 1e-9), + (5e6, 5e6+10, -7.333257022834638, 1e-8), + (1e10, 1e10+20, -11.133707703130474, 1e-11), + (1e50, 1e50+20, -57.185409562486385, 1e-15), + (2, 1e10, -21.448635265288925, 1e-11), + (2, 1e20, -44.47448619497938, 1e-14), + (2, 1e50, -113.55203898480075, 1e-14), + (5, 1e10, -20.87226777401971, 1e-10), + (5, 1e20, -43.89811870326017, 1e-14), + (5, 1e50, -112.97567149308153, 1e-14), + (10, 1e10, -20.489796752909477, 1e-9), + (10, 1e20, -43.51564768139993, 1e-14), + (10, 1e50, -112.59320047122131, 1e-14), + (1e20, 2, -44.47448619497938, 1e-14), + (1e20, 5, -43.89811870326017, 1e-14), + (1e50, 10, -112.59320047122131, 1e-14), + ] + ) + def test_extreme_entropy(self, a, b, ref, tol): + # Reference values were calculated with mpmath: + # from mpmath import mp + # mp.dps = 500 + # + # def beta_entropy_mpmath(a, b): + # a = mp.mpf(a) + # b = mp.mpf(b) + # entropy = ( + # mp.log(mp.beta(a, b)) - (a - 1) * mp.digamma(a) + # - (b - 1) * mp.digamma(b) + (a + b - 2) * mp.digamma(a + b) + # ) + # return float(entropy) + assert_allclose(stats.beta(a, b).entropy(), ref, rtol=tol) + + +class TestBetaPrime: + # the test values are used in test_cdf_gh_17631 / test_ppf_gh_17631 + # They are computed with mpmath. 
Example: + # from mpmath import mp + # mp.dps = 50 + # a, b = mp.mpf(0.05), mp.mpf(0.1) + # x = mp.mpf(1e22) + # float(mp.betainc(a, b, 0.0, x/(1+x), regularized=True)) + # note: we use the values computed by the cdf to test whether + # ppf(cdf(x)) == x (up to a small tolerance) + # since the ppf can be very sensitive to small variations of the input, + # it can be required to generate the test case for the ppf separately, + # see self.test_ppf + cdf_vals = [ + (1e22, 100.0, 0.05, 0.8973027435427167), + (1e10, 100.0, 0.05, 0.5911548582766262), + (1e8, 0.05, 0.1, 0.9467768090820048), + (1e8, 100.0, 0.05, 0.4852944858726726), + (1e-10, 0.05, 0.1, 0.21238845427095), + (1e-10, 1.5, 1.5, 1.697652726007973e-15), + (1e-10, 0.05, 100.0, 0.40884514172337383), + (1e-22, 0.05, 0.1, 0.053349567649287326), + (1e-22, 1.5, 1.5, 1.6976527263135503e-33), + (1e-22, 0.05, 100.0, 0.10269725645728331), + (1e-100, 0.05, 0.1, 6.7163126421919795e-06), + (1e-100, 1.5, 1.5, 1.6976527263135503e-150), + (1e-100, 0.05, 100.0, 1.2928818587561651e-05), + ] + + def test_logpdf(self): + alpha, beta = 267, 1472 + x = np.array([0.2, 0.5, 0.6]) + b = stats.betaprime(alpha, beta) + assert_(np.isfinite(b.logpdf(x)).all()) + assert_allclose(b.pdf(x), np.exp(b.logpdf(x))) + + def test_cdf(self): + # regression test for gh-4030: Implementation of + # scipy.stats.betaprime.cdf() + x = stats.betaprime.cdf(0, 0.2, 0.3) + assert_equal(x, 0.0) + + alpha, beta = 267, 1472 + x = np.array([0.2, 0.5, 0.6]) + cdfs = stats.betaprime.cdf(x, alpha, beta) + assert_(np.isfinite(cdfs).all()) + + # check the new cdf implementation vs generic one: + gen_cdf = stats.rv_continuous._cdf_single + cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x] + assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12) + + # The expected values for test_ppf() were computed with mpmath, e.g. + # + # from mpmath import mp + # mp.dps = 125 + # p = 0.01 + # a, b = 1.25, 2.5 + # x = mp.findroot(lambda t: mp.betainc(a, b, x1=0, x2=t/(1+t), + # regularized=True) - p, + # x0=(0.01, 0.011), method='secant') + # print(float(x)) + # + # prints + # + # 0.01080162700956614 + # + @pytest.mark.parametrize( + 'p, a, b, expected', + [(0.010, 1.25, 2.5, 0.01080162700956614), + (1e-12, 1.25, 2.5, 1.0610141996279122e-10), + (1e-18, 1.25, 2.5, 1.6815941817974941e-15), + (1e-17, 0.25, 7.0, 1.0179194531881782e-69), + (0.375, 0.25, 7.0, 0.002036820346115211), + (0.9978811466052919, 0.05, 0.1, 1.0000000000001218e22),] + ) + def test_ppf(self, p, a, b, expected): + x = stats.betaprime.ppf(p, a, b) + assert_allclose(x, expected, rtol=1e-14) + + @pytest.mark.parametrize('x, a, b, p', cdf_vals) + def test_ppf_gh_17631(self, x, a, b, p): + assert_allclose(stats.betaprime.ppf(p, a, b), x, rtol=1e-14) + + @pytest.mark.parametrize( + 'x, a, b, expected', + cdf_vals + [ + (1e10, 1.5, 1.5, 0.9999999999999983), + (1e10, 0.05, 0.1, 0.9664184367890859), + (1e22, 0.05, 0.1, 0.9978811466052919), + ]) + def test_cdf_gh_17631(self, x, a, b, expected): + assert_allclose(stats.betaprime.cdf(x, a, b), expected, rtol=1e-14) + + @pytest.mark.parametrize( + 'x, a, b, expected', + [(1e50, 0.05, 0.1, 0.9999966641709545), + (1e50, 100.0, 0.05, 0.995925162631006)]) + def test_cdf_extreme_tails(self, x, a, b, expected): + # for even more extreme values, we only get a few correct digits + # results are still < 1 + y = stats.betaprime.cdf(x, a, b) + assert y < 1.0 + assert_allclose(y, expected, rtol=2e-5) + + def test_sf(self): + # reference values were computed via the reference distribution, + # e.g. 
+        # mp.dps = 50
+        # a, b = 5, 3
+        # x = 1e10
+        # BetaPrime(a=a, b=b).sf(x); returns 3.4999999979e-29
+        a = [5, 4, 2, 0.05, 0.05, 0.05, 0.05, 100.0, 100.0, 0.05, 0.05,
+             0.05, 1.5, 1.5]
+        b = [3, 2, 1, 0.1, 0.1, 0.1, 0.1, 0.05, 0.05, 100.0, 100.0,
+             100.0, 1.5, 1.5]
+        x = [1e10, 1e20, 1e30, 1e22, 1e-10, 1e-22, 1e-100, 1e22, 1e10,
+             1e-10, 1e-22, 1e-100, 1e10, 1e-10]
+        ref = [3.4999999979e-29, 9.999999999994357e-40, 1.9999999999999998e-30,
+               0.0021188533947081017, 0.78761154572905, 0.9466504323507127,
+               0.9999932836873578, 0.10269725645728331, 0.40884514172337383,
+               0.5911548582766262, 0.8973027435427167, 0.9999870711814124,
+               1.6976527260079727e-15, 0.9999999999999983]
+        sf_values = stats.betaprime.sf(x, a, b)
+        assert_allclose(sf_values, ref, rtol=1e-12)
+
+    def test_fit_stats_gh18274(self):
+        # gh-18274 reported spurious warnings emitted when fitting
+        # `betaprime` to data. Some of these were emitted by stats, too.
+        # Check that the warnings are no longer emitted.
+        stats.betaprime.fit([0.1, 0.25, 0.3, 1.2, 1.6], floc=0, fscale=1)
+        stats.betaprime(a=1, b=1).stats('mvsk')
+
+    def test_moment_gh18634(self):
+        # Testing for gh-18634 revealed that `betaprime` raised a
+        # NotImplementedError for higher moments. Check that this is
+        # resolved. Parameters are arbitrary but lie on either side of the
+        # moment order (5) to test both branches of `_lazywhere`. Reference
+        # values produced with Mathematica, e.g.
+        # `Moment[BetaPrimeDistribution[2,7],5]`
+        ref = [np.inf, 0.867096912929055]
+        res = stats.betaprime(2, [4.2, 7.1]).moment(5)
+        assert_allclose(res, ref)
+
+
+class TestGamma:
+    def test_pdf(self):
+        # a few test cases to compare with R
+        pdf = stats.gamma.pdf(90, 394, scale=1./5)
+        assert_almost_equal(pdf, 0.002312341)
+
+        pdf = stats.gamma.pdf(3, 10, scale=1./5)
+        assert_almost_equal(pdf, 0.1620358)
+
+    def test_logpdf(self):
+        # Regression test for Ticket #1326: corner case, avoid nan in the
+        # 0*log(0) situation
+        logpdf = stats.gamma.logpdf(0, 1)
+        assert_almost_equal(logpdf, 0)
+
+    def test_fit_bad_keyword_args(self):
+        x = [0.1, 0.5, 0.6]
+        assert_raises(TypeError, stats.gamma.fit, x, floc=0, plate="shrimp")
+
+    def test_isf(self):
+        # Test cases for when the probability is very small. See gh-13664.
+        # The expected values can be checked with mpmath. With mpmath,
+        # the survival function sf(x, k) can be computed as
+        #
+        #     mpmath.gammainc(k, x, mpmath.inf, regularized=True)
+        #
+        # Here we have:
+        #
+        # >>> mpmath.mp.dps = 60
+        # >>> float(mpmath.gammainc(1, 39.14394658089878, mpmath.inf,
+        # ...                       regularized=True))
+        # 9.99999999999999e-18
+        # >>> float(mpmath.gammainc(100, 330.6557590436547, mpmath.inf,
+        #                           regularized=True))
+        # 1.000000000000028e-50
+        #
+        assert np.isclose(stats.gamma.isf(1e-17, 1),
+                          39.14394658089878, atol=1e-14)
+        assert np.isclose(stats.gamma.isf(1e-50, 100),
+                          330.6557590436547, atol=1e-13)
+
+    @pytest.mark.parametrize('scale', [1.0, 5.0])
+    def test_delta_cdf(self, scale):
+        # Expected value computed with mpmath:
+        #
+        # >>> import mpmath
+        # >>> mpmath.mp.dps = 150
+        # >>> cdf1 = mpmath.gammainc(3, 0, 245, regularized=True)
+        # >>> cdf2 = mpmath.gammainc(3, 0, 250, regularized=True)
+        # >>> float(cdf2 - cdf1)
+        # 1.1902609356171962e-102
+        #
+        delta = stats.gamma._delta_cdf(scale*245, scale*250, 3, scale=scale)
+        assert_allclose(delta, 1.1902609356171962e-102, rtol=1e-13)
+
+    @pytest.mark.parametrize('a, ref, rtol',
+                             [(1e-4, -9990.366610819761, 1e-15),
+                              (2, 1.5772156649015328, 1e-15),
+                              (100, 3.7181819485047463, 1e-13),
+                              (1e4, 6.024075385026086, 1e-15),
+                              (1e18, 22.142204370151084, 1e-15),
+                              (1e100, 116.54819318290696, 1e-15)])
+    def test_entropy(self, a, ref, rtol):
+        # expected value computed with mpmath:
+        # from mpmath import mp
+        # mp.dps = 500
+        # def gamma_entropy_reference(x):
+        #     x = mp.mpf(x)
+        #     return float(mp.digamma(x) * (mp.one - x) + x + mp.loggamma(x))
+
+        assert_allclose(stats.gamma.entropy(a), ref, rtol=rtol)
+
+    @pytest.mark.parametrize("a", [1e-2, 1, 1e2])
+    @pytest.mark.parametrize("loc", [1e-2, 0, 1e2])
+    @pytest.mark.parametrize('scale', [1e-2, 1, 1e2])
+    @pytest.mark.parametrize('fix_a', [True, False])
+    @pytest.mark.parametrize('fix_loc', [True, False])
+    @pytest.mark.parametrize('fix_scale', [True, False])
+    def test_fit_mm(self, a, loc, scale, fix_a, fix_loc, fix_scale):
+        rng = np.random.default_rng(6762668991392531563)
+        data = stats.gamma.rvs(a, loc=loc, scale=scale, size=100,
+                               random_state=rng)
+
+        kwds = {}
+        if fix_a:
+            kwds['fa'] = a
+        if fix_loc:
+            kwds['floc'] = loc
+        if fix_scale:
+            kwds['fscale'] = scale
+        nfree = 3 - len(kwds)
+
+        if nfree == 0:
+            error_msg = "All parameters fixed. There is nothing to optimize."
+            with pytest.raises(ValueError, match=error_msg):
+                stats.gamma.fit(data, method='mm', **kwds)
+            return
+
+        theta = stats.gamma.fit(data, method='mm', **kwds)
+        dist = stats.gamma(*theta)
+        if nfree >= 1:
+            assert_allclose(dist.mean(), np.mean(data))
+        if nfree >= 2:
+            assert_allclose(dist.moment(2), np.mean(data**2))
+        if nfree >= 3:
+            assert_allclose(dist.moment(3), np.mean(data**3))
+
+
+def test_pdf_overflow_gh19616():
+    # Confirm that gh19616 (intermediate over/underflows in PDF) is resolved
+    # Reference value from R GeneralizedHyperbolic library
+    # library(GeneralizedHyperbolic)
+    # options(digits=16)
+    # jitter = 1e-3
+    # dnig(1, a=2**0.5 / jitter**2, b=1 / jitter**2)
+    jitter = 1e-3
+    Z = stats.norminvgauss(2**0.5 / jitter**2, 1 / jitter**2, loc=0, scale=1)
+    assert_allclose(Z.pdf(1.0), 282.0948446666433)
+
+
+class TestDgamma:
+    def test_pdf(self):
+        rng = np.random.default_rng(3791303244302340058)
+        size = 10  # number of points to check
+        x = rng.normal(scale=10, size=size)
+        a = rng.uniform(high=10, size=size)
+        res = stats.dgamma.pdf(x, a)
+        ref = stats.gamma.pdf(np.abs(x), a) / 2
+        assert_allclose(res, ref)
+
+        dist = stats.dgamma(a)
+        # There was an intermittent failure with assert_equal on 32-bit Linux
+        assert_allclose(dist.pdf(x), res, rtol=5e-16)
+
+    # mpmath was used to compute the expected values.
+    # For x < 0, cdf(x, a) is mp.gammainc(a, -x, mp.inf, regularized=True)/2
+    # For x > 0, cdf(x, a) is (1 + mp.gammainc(a, 0, x, regularized=True))/2
+    # E.g.
+    # from mpmath import mp
+    # mp.dps = 50
+    # print(float(mp.gammainc(1, 20, mp.inf, regularized=True)/2))
+    # prints
+    # 1.030576811219279e-09
+    @pytest.mark.parametrize('x, a, expected',
+                             [(-20, 1, 1.030576811219279e-09),
+                              (-40, 1, 2.1241771276457944e-18),
+                              (-50, 5, 2.7248509914602648e-17),
+                              (-25, 0.125, 5.333071920958156e-14),
+                              (5, 1, 0.9966310265004573)])
+    def test_cdf_ppf_sf_isf_tail(self, x, a, expected):
+        cdf = stats.dgamma.cdf(x, a)
+        assert_allclose(cdf, expected, rtol=5e-15)
+        ppf = stats.dgamma.ppf(expected, a)
+        assert_allclose(ppf, x, rtol=5e-15)
+        sf = stats.dgamma.sf(-x, a)
+        assert_allclose(sf, expected, rtol=5e-15)
+        isf = stats.dgamma.isf(expected, a)
+        assert_allclose(isf, -x, rtol=5e-15)
+
+    @pytest.mark.parametrize("a, ref",
+                             [(1.5, 2.0541199559354117),
+                              (1.3, 1.9357296377121247),
+                              (1.1, 1.7856502333412134)])
+    def test_entropy(self, a, ref):
+        # The reference values were calculated with mpmath:
+        # def entropy_dgamma(a):
+        #     def pdf(x):
+        #         A = mp.one / (mp.mpf(2.) * mp.gamma(a))
+        #         B = mp.fabs(x) ** (a - mp.one)
+        #         C = mp.exp(-mp.fabs(x))
+        #         h = A * B * C
+        #         return h
+        #
+        #     return -mp.quad(lambda t: pdf(t) * mp.log(pdf(t)),
+        #                     [-mp.inf, mp.inf])
+        assert_allclose(stats.dgamma.entropy(a), ref, rtol=1e-14)
+
+    @pytest.mark.parametrize("a, ref",
+                             [(1e-100, -1e+100),
+                              (1e-10, -9999999975.858217),
+                              (1e-5, -99987.37111657023),
+                              (1e4, 6.717222565586032),
+                              (1000000000000000.0, 19.38147391121996),
+                              (1e+100, 117.2413403634669)])
+    def test_entropy_extreme_values(self, a, ref):
+        # The reference values were calculated with mpmath:
+        # from mpmath import mp
+        # mp.dps = 500
+        # def second_dgamma(a):
+        #     a = mp.mpf(a)
+        #     x_1 = a + mp.log(2) + mp.loggamma(a)
+        #     x_2 = (mp.one - a) * mp.digamma(a)
+        #     h = x_1 + x_2
+        #     return h
+        assert_allclose(stats.dgamma.entropy(a), ref, rtol=1e-10)
+
+    def test_entropy_array_input(self):
+        x = np.array([1, 5, 1e20, 1e-5])
+        y = stats.dgamma.entropy(x)
+        for i in range(len(y)):
+            assert y[i] == stats.dgamma.entropy(x[i])
+
+
+class TestChi2:
+    # regression tests after precision improvements, ticket:1041, not verified
+    def test_precision(self):
+        assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,
+                            decimal=14)
+        assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,
+                            decimal=14)
+
+    def test_ppf(self):
+        # Expected values computed with mpmath.
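+        # A sketch of how such a reference value can be generated
+        # (illustrative only; `chi2_ppf_mp` is a hypothetical helper, not
+        # part of the test suite, and assumes mpmath is available):
+        #
+        #   from mpmath import mp
+        #   mp.dps = 50
+        #   def chi2_ppf_mp(p, df):
+        #       # invert cdf(x, df) = gammainc(df/2, 0, x/2, regularized=True)
+        #       def f(x):
+        #           return mp.gammainc(mp.mpf(df)/2, 0, x/2,
+        #                              regularized=True) - p
+        #       return float(mp.findroot(f, mp.mpf(df)))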
+        df = 4.8
+        x = stats.chi2.ppf(2e-47, df)
+        assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)
+        x = stats.chi2.ppf(0.5, df)
+        assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)
+
+        df = 13
+        x = stats.chi2.ppf(2e-77, df)
+        assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)
+        x = stats.chi2.ppf(0.1, df)
+        assert_allclose(x, 7.041504580095461859307179763, rtol=1e-10)
+
+    # Entropy reference values were computed with the following mpmath code
+    # from mpmath import mp
+    # mp.dps = 50
+    # def chisq_entropy_mpmath(df):
+    #     df = mp.mpf(df)
+    #     half_df = 0.5 * df
+    #     entropy = (half_df + mp.log(2) + mp.log(mp.gamma(half_df)) +
+    #                (mp.one - half_df) * mp.digamma(half_df))
+    #     return float(entropy)
+
+    @pytest.mark.parametrize('df, ref',
+                             [(1e-4, -19988.980448690163),
+                              (1, 0.7837571104739337),
+                              (100, 4.061397128938114),
+                              (251, 4.525577254045129),
+                              (1e15, 19.034900320939986)])
+    def test_entropy(self, df, ref):
+        assert_allclose(stats.chi2(df).entropy(), ref, rtol=1e-13)
+
+
+class TestGumbelL:
+    # gh-6228
+    def test_cdf_ppf(self):
+        x = np.linspace(-100, -4)
+        y = stats.gumbel_l.cdf(x)
+        xx = stats.gumbel_l.ppf(y)
+        assert_allclose(x, xx)
+
+    def test_logcdf_logsf(self):
+        x = np.linspace(-100, -4)
+        y = stats.gumbel_l.logcdf(x)
+        z = stats.gumbel_l.logsf(x)
+        u = np.exp(y)
+        v = -special.expm1(z)
+        assert_allclose(u, v)
+
+    def test_sf_isf(self):
+        x = np.linspace(-20, 5)
+        y = stats.gumbel_l.sf(x)
+        xx = stats.gumbel_l.isf(y)
+        assert_allclose(x, xx)
+
+    @pytest.mark.parametrize('loc', [-1, 1])
+    def test_fit_fixed_param(self, loc):
+        # ensure fixed location is correctly reflected from `gumbel_r.fit`
+        # See comments at end of gh-12737.
+        data = stats.gumbel_l.rvs(size=100, loc=loc)
+        fitted_loc, _ = stats.gumbel_l.fit(data, floc=loc)
+        assert_equal(fitted_loc, loc)
+
+
+class TestGumbelR:
+
+    def test_sf(self):
+        # Expected value computed with mpmath:
+        # >>> import mpmath
+        # >>> mpmath.mp.dps = 40
+        # >>> float(mpmath.mp.one - mpmath.exp(-mpmath.exp(-50)))
+        # 1.9287498479639178e-22
+        assert_allclose(stats.gumbel_r.sf(50), 1.9287498479639178e-22,
+                        rtol=1e-14)
+
+    def test_isf(self):
+        # Expected value computed with mpmath:
+        # >>> import mpmath
+        # >>> mpmath.mp.dps = 40
+        # >>> float(-mpmath.log(-mpmath.log(mpmath.mp.one - 1e-17)))
+        # 39.14394658089878
+        assert_allclose(stats.gumbel_r.isf(1e-17), 39.14394658089878,
+                        rtol=1e-14)
+
+
+class TestLevyStable:
+    @pytest.fixture(autouse=True)
+    def reset_levy_stable_params(self):
+        """Set up default parameters for the levy_stable generator"""
+        stats.levy_stable.parameterization = "S1"
+        stats.levy_stable.cdf_default_method = "piecewise"
+        stats.levy_stable.pdf_default_method = "piecewise"
+        stats.levy_stable.quad_eps = stats._levy_stable._QUAD_EPS
+
+    @pytest.fixture
+    def nolan_pdf_sample_data(self):
+        """Sample data points for pdf computed with Nolan's stablec
+
+        See - http://fs2.american.edu/jpnolan/www/stable/stable.html
+
+        There's a known limitation of Nolan's executable for alpha < 0.2.
+
+        The data table loaded below is generated from Nolan's stablec
+        with the following parameter space:
+
+            alpha = 0.1, 0.2, ..., 2.0
+            beta = -1.0, -0.9, ..., 1.0
+            p = 0.01, 0.05, 0.1, 0.25, 0.35, 0.5,
+        and the equivalent for the right tail
+
+        Typical inputs for stablec:
+
+            stablec.exe <<
+            1 # pdf
+            1 # Nolan S equivalent to S0 in scipy
+            .25,2,.25 # alpha
+            -1,-1,0 # beta
+            -10,10,1 # x
+            1,0 # gamma, delta
+            2 # output file
+        """
+        data = np.load(
+            Path(__file__).parent /
+            'data/levy_stable/stable-Z1-pdf-sample-data.npy'
+        )
+        data = np.rec.fromarrays(data.T, names='x,p,alpha,beta,pct')
+        return data
+
+    @pytest.fixture
+    def nolan_cdf_sample_data(self):
+        """Sample data points for cdf computed with Nolan's stablec
+
+        See - http://fs2.american.edu/jpnolan/www/stable/stable.html
+
+        There's a known limitation of Nolan's executable for alpha < 0.2.
+
+        The data table loaded below is generated from Nolan's stablec
+        with the following parameter space:
+
+            alpha = 0.1, 0.2, ..., 2.0
+            beta = -1.0, -0.9, ..., 1.0
+            p = 0.01, 0.05, 0.1, 0.25, 0.35, 0.5,
+
+        and the equivalent for the right tail
+
+        Ideally, Nolan's output for CDF values should match the percentile
+        from which each point was sampled, all the more so because we
+        extract the percentile x positions from stablec too. However, we
+        note that in places Nolan's stablec produces absolute errors on the
+        order of 1e-5. We compare against his calculations here. In the
+        future, once we are less reliant on Nolan's paper, we might switch
+        to comparing directly at percentiles (with those x values produced
+        by some alternative means).
+
+        Typical inputs for stablec:
+
+            stablec.exe <<
+            2 # cdf
+            1 # Nolan S equivalent to S0 in scipy
+            .25,2,.25 # alpha
+            -1,-1,0 # beta
+            -10,10,1 # x
+            1,0 # gamma, delta
+            2 # output file
+        """
+        data = np.load(
+            Path(__file__).parent /
+            'data/levy_stable/stable-Z1-cdf-sample-data.npy'
+        )
+        data = np.rec.fromarrays(data.T, names='x,p,alpha,beta,pct')
+        return data
+
+    @pytest.fixture
+    def nolan_loc_scale_sample_data(self):
+        """Sample data where loc, scale are different from 0, 1
+
+        Data extracted in a similar way to the pdf/cdf data above using
+        Nolan's stablec, but with an arbitrary location and scale of
+        (2, 3) for various important parameters alpha, beta and for
+        parameterisations S0 and S1.
+ """ + data = np.load( + Path(__file__).parent / + 'data/levy_stable/stable-loc-scale-sample-data.npy' + ) + return data + + @pytest.mark.parametrize( + "sample_size", [ + pytest.param(50), pytest.param(1500, marks=pytest.mark.slow) + ] + ) + @pytest.mark.parametrize("parameterization", ["S0", "S1"]) + @pytest.mark.parametrize( + "alpha,beta", [(1.0, 0), (1.0, -0.5), (1.5, 0), (1.9, 0.5)] + ) + @pytest.mark.parametrize("gamma,delta", [(1, 0), (3, 2)]) + def test_rvs( + self, + parameterization, + alpha, + beta, + gamma, + delta, + sample_size, + ): + stats.levy_stable.parameterization = parameterization + ls = stats.levy_stable( + alpha=alpha, beta=beta, scale=gamma, loc=delta + ) + _, p = stats.kstest( + ls.rvs(size=sample_size, random_state=1234), ls.cdf + ) + assert p > 0.05 + + @pytest.mark.slow + @pytest.mark.parametrize('beta', [0.5, 1]) + def test_rvs_alpha1(self, beta): + """Additional test cases for rvs for alpha equal to 1.""" + np.random.seed(987654321) + alpha = 1.0 + loc = 0.5 + scale = 1.5 + x = stats.levy_stable.rvs(alpha, beta, loc=loc, scale=scale, + size=5000) + stat, p = stats.kstest(x, 'levy_stable', + args=(alpha, beta, loc, scale)) + assert p > 0.01 + + def test_fit(self): + # construct data to have percentiles that match + # example in McCulloch 1986. + x = [ + -.05413, -.05413, 0., 0., 0., 0., .00533, .00533, .00533, .00533, + .00533, .03354, .03354, .03354, .03354, .03354, .05309, .05309, + .05309, .05309, .05309 + ] + alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x) + assert_allclose(alpha1, 1.48, rtol=0, atol=0.01) + assert_almost_equal(beta1, -.22, 2) + assert_almost_equal(scale1, 0.01717, 4) + assert_almost_equal( + loc1, 0.00233, 2 + ) # to 2 dps due to rounding error in McCulloch86 + + # cover alpha=2 scenario + x2 = x + [.05309, .05309, .05309, .05309, .05309] + alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2) + assert_equal(alpha2, 2) + assert_equal(beta2, -1) + assert_almost_equal(scale2, .02503, 4) + assert_almost_equal(loc2, .03354, 4) + + @pytest.mark.xfail(reason="Unknown problem with fitstart.") + @pytest.mark.parametrize( + "alpha,beta,delta,gamma", + [ + (1.5, 0.4, 2, 3), + (1.0, 0.4, 2, 3), + ] + ) + @pytest.mark.parametrize( + "parametrization", ["S0", "S1"] + ) + def test_fit_rvs(self, alpha, beta, delta, gamma, parametrization): + """Test that fit agrees with rvs for each parametrization.""" + stats.levy_stable.parametrization = parametrization + data = stats.levy_stable.rvs( + alpha, beta, loc=delta, scale=gamma, size=10000, random_state=1234 + ) + fit = stats.levy_stable._fitstart(data) + alpha_obs, beta_obs, delta_obs, gamma_obs = fit + assert_allclose( + [alpha, beta, delta, gamma], + [alpha_obs, beta_obs, delta_obs, gamma_obs], + rtol=0.01, + ) + + def test_fit_beta_flip(self): + # Confirm that sign of beta affects loc, not alpha or scale. + x = np.array([1, 1, 3, 3, 10, 10, 10, 30, 30, 100, 100]) + alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x) + alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(-x) + assert_equal(beta1, 1) + assert loc1 != 0 + assert_almost_equal(alpha2, alpha1) + assert_almost_equal(beta2, -beta1) + assert_almost_equal(loc2, -loc1) + assert_almost_equal(scale2, scale1) + + def test_fit_delta_shift(self): + # Confirm that loc slides up and down if data shifts. 
+ SHIFT = 1 + x = np.array([1, 1, 3, 3, 10, 10, 10, 30, 30, 100, 100]) + alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(-x) + alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(-x + SHIFT) + assert_almost_equal(alpha2, alpha1) + assert_almost_equal(beta2, beta1) + assert_almost_equal(loc2, loc1 + SHIFT) + assert_almost_equal(scale2, scale1) + + def test_fit_loc_extrap(self): + # Confirm that loc goes out of sample for alpha close to 1. + x = [1, 1, 3, 3, 10, 10, 10, 30, 30, 140, 140] + alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x) + assert alpha1 < 1, f"Expected alpha < 1, got {alpha1}" + assert loc1 < min(x), f"Expected loc < {min(x)}, got {loc1}" + + x2 = [1, 1, 3, 3, 10, 10, 10, 30, 30, 130, 130] + alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2) + assert alpha2 > 1, f"Expected alpha > 1, got {alpha2}" + assert loc2 > max(x2), f"Expected loc > {max(x2)}, got {loc2}" + + @pytest.mark.parametrize( + "pct_range,alpha_range,beta_range", [ + pytest.param( + [.01, .5, .99], + [.1, 1, 2], + [-1, 0, .8], + ), + pytest.param( + [.01, .05, .5, .95, .99], + [.1, .5, 1, 1.5, 2], + [-.9, -.5, 0, .3, .6, 1], + marks=pytest.mark.slow + ), + pytest.param( + [.01, .05, .1, .25, .35, .5, .65, .75, .9, .95, .99], + np.linspace(0.1, 2, 20), + np.linspace(-1, 1, 21), + marks=pytest.mark.xslow, + ), + ] + ) + def test_pdf_nolan_samples( + self, nolan_pdf_sample_data, pct_range, alpha_range, beta_range + ): + """Test pdf values against Nolan's stablec.exe output""" + data = nolan_pdf_sample_data + + # some tests break on linux 32 bit + uname = platform.uname() + is_linux_32 = uname.system == 'Linux' and uname.machine == 'i686' + platform_desc = "/".join( + [uname.system, uname.machine, uname.processor]) + + # fmt: off + # There are a number of cases which fail on some but not all platforms. + # These are excluded by the filters below. TODO: Rewrite tests so that + # the now filtered out test cases are still run but marked in pytest as + # expected to fail. + tests = [ + [ + 'dni', 1e-7, lambda r: ( + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) & + ~( + ( + (r['beta'] == 0) & + (r['pct'] == 0.5) + ) | + ( + (r['beta'] >= 0.9) & + (r['alpha'] >= 1.6) & + (r['pct'] == 0.5) + ) | + ( + (r['alpha'] <= 0.4) & + np.isin(r['pct'], [.01, .99]) + ) | + ( + (r['alpha'] <= 0.3) & + np.isin(r['pct'], [.05, .95]) + ) | + ( + (r['alpha'] <= 0.2) & + np.isin(r['pct'], [.1, .9]) + ) | + ( + (r['alpha'] == 0.1) & + np.isin(r['pct'], [.25, .75]) & + np.isin(np.abs(r['beta']), [.5, .6, .7]) + ) | + ( + (r['alpha'] == 0.1) & + np.isin(r['pct'], [.5]) & + np.isin(np.abs(r['beta']), [.1]) + ) | + ( + (r['alpha'] == 0.1) & + np.isin(r['pct'], [.35, .65]) & + np.isin(np.abs(r['beta']), [-.4, -.3, .3, .4, .5]) + ) | + ( + (r['alpha'] == 0.2) & + (r['beta'] == 0.5) & + (r['pct'] == 0.25) + ) | + ( + (r['alpha'] == 0.2) & + (r['beta'] == -0.3) & + (r['pct'] == 0.65) + ) | + ( + (r['alpha'] == 0.2) & + (r['beta'] == 0.3) & + (r['pct'] == 0.35) + ) | + ( + (r['alpha'] == 1.) & + np.isin(r['pct'], [.5]) & + np.isin(np.abs(r['beta']), [.1, .2, .3, .4]) + ) | + ( + (r['alpha'] == 1.) & + np.isin(r['pct'], [.35, .65]) & + np.isin(np.abs(r['beta']), [.8, .9, 1.]) + ) | + ( + (r['alpha'] == 1.) 
& + np.isin(r['pct'], [.01, .99]) & + np.isin(np.abs(r['beta']), [-.1, .1]) + ) | + # various points ok but too sparse to list + (r['alpha'] >= 1.1) + ) + ) + ], + # piecewise generally good accuracy + [ + 'piecewise', 1e-11, lambda r: ( + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) & + (r['alpha'] > 0.2) & + (r['alpha'] != 1.) + ) + ], + # for alpha = 1. for linux 32 bit optimize.bisect + # has some issues for .01 and .99 percentile + [ + 'piecewise', 1e-11, lambda r: ( + (r['alpha'] == 1.) & + (not is_linux_32) & + np.isin(r['pct'], pct_range) & + (1. in alpha_range) & + np.isin(r['beta'], beta_range) + ) + ], + # for small alpha very slightly reduced accuracy + [ + 'piecewise', 2.5e-10, lambda r: ( + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) & + (r['alpha'] <= 0.2) + ) + ], + # fft accuracy reduces as alpha decreases + [ + 'fft-simpson', 1e-5, lambda r: ( + (r['alpha'] >= 1.9) & + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) + ), + ], + [ + 'fft-simpson', 1e-6, lambda r: ( + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) & + (r['alpha'] > 1) & + (r['alpha'] < 1.9) + ) + ], + # fft relative errors for alpha < 1, will raise if enabled + # ['fft-simpson', 1e-4, lambda r: r['alpha'] == 0.9], + # ['fft-simpson', 1e-3, lambda r: r['alpha'] == 0.8], + # ['fft-simpson', 1e-2, lambda r: r['alpha'] == 0.7], + # ['fft-simpson', 1e-1, lambda r: r['alpha'] == 0.6], + ] + # fmt: on + for ix, (default_method, rtol, + filter_func) in enumerate(tests): + stats.levy_stable.pdf_default_method = default_method + subdata = data[filter_func(data) + ] if filter_func is not None else data + with suppress_warnings() as sup: + # occurs in FFT methods only + sup.record( + RuntimeWarning, + "Density calculations experimental for FFT method.*" + ) + p = stats.levy_stable.pdf( + subdata['x'], + subdata['alpha'], + subdata['beta'], + scale=1, + loc=0 + ) + with np.errstate(over="ignore"): + subdata2 = rec_append_fields( + subdata, + ['calc', 'abserr', 'relerr'], + [ + p, + np.abs(p - subdata['p']), + np.abs(p - subdata['p']) / np.abs(subdata['p']) + ] + ) + failures = subdata2[ + (subdata2['relerr'] >= rtol) | + np.isnan(p) + ] + message = ( + f"pdf test {ix} failed with method '{default_method}' " + f"[platform: {platform_desc}]\n{failures.dtype.names}\n{failures}" + ) + assert_allclose( + p, + subdata['p'], + rtol, + err_msg=message, + verbose=False + ) + + @pytest.mark.parametrize( + "pct_range,alpha_range,beta_range", [ + pytest.param( + [.01, .5, .99], + [.1, 1, 2], + [-1, 0, .8], + ), + pytest.param( + [.01, .05, .5, .95, .99], + [.1, .5, 1, 1.5, 2], + [-.9, -.5, 0, .3, .6, 1], + marks=pytest.mark.slow + ), + pytest.param( + [.01, .05, .1, .25, .35, .5, .65, .75, .9, .95, .99], + np.linspace(0.1, 2, 20), + np.linspace(-1, 1, 21), + marks=pytest.mark.xslow, + ), + ] + ) + def test_cdf_nolan_samples( + self, nolan_cdf_sample_data, pct_range, alpha_range, beta_range + ): + """ Test cdf values against Nolan's stablec.exe output.""" + data = nolan_cdf_sample_data + tests = [ + # piecewise generally good accuracy + [ + 'piecewise', 2e-12, lambda r: ( + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) & + ~( + ( + (r['alpha'] == 1.) & + np.isin(r['beta'], [-0.3, -0.2, -0.1]) & + (r['pct'] == 0.01) + ) | + ( + (r['alpha'] == 1.) 
& + np.isin(r['beta'], [0.1, 0.2, 0.3]) & + (r['pct'] == 0.99) + ) + ) + ) + ], + # for some points with alpha=1, Nolan's STABLE clearly + # loses accuracy + [ + 'piecewise', 5e-2, lambda r: ( + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) & + ( + (r['alpha'] == 1.) & + np.isin(r['beta'], [-0.3, -0.2, -0.1]) & + (r['pct'] == 0.01) + ) | + ( + (r['alpha'] == 1.) & + np.isin(r['beta'], [0.1, 0.2, 0.3]) & + (r['pct'] == 0.99) + ) + ) + ], + # fft accuracy poor, very poor alpha < 1 + [ + 'fft-simpson', 1e-5, lambda r: ( + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) & + (r['alpha'] > 1.7) + ) + ], + [ + 'fft-simpson', 1e-4, lambda r: ( + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) & + (r['alpha'] > 1.5) & + (r['alpha'] <= 1.7) + ) + ], + [ + 'fft-simpson', 1e-3, lambda r: ( + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) & + (r['alpha'] > 1.3) & + (r['alpha'] <= 1.5) + ) + ], + [ + 'fft-simpson', 1e-2, lambda r: ( + np.isin(r['pct'], pct_range) & + np.isin(r['alpha'], alpha_range) & + np.isin(r['beta'], beta_range) & + (r['alpha'] > 1.0) & + (r['alpha'] <= 1.3) + ) + ], + ] + for ix, (default_method, rtol, + filter_func) in enumerate(tests): + stats.levy_stable.cdf_default_method = default_method + subdata = data[filter_func(data) + ] if filter_func is not None else data + with suppress_warnings() as sup: + sup.record( + RuntimeWarning, + 'Cumulative density calculations experimental for FFT' + + ' method. Use piecewise method instead.*' + ) + p = stats.levy_stable.cdf( + subdata['x'], + subdata['alpha'], + subdata['beta'], + scale=1, + loc=0 + ) + with np.errstate(over="ignore"): + subdata2 = rec_append_fields( + subdata, + ['calc', 'abserr', 'relerr'], + [ + p, + np.abs(p - subdata['p']), + np.abs(p - subdata['p']) / np.abs(subdata['p']) + ] + ) + failures = subdata2[ + (subdata2['relerr'] >= rtol) | + np.isnan(p) + ] + message = (f"cdf test {ix} failed with method '{default_method}'\n" + f"{failures.dtype.names}\n{failures}") + assert_allclose( + p, + subdata['p'], + rtol, + err_msg=message, + verbose=False + ) + + @pytest.mark.parametrize("param", [0, 1]) + @pytest.mark.parametrize("case", ["pdf", "cdf"]) + def test_location_scale( + self, nolan_loc_scale_sample_data, param, case + ): + """Tests for pdf and cdf where loc, scale are different from 0, 1 + """ + + uname = platform.uname() + is_linux_32 = uname.system == 'Linux' and "32bit" in platform.architecture()[0] + # Test seems to be unstable (see gh-17839 for a bug report on Debian + # i386), so skip it. + if is_linux_32 and case == 'pdf': + pytest.skip("Test unstable on some platforms; see gh-17839, 17859") + + data = nolan_loc_scale_sample_data + # We only test against piecewise as location/scale transforms + # are same for other methods. 
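+        # (The loc/scale handling is applied before the method-specific
+        # evaluation of the standardized distribution, so verifying one
+        # method exercises the shared transform code path.)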
+ stats.levy_stable.cdf_default_method = "piecewise" + stats.levy_stable.pdf_default_method = "piecewise" + + subdata = data[data["param"] == param] + stats.levy_stable.parameterization = f"S{param}" + + assert case in ["pdf", "cdf"] + function = ( + stats.levy_stable.pdf if case == "pdf" else stats.levy_stable.cdf + ) + + v1 = function( + subdata['x'], subdata['alpha'], subdata['beta'], scale=2, loc=3 + ) + assert_allclose(v1, subdata[case], 1e-5) + + @pytest.mark.parametrize( + "method,decimal_places", + [ + ['dni', 4], + ['piecewise', 4], + ] + ) + def test_pdf_alpha_equals_one_beta_non_zero(self, method, decimal_places): + """ sample points extracted from Tables and Graphs of Stable + Probability Density Functions - Donald R Holt - 1973 - p 187. + """ + xs = np.array( + [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4] + ) + density = np.array( + [ + .3183, .3096, .2925, .2622, .1591, .1587, .1599, .1635, .0637, + .0729, .0812, .0955, .0318, .0390, .0458, .0586, .0187, .0236, + .0285, .0384 + ] + ) + betas = np.array( + [ + 0, .25, .5, 1, 0, .25, .5, 1, 0, .25, .5, 1, 0, .25, .5, 1, 0, + .25, .5, 1 + ] + ) + with np.errstate(all='ignore'), suppress_warnings() as sup: + sup.filter( + category=RuntimeWarning, + message="Density calculation unstable.*" + ) + stats.levy_stable.pdf_default_method = method + # stats.levy_stable.fft_grid_spacing = 0.0001 + pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0) + assert_almost_equal( + pdf, density, decimal_places, method + ) + + @pytest.mark.parametrize( + "params,expected", + [ + [(1.48, -.22, 0, 1), (0, np.inf, np.nan, np.nan)], + [(2, .9, 10, 1.5), (10, 4.5, 0, 0)] + ] + ) + def test_stats(self, params, expected): + observed = stats.levy_stable.stats( + params[0], params[1], loc=params[2], scale=params[3], + moments='mvsk' + ) + assert_almost_equal(observed, expected) + + @pytest.mark.parametrize('alpha', [0.25, 0.5, 0.75]) + @pytest.mark.parametrize( + 'function,beta,points,expected', + [ + ( + stats.levy_stable.cdf, + 1.0, + np.linspace(-25, 0, 10), + 0.0, + ), + ( + stats.levy_stable.pdf, + 1.0, + np.linspace(-25, 0, 10), + 0.0, + ), + ( + stats.levy_stable.cdf, + -1.0, + np.linspace(0, 25, 10), + 1.0, + ), + ( + stats.levy_stable.pdf, + -1.0, + np.linspace(0, 25, 10), + 0.0, + ) + ] + ) + def test_distribution_outside_support( + self, alpha, function, beta, points, expected + ): + """Ensure the pdf/cdf routines do not return nan outside support. + + This distribution's support becomes truncated in a few special cases: + support is [mu, infty) if alpha < 1 and beta = 1 + support is (-infty, mu] if alpha < 1 and beta = -1 + Otherwise, the support is all reals. Here, mu is zero by default. + """ + assert 0 < alpha < 1 + assert_almost_equal( + function(points, alpha=alpha, beta=beta), + np.full(len(points), expected) + ) + + @pytest.mark.parametrize( + 'x,alpha,beta,expected', + # Reference values from Matlab + # format long + # alphas = [1.7720732804618808, 1.9217001522410235, 1.5654806051633634, + # 1.7420803447784388, 1.5748002527689913]; + # betas = [0.5059373136902996, -0.8779442746685926, -0.4016220341911392, + # -0.38180029468259247, -0.25200194914153684]; + # x0s = [0, 1e-4, -1e-4]; + # for x0 = x0s + # disp("x0 = " + x0) + # for ii = 1:5 + # alpha = alphas(ii); + # beta = betas(ii); + # pd = makedist('Stable','alpha',alpha,'beta',beta,'gam',1,'delta',0); + # % we need to adjust x. It is the same as x = 0 In scipy. 
+    #         x = x0 - beta * tan(pi * alpha / 2);
+    #         disp(pd.pdf(x))
+    #     end
+    # end
+        [
+            (0, 1.7720732804618808, 0.5059373136902996, 0.278932636798268),
+            (0, 1.9217001522410235, -0.8779442746685926, 0.281054757202316),
+            (0, 1.5654806051633634, -0.4016220341911392, 0.271282133194204),
+            (0, 1.7420803447784388, -0.38180029468259247, 0.280202199244247),
+            (0, 1.5748002527689913, -0.25200194914153684, 0.280136576218665),
+        ]
+    )
+    def test_x_equal_zeta(
+        self, x, alpha, beta, expected
+    ):
+        """Test pdf for x equal to zeta.
+
+        With the S1 parametrization, x0 = x + zeta if alpha != 1. So, for
+        x = 0, x0 will be close to zeta.
+
+        When the case "x equal zeta" is not handled properly and quad_eps
+        is not low enough:
+        - pdf may be less than 0
+        - logpdf is nan
+
+        The points in the parametrize block were found by random search,
+        as points where the PDF was less than 0.
+
+        Reference values taken from MATLAB
+        https://www.mathworks.com/help/stats/stable-distribution.html
+        """
+        stats.levy_stable.quad_eps = 1.2e-11
+
+        assert_almost_equal(
+            stats.levy_stable.pdf(x, alpha=alpha, beta=beta),
+            expected,
+        )
+
+    @pytest.mark.xfail
+    @pytest.mark.parametrize(
+        # See the comment in test_x_equal_zeta for the script that
+        # generates these reference values
+        'x,alpha,beta,expected',
+        [
+            (1e-4, 1.7720732804618808, 0.5059373136902996, 0.278929165340670),
+            (1e-4, 1.9217001522410235, -0.8779442746685926, 0.281056564327953),
+            (1e-4, 1.5654806051633634, -0.4016220341911392, 0.271252432161167),
+            (1e-4, 1.7420803447784388, -0.38180029468259247, 0.280205311264134),
+            (1e-4, 1.5748002527689913, -0.25200194914153684, 0.280140965235426),
+            (-1e-4, 1.7720732804618808, 0.5059373136902996, 0.278936106741754),
+            (-1e-4, 1.9217001522410235, -0.8779442746685926, 0.281052948629429),
+            (-1e-4, 1.5654806051633634, -0.4016220341911392, 0.271275394392385),
+            (-1e-4, 1.7420803447784388, -0.38180029468259247, 0.280199085645099),
+            (-1e-4, 1.5748002527689913, -0.25200194914153684, 0.280132185432842),
+        ]
+    )
+    def test_x_near_zeta(
+        self, x, alpha, beta, expected
+    ):
+        """Test pdf for x near zeta.
+
+        With the S1 parametrization, x0 = x + zeta if alpha != 1. So, for
+        x = 0, x0 will be close to zeta.
+
+        When the case "x near zeta" is not handled properly and quad_eps
+        is not low enough:
+        - pdf may be less than 0
+        - logpdf is nan
+
+        The points in the parametrize block were found by random search,
+        as points where the PDF was less than 0.
+
+        Reference values taken from MATLAB
+        https://www.mathworks.com/help/stats/stable-distribution.html
+        """
+        stats.levy_stable.quad_eps = 1.2e-11
+
+        assert_almost_equal(
+            stats.levy_stable.pdf(x, alpha=alpha, beta=beta),
+            expected,
+        )
+
+
+class TestArrayArgument:  # test for ticket:992
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_noexception(self):
+        rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
+                             size=(10, 5))
+        assert_equal(rvs.shape, (10, 5))
+
+
+class TestDocstring:
+    def test_docstrings(self):
+        # See ticket #761
+        if stats.rayleigh.__doc__ is not None:
+            assert_("rayleigh" in stats.rayleigh.__doc__.lower())
+        if stats.bernoulli.__doc__ is not None:
+            assert_("bernoulli" in stats.bernoulli.__doc__.lower())
+
+    def test_no_name_arg(self):
+        # If name is not given, construction shouldn't fail. See #1508.
+ stats.rv_continuous() + stats.rv_discrete() + + +def test_args_reduce(): + a = array([1, 3, 2, 1, 2, 3, 3]) + b, c = argsreduce(a > 1, a, 2) + + assert_array_equal(b, [3, 2, 2, 3, 3]) + assert_array_equal(c, [2]) + + b, c = argsreduce(2 > 1, a, 2) + assert_array_equal(b, a) + assert_array_equal(c, [2] * np.size(a)) + + b, c = argsreduce(a > 0, a, 2) + assert_array_equal(b, a) + assert_array_equal(c, [2] * np.size(a)) + + +class TestFitMethod: + skip = ['ncf', 'ksone', 'kstwo'] + + def setup_method(self): + np.random.seed(1234) + + # skip these b/c deprecated, or only loc and scale arguments + fitSkipNonFinite = ['expon', 'norm', 'uniform'] + + @pytest.mark.parametrize('dist,args', distcont) + def test_fit_w_non_finite_data_values(self, dist, args): + """gh-10300""" + if dist in self.fitSkipNonFinite: + pytest.skip("%s fit known to fail or deprecated" % dist) + x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan]) + y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf]) + distfunc = getattr(stats, dist) + assert_raises(ValueError, distfunc.fit, x, fscale=1) + assert_raises(ValueError, distfunc.fit, y, fscale=1) + + def test_fix_fit_2args_lognorm(self): + # Regression test for #1551. + np.random.seed(12345) + with np.errstate(all='ignore'): + x = stats.lognorm.rvs(0.25, 0., 20.0, size=20) + expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean()) + assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)), + [expected_shape, 0, 20], atol=1e-8) + + def test_fix_fit_norm(self): + x = np.arange(1, 6) + + loc, scale = stats.norm.fit(x) + assert_almost_equal(loc, 3) + assert_almost_equal(scale, np.sqrt(2)) + + loc, scale = stats.norm.fit(x, floc=2) + assert_equal(loc, 2) + assert_equal(scale, np.sqrt(3)) + + loc, scale = stats.norm.fit(x, fscale=2) + assert_almost_equal(loc, 3) + assert_equal(scale, 2) + + def test_fix_fit_gamma(self): + x = np.arange(1, 6) + meanlog = np.log(x).mean() + + # A basic test of gamma.fit with floc=0. + floc = 0 + a, loc, scale = stats.gamma.fit(x, floc=floc) + s = np.log(x.mean()) - meanlog + assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5) + assert_equal(loc, floc) + assert_almost_equal(scale, x.mean()/a, decimal=8) + + # Regression tests for gh-2514. + # The problem was that if `floc=0` was given, any other fixed + # parameters were ignored. + f0 = 1 + floc = 0 + a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc) + assert_equal(a, f0) + assert_equal(loc, floc) + assert_almost_equal(scale, x.mean()/a, decimal=8) + + f0 = 2 + floc = 0 + a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc) + assert_equal(a, f0) + assert_equal(loc, floc) + assert_almost_equal(scale, x.mean()/a, decimal=8) + + # loc and scale fixed. + floc = 0 + fscale = 2 + a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale) + assert_equal(loc, floc) + assert_equal(scale, fscale) + c = meanlog - np.log(fscale) + assert_almost_equal(special.digamma(a), c) + + def test_fix_fit_beta(self): + # Test beta.fit when both floc and fscale are given. + + def mlefunc(a, b, x): + # Zeros of this function are critical points of + # the maximum likelihood function. + n = len(x) + s1 = np.log(x).sum() + s2 = np.log(1-x).sum() + psiab = special.psi(a + b) + func = [s1 - n * (-psiab + special.psi(a)), + s2 - n * (-psiab + special.psi(b))] + return func + + # Basic test with floc and fscale given. 
+ x = np.array([0.125, 0.25, 0.5]) + a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1) + assert_equal(loc, 0) + assert_equal(scale, 1) + assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6) + + # Basic test with f0, floc and fscale given. + # This is also a regression test for gh-2514. + x = np.array([0.125, 0.25, 0.5]) + a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1) + assert_equal(a, 2) + assert_equal(loc, 0) + assert_equal(scale, 1) + da, db = mlefunc(a, b, x) + assert_allclose(db, 0, atol=1e-5) + + # Same floc and fscale values as above, but reverse the data + # and fix b (f1). + x2 = 1 - x + a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1) + assert_equal(b2, 2) + assert_equal(loc2, 0) + assert_equal(scale2, 1) + da, db = mlefunc(a2, b2, x2) + assert_allclose(da, 0, atol=1e-5) + # a2 of this test should equal b from above. + assert_almost_equal(a2, b) + + # Check for detection of data out of bounds when floc and fscale + # are given. + assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1) + y = np.array([0, .5, 1]) + assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1) + assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2) + assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2) + + # Check that attempting to fix all the parameters raises a ValueError. + assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1, + floc=2, fscale=3) + + def test_expon_fit(self): + x = np.array([2, 2, 4, 4, 4, 4, 4, 8]) + + loc, scale = stats.expon.fit(x) + assert_equal(loc, 2) # x.min() + assert_equal(scale, 2) # x.mean() - x.min() + + loc, scale = stats.expon.fit(x, fscale=3) + assert_equal(loc, 2) # x.min() + assert_equal(scale, 3) # fscale + + loc, scale = stats.expon.fit(x, floc=0) + assert_equal(loc, 0) # floc + assert_equal(scale, 4) # x.mean() - loc + + def test_lognorm_fit(self): + x = np.array([1.5, 3, 10, 15, 23, 59]) + lnxm1 = np.log(x - 1) + + shape, loc, scale = stats.lognorm.fit(x, floc=1) + assert_allclose(shape, lnxm1.std(), rtol=1e-12) + assert_equal(loc, 1) + assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12) + + shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6) + assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()), + rtol=1e-12) + assert_equal(loc, 1) + assert_equal(scale, 6) + + shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75) + assert_equal(shape, 0.75) + assert_equal(loc, 1) + assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12) + + def test_uniform_fit(self): + x = np.array([1.0, 1.1, 1.2, 9.0]) + + loc, scale = stats.uniform.fit(x) + assert_equal(loc, x.min()) + assert_equal(scale, np.ptp(x)) + + loc, scale = stats.uniform.fit(x, floc=0) + assert_equal(loc, 0) + assert_equal(scale, x.max()) + + loc, scale = stats.uniform.fit(x, fscale=10) + assert_equal(loc, 0) + assert_equal(scale, 10) + + assert_raises(ValueError, stats.uniform.fit, x, floc=2.0) + assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0) + + @pytest.mark.slow + @pytest.mark.parametrize("method", ["MLE", "MM"]) + def test_fshapes(self, method): + # take a beta distribution, with shapes='a, b', and make sure that + # fa is equivalent to f0, and fb is equivalent to f1 + a, b = 3., 4. 
+ x = stats.beta.rvs(a, b, size=100, random_state=1234) + res_1 = stats.beta.fit(x, f0=3., method=method) + res_2 = stats.beta.fit(x, fa=3., method=method) + assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12) + + res_2 = stats.beta.fit(x, fix_a=3., method=method) + assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12) + + res_3 = stats.beta.fit(x, f1=4., method=method) + res_4 = stats.beta.fit(x, fb=4., method=method) + assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12) + + res_4 = stats.beta.fit(x, fix_b=4., method=method) + assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12) + + # cannot specify both positional and named args at the same time + assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2, method=method) + + # check that attempting to fix all parameters raises a ValueError + assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1, + floc=2, fscale=3, method=method) + + # check that specifying floc, fscale and fshapes works for + # beta and gamma which override the generic fit method + res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1, method=method) + aa, bb, ll, ss = res_5 + assert_equal([aa, ll, ss], [3., 0, 1]) + + # gamma distribution + a = 3. + data = stats.gamma.rvs(a, size=100) + aa, ll, ss = stats.gamma.fit(data, fa=a, method=method) + assert_equal(aa, a) + + @pytest.mark.parametrize("method", ["MLE", "MM"]) + def test_extra_params(self, method): + # unknown parameters should raise rather than be silently ignored + dist = stats.exponnorm + data = dist.rvs(K=2, size=100) + dct = dict(enikibeniki=-101) + assert_raises(TypeError, dist.fit, data, **dct, method=method) + + +class TestFrozen: + def setup_method(self): + np.random.seed(1234) + + # Test that a frozen distribution gives the same results as the original + # object. + # + # Only tested for the normal distribution (with loc and scale specified) + # and for the gamma distribution (with a shape parameter specified). 
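+    # For example, freezing fixes the parameters once and for all:
+    #
+    #   frozen = stats.norm(loc=10.0, scale=3.0)
+    #   frozen.pdf(20.0)  # == stats.norm.pdf(20.0, loc=10.0, scale=3.0)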
+ def test_norm(self): + dist = stats.norm + frozen = stats.norm(loc=10.0, scale=3.0) + + result_f = frozen.pdf(20.0) + result = dist.pdf(20.0, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.cdf(20.0) + result = dist.cdf(20.0, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.ppf(0.25) + result = dist.ppf(0.25, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.isf(0.25) + result = dist.isf(0.25, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.sf(10.0) + result = dist.sf(10.0, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.median() + result = dist.median(loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.mean() + result = dist.mean(loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.var() + result = dist.var(loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.std() + result = dist.std(loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.entropy() + result = dist.entropy(loc=10.0, scale=3.0) + assert_equal(result_f, result) + + result_f = frozen.moment(2) + result = dist.moment(2, loc=10.0, scale=3.0) + assert_equal(result_f, result) + + assert_equal(frozen.a, dist.a) + assert_equal(frozen.b, dist.b) + + def test_gamma(self): + a = 2.0 + dist = stats.gamma + frozen = stats.gamma(a) + + result_f = frozen.pdf(20.0) + result = dist.pdf(20.0, a) + assert_equal(result_f, result) + + result_f = frozen.cdf(20.0) + result = dist.cdf(20.0, a) + assert_equal(result_f, result) + + result_f = frozen.ppf(0.25) + result = dist.ppf(0.25, a) + assert_equal(result_f, result) + + result_f = frozen.isf(0.25) + result = dist.isf(0.25, a) + assert_equal(result_f, result) + + result_f = frozen.sf(10.0) + result = dist.sf(10.0, a) + assert_equal(result_f, result) + + result_f = frozen.median() + result = dist.median(a) + assert_equal(result_f, result) + + result_f = frozen.mean() + result = dist.mean(a) + assert_equal(result_f, result) + + result_f = frozen.var() + result = dist.var(a) + assert_equal(result_f, result) + + result_f = frozen.std() + result = dist.std(a) + assert_equal(result_f, result) + + result_f = frozen.entropy() + result = dist.entropy(a) + assert_equal(result_f, result) + + result_f = frozen.moment(2) + result = dist.moment(2, a) + assert_equal(result_f, result) + + assert_equal(frozen.a, frozen.dist.a) + assert_equal(frozen.b, frozen.dist.b) + + def test_regression_ticket_1293(self): + # Create a frozen distribution. + frozen = stats.lognorm(1) + # Call one of its methods that does not take any keyword arguments. + m1 = frozen.moment(2) + # Now call a method that takes a keyword argument. + frozen.stats(moments='mvsk') + # Call moment(2) again. + # After calling stats(), the following was raising an exception. + # So this test passes if the following does not raise an exception. + m2 = frozen.moment(2) + # The following should also be true, of course. But it is not + # the focus of this test. 
+ assert_equal(m1, m2) + + def test_ab(self): + # test that the support of a frozen distribution + # (i) remains frozen even if it changes for the original one + # (ii) is actually correct if the shape parameters are such that + # the values of [a, b] are not the default [0, inf] + # take a genpareto as an example where the support + # depends on the value of the shape parameter: + # for c > 0: a, b = 0, inf + # for c < 0: a, b = 0, -1/c + + c = -0.1 + rv = stats.genpareto(c=c) + a, b = rv.dist._get_support(c) + assert_equal([a, b], [0., 10.]) + + c = 0.1 + stats.genpareto.pdf(0, c=c) + assert_equal(rv.dist._get_support(c), [0, np.inf]) + + c = -0.1 + rv = stats.genpareto(c=c) + a, b = rv.dist._get_support(c) + assert_equal([a, b], [0., 10.]) + + c = 0.1 + stats.genpareto.pdf(0, c) # this should NOT change genpareto.b + assert_equal((rv.dist.a, rv.dist.b), stats.genpareto._get_support(c)) + + rv1 = stats.genpareto(c=0.1) + assert_(rv1.dist is not rv.dist) + + # c >= 0: a, b = [0, inf] + for c in [1., 0.]: + c = np.asarray(c) + rv = stats.genpareto(c=c) + a, b = rv.a, rv.b + assert_equal(a, 0.) + assert_(np.isposinf(b)) + + # c < 0: a=0, b=1/|c| + c = np.asarray(-2.) + a, b = stats.genpareto._get_support(c) + assert_allclose([a, b], [0., 0.5]) + + def test_rv_frozen_in_namespace(self): + # Regression test for gh-3522 + assert_(hasattr(stats.distributions, 'rv_frozen')) + + def test_random_state(self): + # only check that the random_state attribute exists, + frozen = stats.norm() + assert_(hasattr(frozen, 'random_state')) + + # ... that it can be set, + frozen.random_state = 42 + assert_equal(frozen.random_state.get_state(), + np.random.RandomState(42).get_state()) + + # ... and that .rvs method accepts it as an argument + rndm = np.random.RandomState(1234) + frozen.rvs(size=8, random_state=rndm) + + def test_pickling(self): + # test that a frozen instance pickles and unpickles + # (this method is a clone of common_tests.check_pickling) + beta = stats.beta(2.3098496451481823, 0.62687954300963677) + poiss = stats.poisson(3.) + sample = stats.rv_discrete(values=([0, 1, 2, 3], + [0.1, 0.2, 0.3, 0.4])) + + for distfn in [beta, poiss, sample]: + distfn.random_state = 1234 + distfn.rvs(size=8) + s = pickle.dumps(distfn) + r0 = distfn.rvs(size=8) + + unpickled = pickle.loads(s) + r1 = unpickled.rvs(size=8) + assert_equal(r0, r1) + + # also smoke test some methods + medians = [distfn.ppf(0.5), unpickled.ppf(0.5)] + assert_equal(medians[0], medians[1]) + assert_equal(distfn.cdf(medians[0]), + unpickled.cdf(medians[1])) + + def test_expect(self): + # smoke test the expect method of the frozen distribution + # only take a gamma w/loc and scale and poisson with loc specified + def func(x): + return x + + gm = stats.gamma(a=2, loc=3, scale=4) + with np.errstate(invalid="ignore", divide="ignore"): + gm_val = gm.expect(func, lb=1, ub=2, conditional=True) + gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4, + lb=1, ub=2, conditional=True) + assert_allclose(gm_val, gamma_val) + + p = stats.poisson(3, loc=4) + p_val = p.expect(func) + poisson_val = stats.poisson.expect(func, args=(3,), loc=4) + assert_allclose(p_val, poisson_val) + + +class TestExpect: + # Test for expect method. 
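+    # `expect` approximates E[func(X)] = integral of func(x) * pdf(x) over
+    # the support (a sum over the pmf for discrete distributions), optionally
+    # restricted to [lb, ub]; with conditional=True the result is normalized
+    # by the probability of [lb, ub].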
+ # + # Uses normal distribution and beta distribution for finite bounds, and + # hypergeom for discrete distribution with finite support + def test_norm(self): + v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2) + assert_almost_equal(v, 4, decimal=14) + + m = stats.norm.expect(lambda x: (x), loc=5, scale=2) + assert_almost_equal(m, 5, decimal=14) + + lb = stats.norm.ppf(0.05, loc=5, scale=2) + ub = stats.norm.ppf(0.95, loc=5, scale=2) + prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub) + assert_almost_equal(prob90, 0.9, decimal=14) + + prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub, + conditional=True) + assert_almost_equal(prob90c, 1., decimal=14) + + def test_beta(self): + # case with finite support interval + v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5), + loc=5, scale=2) + assert_almost_equal(v, 1./18., decimal=13) + + m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.) + assert_almost_equal(m, 19/3., decimal=13) + + ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2) + lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2) + prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5., + scale=2., lb=lb, ub=ub, conditional=False) + assert_almost_equal(prob90, 0.9, decimal=13) + + prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5, + scale=2, lb=lb, ub=ub, conditional=True) + assert_almost_equal(prob90c, 1., decimal=13) + + def test_hypergeom(self): + # test case with finite bounds + + # without specifying bounds + m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.) + m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.) + assert_almost_equal(m, m_true, decimal=13) + + v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8), + loc=5.) 
+        assert_almost_equal(v, v_true, decimal=14)
+
+        # with bounds, bounds equal to shifted support
+        v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
+                                          args=(20, 10, 8),
+                                          loc=5., lb=5, ub=13)
+        assert_almost_equal(v_bounds, v_true, decimal=14)
+
+        # drop boundary points
+        prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
+        prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
+                                             loc=5., lb=6, ub=12)
+        assert_almost_equal(prob_bounds, prob_true, decimal=13)
+
+        # conditional
+        prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
+                                         lb=6, ub=12, conditional=True)
+        assert_almost_equal(prob_bc, 1, decimal=14)
+
+        # check simple integral
+        prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
+                                        lb=0, ub=8)
+        assert_almost_equal(prob_b, 1, decimal=13)
+
+    def test_poisson(self):
+        # poisson, use lower bound only
+        prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
+                                           conditional=False)
+        prob_b_true = 1-stats.poisson.cdf(2, 2)
+        assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
+
+        prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
+                                       conditional=True)
+        assert_almost_equal(prob_lb, 1, decimal=14)
+
+    def test_genhalflogistic(self):
+        # genhalflogistic, changes upper bound of support in _argcheck
+        # regression test for gh-2622
+        halflog = stats.genhalflogistic
+        # check consistency when calling expect twice with the same input
+        res1 = halflog.expect(args=(1.5,))
+        halflog.expect(args=(0.5,))
+        res2 = halflog.expect(args=(1.5,))
+        assert_almost_equal(res1, res2, decimal=14)
+
+    def test_rice_overflow(self):
+        # rice.pdf(999, 0.74) was inf since special.i0 silently overflows
+        # check that using i0e fixes it
+        assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
+
+        assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
+        assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
+        assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
+
+    def test_logser(self):
+        # test a discrete distribution with infinite support and loc
+        p, loc = 0.3, 3
+        res_0 = stats.logser.expect(lambda k: k, args=(p,))
+        # check against the correct answer (sum of a geom series)
+        assert_allclose(res_0,
+                        p / (p - 1.) / np.log(1. - p), atol=1e-15)
+
+        # now check it with `loc`
+        res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
+        assert_allclose(res_l, res_0 + loc, atol=1e-15)
+
+    def test_skellam(self):
+        # Use a discrete distribution w/ bi-infinite support.
+        # Compute the first two moments and compare to known values
+        # (cf skellam.stats)
+        p1, p2 = 18, 22
+        m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
+        m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
+        assert_allclose(m1, p1 - p2, atol=1e-12)
+        assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)
+
+    def test_randint(self):
+        # Use a discrete distribution w/ parameter-dependent support, which
+        # is larger than the default chunksize
+        lo, hi = 0, 113
+        res = stats.randint.expect(lambda x: x, (lo, hi))
+        assert_allclose(res,
+                        sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)
+
+    def test_zipf(self):
+        # Test that there is no infinite loop even if the sum diverges
+        assert_warns(RuntimeWarning, stats.zipf.expect,
+                     lambda x: x**2, (2,))
+
+    def test_discrete_kwds(self):
+        # check that discrete expect accepts keywords to control the summation
+        n0 = stats.poisson.expect(lambda x: 1, args=(2,))
+        n1 = stats.poisson.expect(lambda x: 1, args=(2,),
+                                  maxcount=1001, chunksize=32, tolerance=1e-8)
+        assert_almost_equal(n0, n1, decimal=14)
+
+    def test_moment(self):
+        # test the .moment() method: compute a higher moment and compare to
+        # a known value
+        def poiss_moment5(mu):
+            return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu
+
+        for mu in [5, 7]:
+            m5 = stats.poisson.moment(5, mu)
+            assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
+
+    def test_challenging_cases_gh8928(self):
+        # Several cases where `expect` failed to produce a correct result were
+        # reported in gh-8928. Check that these cases have been resolved.
+        assert_allclose(stats.norm.expect(loc=36, scale=1.0), 36)
+        assert_allclose(stats.norm.expect(loc=40, scale=1.0), 40)
+        assert_allclose(stats.norm.expect(loc=10, scale=0.1), 10)
+        assert_allclose(stats.gamma.expect(args=(148,)), 148)
+        assert_allclose(stats.logistic.expect(loc=85), 85)
+
+    def test_lb_ub_gh15855(self):
+        # Make sure changes to `expect` made in gh-15855 treat lb/ub correctly
+        dist = stats.uniform
+        ref = dist.mean(loc=10, scale=5)  # 12.5
+        # moment over whole distribution
+        assert_allclose(dist.expect(loc=10, scale=5), ref)
+        # moment over whole distribution, lb and ub outside of support
+        assert_allclose(dist.expect(loc=10, scale=5, lb=9, ub=16), ref)
+        # moment over 60% of distribution, [lb, ub] centered within support
+        assert_allclose(dist.expect(loc=10, scale=5, lb=11, ub=14), ref*0.6)
+        # moment over truncated distribution, essentially
+        assert_allclose(dist.expect(loc=10, scale=5, lb=11, ub=14,
+                                    conditional=True), ref)
+        # moment over 40% of distribution, [lb, ub] not centered within support
+        assert_allclose(dist.expect(loc=10, scale=5, lb=11, ub=13), 12*0.4)
+        # moment with lb > ub
+        assert_allclose(dist.expect(loc=10, scale=5, lb=13, ub=11), -12*0.4)
+        # moment with lb > ub, conditional
+        assert_allclose(dist.expect(loc=10, scale=5, lb=13, ub=11,
+                                    conditional=True), 12)
+
+
+class TestNct:
+    def test_nc_parameter(self):
+        # Parameter values c <= 0 were not enabled (gh-2402).
+        # For negative values of c and for c=0, the results of rv.cdf(0)
+        # below were nan.
+        rv = stats.nct(5, 0)
+        assert_equal(rv.cdf(0), 0.5)
+        rv = stats.nct(5, -1)
+        assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
+
+    def test_broadcasting(self):
+        res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
+                            np.linspace(0.1, 1, 4))
+        expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
+                          [0.00217142, 0.00395366, 0.00683888, 0.01126276],
+                          [0.00153078, 0.00291093, 0.00525206, 0.00900815]])
+        assert_allclose(res, expected, rtol=1e-5)
+
+    def test_variance_gh_issue_2401(self):
+        # Computation of the variance of a non-central t-distribution resulted
+        # in a TypeError: ufunc 'isinf' not supported for the input types,
+        # and the inputs could not be safely coerced to any supported types
+        # according to the casting rule 'safe'
+        rv = stats.nct(4, 0)
+        assert_equal(rv.var(), 2.0)
+
+    def test_nct_inf_moments(self):
+        # n-th moment of nct only exists for df > n
+        m, v, s, k = stats.nct.stats(df=0.9, nc=0.3, moments='mvsk')
+        assert_equal([m, v, s, k], [np.nan, np.nan, np.nan, np.nan])
+
+        m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
+        assert_(np.isfinite(m))
+        assert_equal([v, s, k], [np.nan, np.nan, np.nan])
+
+        m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
+        assert_(np.isfinite([m, v, s]).all())
+        assert_equal(k, np.nan)
+
+    def test_nct_stats_large_df_values(self):
+        # previously the gamma function was used, which lost precision at
+        # df=345; cf. https://github.com/scipy/scipy/issues/12919 for details
+        nct_mean_df_1000 = stats.nct.mean(1000, 2)
+        nct_stats_df_1000 = stats.nct.stats(1000, 2)
+        # These expected values were computed with mpmath. They were also
+        # verified with the Wolfram Alpha expressions:
+        #     Mean[NoncentralStudentTDistribution[1000, 2]]
+        #     Var[NoncentralStudentTDistribution[1000, 2]]
+        expected_stats_df_1000 = [2.0015015641422464, 1.0040115288163005]
+        assert_allclose(nct_mean_df_1000, expected_stats_df_1000[0],
+                        rtol=1e-10)
+        assert_allclose(nct_stats_df_1000, expected_stats_df_1000,
+                        rtol=1e-10)
+        # and a bigger df value
+        nct_mean = stats.nct.mean(100000, 2)
+        nct_stats = stats.nct.stats(100000, 2)
+        # These expected values were computed with mpmath.
+        expected_stats = [2.0000150001562518, 1.0000400011500288]
+        assert_allclose(nct_mean, expected_stats[0], rtol=1e-10)
+        assert_allclose(nct_stats, expected_stats, rtol=1e-9)
+
+    def test_cdf_large_nc(self):
+        # gh-17916 reported a crash with large `nc` values
+        assert_allclose(stats.nct.cdf(2, 2, float(2**16)), 0)
+
+
+class TestRecipInvGauss:
+
+    def test_pdf_endpoint(self):
+        p = stats.recipinvgauss.pdf(0, 0.6)
+        assert p == 0.0
+
+    def test_logpdf_endpoint(self):
+        logp = stats.recipinvgauss.logpdf(0, 0.6)
+        assert logp == -np.inf
+
+    def test_cdf_small_x(self):
+        # The expected value was computed with mpmath:
+        #
+        # import mpmath
+        #
+        # mpmath.mp.dps = 100
+        #
+        # def recipinvgauss_cdf_mp(x, mu):
+        #     x = mpmath.mpf(x)
+        #     mu = mpmath.mpf(mu)
+        #     trm1 = 1/mu - x
+        #     trm2 = 1/mu + x
+        #     isqx = 1/mpmath.sqrt(x)
+        #     return (mpmath.ncdf(-isqx*trm1)
+        #             - mpmath.exp(2/mu)*mpmath.ncdf(-isqx*trm2))
+        #
+        p = stats.recipinvgauss.cdf(0.05, 0.5)
+        expected = 6.590396159501331e-20
+        assert_allclose(p, expected, rtol=1e-14)
+
+    def test_sf_large_x(self):
+        # The expected value was computed with mpmath; see test_cdf_small_x.
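+        # e.g., reusing the `recipinvgauss_cdf_mp` sketch from that test
+        # (assuming mpmath is available):
+        #
+        #   float(1 - recipinvgauss_cdf_mp(80, 0.5))
+        #   # 2.699819200556787e-18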
+        p = stats.recipinvgauss.sf(80, 0.5)
+        expected = 2.699819200556787e-18
+        assert_allclose(p, expected, 5e-15)
+
+
+class TestRice:
+    def test_rice_zero_b(self):
+        # rice distribution should work with b=0, cf gh-2164
+        x = [0.2, 1., 5.]
+        assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())
+        assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())
+        assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())
+        assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())
+
+        q = [0.1, 0.1, 0.5, 0.9]
+        assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())
+
+        mvsk = stats.rice.stats(0, moments='mvsk')
+        assert_(np.isfinite(mvsk).all())
+
+        # furthermore, the pdf is continuous as b -> 0:
+        #     rice.pdf(x, b -> 0) = x exp(-x**2/2) + O(b**2)
+        # see e.g. Abramowitz & Stegun 9.6.7 & 9.6.10
+        b = 1e-8
+        assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
+                        atol=b, rtol=0)
+
+    def test_rice_rvs(self):
+        rvs = stats.rice.rvs
+        assert_equal(rvs(b=3.).size, 1)
+        assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))
+
+    def test_rice_gh9836(self):
+        # test that gh-9836 is resolved; previously the cdf jumped to 1 at
+        # the end of the range
+
+        cdf = stats.rice.cdf(np.arange(10, 160, 10), np.arange(10, 160, 10))
+        # Generated in R
+        # library(VGAM)
+        # options(digits=16)
+        # x = seq(10, 150, 10)
+        # print(price(x, sigma=1, vee=x))
+        cdf_exp = [0.4800278103504522, 0.4900233218590353, 0.4933500379379548,
+                   0.4950128317658719, 0.4960103776798502, 0.4966753655438764,
+                   0.4971503395812474, 0.4975065620443196, 0.4977836197921638,
+                   0.4980052636649550, 0.4981866072661382, 0.4983377260666599,
+                   0.4984655952615694, 0.4985751970541413, 0.4986701850071265]
+        assert_allclose(cdf, cdf_exp)
+
+        probabilities = np.arange(0.1, 1, 0.1)
+        ppf = stats.rice.ppf(probabilities, 500/4, scale=4)
+        # Generated in R
+        # library(VGAM)
+        # options(digits=16)
+        # p = seq(0.1, .9, by = .1)
+        # print(qrice(p, vee = 500, sigma = 4))
+        ppf_exp = [494.8898762347361, 496.6495690858350, 497.9184315188069,
+                   499.0026277378915, 500.0159999146250, 501.0293721352668,
+                   502.1135684981884, 503.3824312270405, 505.1421247157822]
+        assert_allclose(ppf, ppf_exp)
+
+        ppf = scipy.stats.rice.ppf(0.5, np.arange(10, 150, 10))
+        # Generated in R
+        # library(VGAM)
+        # options(digits=16)
+        # b <- seq(10, 140, 10)
+        # print(qrice(0.5, vee = b, sigma = 1))
+        ppf_exp = [10.04995862522287, 20.02499480078302, 30.01666512465732,
+                   40.01249934924363, 50.00999966676032, 60.00833314046875,
+                   70.00714273568241, 80.00624991862573, 90.00555549840364,
+                   100.00499995833597, 110.00454542324384, 120.00416664255323,
+                   130.00384613488120, 140.00357141338748]
+        assert_allclose(ppf, ppf_exp)
+
+
+class TestErlang:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_erlang_runtimewarning(self):
+        # erlang should generate a RuntimeWarning if a non-integer
+        # shape parameter is used.
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", RuntimeWarning)
+
+            # The non-integer shape parameter 1.3 should trigger a
+            # RuntimeWarning
+            assert_raises(RuntimeWarning,
+                          stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
+
+            # Calling the fit method with `f0` set to an integer should
+            # *not* trigger a RuntimeWarning.  It should return the same
+            # values as gamma.fit(...).
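+            # (erlang is the gamma distribution restricted to integer shape
+            # parameters, so with the shape fixed via ``f0`` both fits solve
+            # the same optimization over ``loc`` and ``scale``.)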
+ data = [0.5, 1.0, 2.0, 4.0] + result_erlang = stats.erlang.fit(data, f0=1) + result_gamma = stats.gamma.fit(data, f0=1) + assert_allclose(result_erlang, result_gamma, rtol=1e-3) + + def test_gh_pr_10949_argcheck(self): + assert_equal(stats.erlang.pdf(0.5, a=[1, -1]), + stats.gamma.pdf(0.5, a=[1, -1])) + + +class TestRayleigh: + def setup_method(self): + np.random.seed(987654321) + + # gh-6227 + def test_logpdf(self): + y = stats.rayleigh.logpdf(50) + assert_allclose(y, -1246.0879769945718) + + def test_logsf(self): + y = stats.rayleigh.logsf(50) + assert_allclose(y, -1250) + + @pytest.mark.parametrize("rvs_loc,rvs_scale", [(0.85373171, 0.86932204), + (0.20558821, 0.61621008)]) + def test_fit(self, rvs_loc, rvs_scale): + data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale) + + def scale_mle(data, floc): + return (np.sum((data - floc) ** 2) / (2 * len(data))) ** .5 + + # when `floc` is provided, `scale` is found with an analytical formula + scale_expect = scale_mle(data, rvs_loc) + loc, scale = stats.rayleigh.fit(data, floc=rvs_loc) + assert_equal(loc, rvs_loc) + assert_equal(scale, scale_expect) + + # when `fscale` is fixed, superclass fit is used to determine `loc`. + loc, scale = stats.rayleigh.fit(data, fscale=.6) + assert_equal(scale, .6) + + # with both parameters free, one dimensional optimization is done + # over a new function that takes into account the dependent relation + # of `scale` to `loc`. + loc, scale = stats.rayleigh.fit(data) + # test that `scale` is defined by its relation to `loc` + assert_equal(scale, scale_mle(data, loc)) + + @pytest.mark.parametrize("rvs_loc,rvs_scale", [[0.74, 0.01], + [0.08464463, 0.12069025]]) + def test_fit_comparison_super_method(self, rvs_loc, rvs_scale): + # test that the objective function result of the analytical MLEs is + # less than or equal to that of the numerically optimized estimate + data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale) + _assert_less_or_close_loglike(stats.rayleigh, data) + + def test_fit_warnings(self): + assert_fit_warnings(stats.rayleigh) + + def test_fit_gh17088(self): + # `rayleigh.fit` could return a location that was inconsistent with + # the data. See gh-17088. + rng = np.random.default_rng(456) + loc, scale, size = 50, 600, 500 + rvs = stats.rayleigh.rvs(loc, scale, size=size, random_state=rng) + loc_fit, _ = stats.rayleigh.fit(rvs) + assert loc_fit < np.min(rvs) + loc_fit, scale_fit = stats.rayleigh.fit(rvs, fscale=scale) + assert loc_fit < np.min(rvs) + assert scale_fit == scale + + +class TestExponWeib: + + def test_pdf_logpdf(self): + # Regression test for gh-3508. + x = 0.1 + a = 1.0 + c = 100.0 + p = stats.exponweib.pdf(x, a, c) + logp = stats.exponweib.logpdf(x, a, c) + # Expected values were computed with mpmath. + assert_allclose([p, logp], + [1.0000000000000054e-97, -223.35075402042244]) + + def test_a_is_1(self): + # For issue gh-3508. + # Check that when a=1, the pdf and logpdf methods of exponweib are the + # same as those of weibull_min. + x = np.logspace(-4, -1, 4) + a = 1 + c = 100 + + p = stats.exponweib.pdf(x, a, c) + expected = stats.weibull_min.pdf(x, c) + assert_allclose(p, expected) + + logp = stats.exponweib.logpdf(x, a, c) + expected = stats.weibull_min.logpdf(x, c) + assert_allclose(logp, expected) + + def test_a_is_1_c_is_1(self): + # When a = 1 and c = 1, the distribution is exponential. 
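+        # (The exponweib CDF is (1 - exp(-x**c))**a, so with a = c = 1 it
+        # reduces to 1 - exp(-x), the standard exponential CDF; the pdf and
+        # logpdf must therefore match expon as well.)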
+        x = np.logspace(-8, 1, 10)
+        a = 1
+        c = 1
+
+        p = stats.exponweib.pdf(x, a, c)
+        expected = stats.expon.pdf(x)
+        assert_allclose(p, expected)
+
+        logp = stats.exponweib.logpdf(x, a, c)
+        expected = stats.expon.logpdf(x)
+        assert_allclose(logp, expected)
+
+    # Reference values were computed with mpmath, e.g.:
+    #
+    # from mpmath import mp
+    #
+    # def mp_sf(x, a, c):
+    #     x = mp.mpf(x)
+    #     a = mp.mpf(a)
+    #     c = mp.mpf(c)
+    #     return -mp.powm1(-mp.expm1(-x**c), a)
+    #
+    # mp.dps = 100
+    # print(float(mp_sf(1, 2.5, 0.75)))
+    #
+    # prints
+    #
+    # 0.6823127476985246
+    #
+    @pytest.mark.parametrize(
+        'x, a, c, ref',
+        [(1, 2.5, 0.75, 0.6823127476985246),
+         (50, 2.5, 0.75, 1.7056666054719663e-08),
+         (125, 2.5, 0.75, 1.4534393150714602e-16),
+         (250, 2.5, 0.75, 1.2391389689773512e-27),
+         (250, 0.03125, 0.75, 1.548923711221689e-29),
+         (3, 0.03125, 3.0, 5.873527551689983e-14),
+         (2e80, 10.0, 0.02, 2.9449084156902135e-17)]
+    )
+    def test_sf(self, x, a, c, ref):
+        sf = stats.exponweib.sf(x, a, c)
+        assert_allclose(sf, ref, rtol=1e-14)
+
+    # Reference values were computed with mpmath, e.g.:
+    #
+    # from mpmath import mp
+    #
+    # def mp_isf(p, a, c):
+    #     p = mp.mpf(p)
+    #     a = mp.mpf(a)
+    #     c = mp.mpf(c)
+    #     return (-mp.log(-mp.expm1(mp.log1p(-p)/a)))**(1/c)
+    #
+    # mp.dps = 100
+    # print(float(mp_isf(0.25, 2.5, 0.75)))
+    #
+    # prints
+    #
+    # 2.8946008178158924
+    #
+    @pytest.mark.parametrize(
+        'p, a, c, ref',
+        [(0.25, 2.5, 0.75, 2.8946008178158924),
+         (3e-16, 2.5, 0.75, 121.77966713102938),
+         (1e-12, 1, 2, 5.256521769756932),
+         (2e-13, 0.03125, 3, 2.953915059484589),
+         (5e-14, 10.0, 0.02, 7.57094886384687e+75)]
+    )
+    def test_isf(self, p, a, c, ref):
+        isf = stats.exponweib.isf(p, a, c)
+        assert_allclose(isf, ref, rtol=5e-14)
+
+
+class TestFatigueLife:
+
+    def test_sf_tail(self):
+        # Expected value computed with mpmath:
+        # import mpmath
+        # mpmath.mp.dps = 80
+        # x = mpmath.mpf(800.0)
+        # c = mpmath.mpf(2.5)
+        # s = float(1 - mpmath.ncdf(1/c * (mpmath.sqrt(x)
+        #                                  - 1/mpmath.sqrt(x))))
+        # print(s)
+        # Output:
+        # 6.593376447038406e-30
+        s = stats.fatiguelife.sf(800.0, 2.5)
+        assert_allclose(s, 6.593376447038406e-30, rtol=1e-13)
+
+    def test_isf_tail(self):
+        # See test_sf_tail for the mpmath code.
+        p = 6.593376447038406e-30
+        q = stats.fatiguelife.isf(p, 2.5)
+        assert_allclose(q, 800.0, rtol=1e-13)
+
+
+class TestWeibull:
+
+    def test_logpdf(self):
+        # gh-6217
+        y = stats.weibull_min.logpdf(0, 1)
+        assert_equal(y, 0)
+
+    def test_with_maxima_distrib(self):
+        # Tests for weibull_min and weibull_max.
+        # The expected values were computed using the symbolic algebra
+        # program 'maxima' with the package 'distrib', which has
+        # 'pdf_weibull' and 'cdf_weibull'.
The mapping between the + # scipy and maxima functions is as follows: + # ----------------------------------------------------------------- + # scipy maxima + # --------------------------------- ------------------------------ + # weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b) + # weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b)) + # weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b) + # weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b)) + # weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b) + # weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b)) + # + # weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b) + # weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b)) + # weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b) + # weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b)) + # weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b) + # weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b)) + # ----------------------------------------------------------------- + x = 1.5 + a = 2.0 + b = 3.0 + + # weibull_min + + p = stats.weibull_min.pdf(x, a, scale=b) + assert_allclose(p, np.exp(-0.25)/3) + + lp = stats.weibull_min.logpdf(x, a, scale=b) + assert_allclose(lp, -0.25 - np.log(3)) + + c = stats.weibull_min.cdf(x, a, scale=b) + assert_allclose(c, -special.expm1(-0.25)) + + lc = stats.weibull_min.logcdf(x, a, scale=b) + assert_allclose(lc, np.log(-special.expm1(-0.25))) + + s = stats.weibull_min.sf(x, a, scale=b) + assert_allclose(s, np.exp(-0.25)) + + ls = stats.weibull_min.logsf(x, a, scale=b) + assert_allclose(ls, -0.25) + + # Also test using a large value x, for which computing the survival + # function using the CDF would result in 0. + s = stats.weibull_min.sf(30, 2, scale=3) + assert_allclose(s, np.exp(-100)) + + ls = stats.weibull_min.logsf(30, 2, scale=3) + assert_allclose(ls, -100) + + # weibull_max + x = -1.5 + + p = stats.weibull_max.pdf(x, a, scale=b) + assert_allclose(p, np.exp(-0.25)/3) + + lp = stats.weibull_max.logpdf(x, a, scale=b) + assert_allclose(lp, -0.25 - np.log(3)) + + c = stats.weibull_max.cdf(x, a, scale=b) + assert_allclose(c, np.exp(-0.25)) + + lc = stats.weibull_max.logcdf(x, a, scale=b) + assert_allclose(lc, -0.25) + + s = stats.weibull_max.sf(x, a, scale=b) + assert_allclose(s, -special.expm1(-0.25)) + + ls = stats.weibull_max.logsf(x, a, scale=b) + assert_allclose(ls, np.log(-special.expm1(-0.25))) + + # Also test using a value of x close to 0, for which computing the + # survival function using the CDF would result in 0. 
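+        # (weibull_max.sf(x, c, scale) = 1 - exp(-(-x/scale)**c), so the
+        # exact value here is -expm1(-(1e-9/3)**2) = -expm1(-1/9e18), far
+        # below the float spacing around 1.0 and therefore unreachable
+        # via 1 - cdf.)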
+ s = stats.weibull_max.sf(-1e-9, 2, scale=3) + assert_allclose(s, -special.expm1(-1/9000000000000000000)) + + ls = stats.weibull_max.logsf(-1e-9, 2, scale=3) + assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000))) + + @pytest.mark.parametrize('scale', [1.0, 0.1]) + def test_delta_cdf(self, scale): + # Expected value computed with mpmath: + # + # def weibull_min_sf(x, k, scale): + # x = mpmath.mpf(x) + # k = mpmath.mpf(k) + # scale =mpmath.mpf(scale) + # return mpmath.exp(-(x/scale)**k) + # + # >>> import mpmath + # >>> mpmath.mp.dps = 60 + # >>> sf1 = weibull_min_sf(7.5, 3, 1) + # >>> sf2 = weibull_min_sf(8.0, 3, 1) + # >>> float(sf1 - sf2) + # 6.053624060118734e-184 + # + delta = stats.weibull_min._delta_cdf(scale*7.5, scale*8, 3, + scale=scale) + assert_allclose(delta, 6.053624060118734e-184) + + def test_fit_min(self): + rng = np.random.default_rng(5985959307161735394) + + c, loc, scale = 2, 3.5, 0.5 # arbitrary, valid parameters + dist = stats.weibull_min(c, loc, scale) + rvs = dist.rvs(size=100, random_state=rng) + + # test that MLE still honors guesses and fixed parameters + c2, loc2, scale2 = stats.weibull_min.fit(rvs, 1.5, floc=3) + c3, loc3, scale3 = stats.weibull_min.fit(rvs, 1.6, floc=3) + assert loc2 == loc3 == 3 # fixed parameter is respected + assert c2 != c3 # different guess -> (slightly) different outcome + # quality of fit is tested elsewhere + + # test that MoM honors fixed parameters, accepts (but ignores) guesses + c4, loc4, scale4 = stats.weibull_min.fit(rvs, 3, fscale=3, method='mm') + assert scale4 == 3 + # because scale was fixed, only the mean and skewness will be matched + dist4 = stats.weibull_min(c4, loc4, scale4) + res = dist4.stats(moments='ms') + ref = np.mean(rvs), stats.skew(rvs) + assert_allclose(res, ref) + + # reference values were computed via mpmath + # from mpmath import mp + # def weibull_sf_mpmath(x, c): + # x = mp.mpf(x) + # c = mp.mpf(c) + # return float(mp.exp(-x**c)) + + @pytest.mark.parametrize('x, c, ref', [(50, 1, 1.9287498479639178e-22), + (1000, 0.8, + 8.131269637872743e-110)]) + def test_sf_isf(self, x, c, ref): + assert_allclose(stats.weibull_min.sf(x, c), ref, rtol=5e-14) + assert_allclose(stats.weibull_min.isf(ref, c), x, rtol=5e-14) + + +class TestDweibull: + def test_entropy(self): + # Test that dweibull entropy follows that of weibull_min. + # (Generic tests check that the dweibull entropy is consistent + # with its PDF. As for accuracy, dweibull entropy should be just + # as accurate as weibull_min entropy. Checks of accuracy against + # a reference need only be applied to the fundamental distribution - + # weibull_min.) 
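+        # (If f is the weibull_min PDF, the dweibull PDF is f(|x|)/2;
+        # integrating -(f/2)*log(f/2) over both half-lines gives
+        # H(weibull_min) + log(2), which is the reference used below.)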
+ rng = np.random.default_rng(8486259129157041777) + c = 10**rng.normal(scale=100, size=10) + res = stats.dweibull.entropy(c) + ref = stats.weibull_min.entropy(c) - np.log(0.5) + assert_allclose(res, ref, rtol=1e-15) + + def test_sf(self): + # test that for positive values the dweibull survival function is half + # the weibull_min survival function + rng = np.random.default_rng(8486259129157041777) + c = 10**rng.normal(scale=1, size=10) + x = 10 * rng.uniform() + res = stats.dweibull.sf(x, c) + ref = 0.5 * stats.weibull_min.sf(x, c) + assert_allclose(res, ref, rtol=1e-15) + + +class TestTruncWeibull: + + def test_pdf_bounds(self): + # test bounds + y = stats.truncweibull_min.pdf([0.1, 2.0], 2.0, 0.11, 1.99) + assert_equal(y, [0.0, 0.0]) + + def test_logpdf(self): + y = stats.truncweibull_min.logpdf(2.0, 1.0, 2.0, np.inf) + assert_equal(y, 0.0) + + # hand calculation + y = stats.truncweibull_min.logpdf(2.0, 1.0, 2.0, 4.0) + assert_allclose(y, 0.14541345786885884) + + def test_ppf_bounds(self): + # test bounds + y = stats.truncweibull_min.ppf([0.0, 1.0], 2.0, 0.1, 2.0) + assert_equal(y, [0.1, 2.0]) + + def test_cdf_to_ppf(self): + q = [0., 0.1, .25, 0.50, 0.75, 0.90, 1.] + x = stats.truncweibull_min.ppf(q, 2., 0., 3.) + q_out = stats.truncweibull_min.cdf(x, 2., 0., 3.) + assert_allclose(q, q_out) + + def test_sf_to_isf(self): + q = [0., 0.1, .25, 0.50, 0.75, 0.90, 1.] + x = stats.truncweibull_min.isf(q, 2., 0., 3.) + q_out = stats.truncweibull_min.sf(x, 2., 0., 3.) + assert_allclose(q, q_out) + + def test_munp(self): + c = 2. + a = 1. + b = 3. + + def xnpdf(x, n): + return x**n*stats.truncweibull_min.pdf(x, c, a, b) + + m0 = stats.truncweibull_min.moment(0, c, a, b) + assert_equal(m0, 1.) + + m1 = stats.truncweibull_min.moment(1, c, a, b) + m1_expected, _ = quad(lambda x: xnpdf(x, 1), a, b) + assert_allclose(m1, m1_expected) + + m2 = stats.truncweibull_min.moment(2, c, a, b) + m2_expected, _ = quad(lambda x: xnpdf(x, 2), a, b) + assert_allclose(m2, m2_expected) + + m3 = stats.truncweibull_min.moment(3, c, a, b) + m3_expected, _ = quad(lambda x: xnpdf(x, 3), a, b) + assert_allclose(m3, m3_expected) + + m4 = stats.truncweibull_min.moment(4, c, a, b) + m4_expected, _ = quad(lambda x: xnpdf(x, 4), a, b) + assert_allclose(m4, m4_expected) + + def test_reference_values(self): + a = 1. + b = 3. + c = 2. + x_med = np.sqrt(1 - np.log(0.5 + np.exp(-(8. 
+ np.log(2.))))) + + cdf = stats.truncweibull_min.cdf(x_med, c, a, b) + assert_allclose(cdf, 0.5) + + lc = stats.truncweibull_min.logcdf(x_med, c, a, b) + assert_allclose(lc, -np.log(2.)) + + ppf = stats.truncweibull_min.ppf(0.5, c, a, b) + assert_allclose(ppf, x_med) + + sf = stats.truncweibull_min.sf(x_med, c, a, b) + assert_allclose(sf, 0.5) + + ls = stats.truncweibull_min.logsf(x_med, c, a, b) + assert_allclose(ls, -np.log(2.)) + + isf = stats.truncweibull_min.isf(0.5, c, a, b) + assert_allclose(isf, x_med) + + def test_compare_weibull_min(self): + # Verify that the truncweibull_min distribution gives the same results + # as the original weibull_min + x = 1.5 + c = 2.0 + a = 0.0 + b = np.inf + scale = 3.0 + + p = stats.weibull_min.pdf(x, c, scale=scale) + p_trunc = stats.truncweibull_min.pdf(x, c, a, b, scale=scale) + assert_allclose(p, p_trunc) + + lp = stats.weibull_min.logpdf(x, c, scale=scale) + lp_trunc = stats.truncweibull_min.logpdf(x, c, a, b, scale=scale) + assert_allclose(lp, lp_trunc) + + cdf = stats.weibull_min.cdf(x, c, scale=scale) + cdf_trunc = stats.truncweibull_min.cdf(x, c, a, b, scale=scale) + assert_allclose(cdf, cdf_trunc) + + lc = stats.weibull_min.logcdf(x, c, scale=scale) + lc_trunc = stats.truncweibull_min.logcdf(x, c, a, b, scale=scale) + assert_allclose(lc, lc_trunc) + + s = stats.weibull_min.sf(x, c, scale=scale) + s_trunc = stats.truncweibull_min.sf(x, c, a, b, scale=scale) + assert_allclose(s, s_trunc) + + ls = stats.weibull_min.logsf(x, c, scale=scale) + ls_trunc = stats.truncweibull_min.logsf(x, c, a, b, scale=scale) + assert_allclose(ls, ls_trunc) + + # # Also test using a large value x, for which computing the survival + # # function using the CDF would result in 0. + s = stats.truncweibull_min.sf(30, 2, a, b, scale=3) + assert_allclose(s, np.exp(-100)) + + ls = stats.truncweibull_min.logsf(30, 2, a, b, scale=3) + assert_allclose(ls, -100) + + def test_compare_weibull_min2(self): + # Verify that the truncweibull_min distribution PDF and CDF results + # are the same as those calculated from truncating weibull_min + c, a, b = 2.5, 0.25, 1.25 + x = np.linspace(a, b, 100) + + pdf1 = stats.truncweibull_min.pdf(x, c, a, b) + cdf1 = stats.truncweibull_min.cdf(x, c, a, b) + + norm = stats.weibull_min.cdf(b, c) - stats.weibull_min.cdf(a, c) + pdf2 = stats.weibull_min.pdf(x, c) / norm + cdf2 = (stats.weibull_min.cdf(x, c) - stats.weibull_min.cdf(a, c))/norm + + np.testing.assert_allclose(pdf1, pdf2) + np.testing.assert_allclose(cdf1, cdf2) + + +class TestRdist: + def test_rdist_cdf_gh1285(self): + # check workaround in rdist._cdf for issue gh-1285. 
+ distfn = stats.rdist + values = [0.001, 0.5, 0.999] + assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0), + values, decimal=5) + + def test_rdist_beta(self): + # rdist is a special case of stats.beta + x = np.linspace(-0.99, 0.99, 10) + c = 2.7 + assert_almost_equal(0.5*stats.beta(c/2, c/2).pdf((x + 1)/2), + stats.rdist(c).pdf(x)) + + # reference values were computed via mpmath + # from mpmath import mp + # mp.dps = 200 + # def rdist_sf_mpmath(x, c): + # x = mp.mpf(x) + # c = mp.mpf(c) + # return float(mp.betainc(c/2, c/2, (x+1)/2, mp.one, regularized=True)) + @pytest.mark.parametrize( + "x, c, ref", + [ + (0.0001, 541, 0.49907251345565845), + (0.1, 241, 0.06000788166249205), + (0.5, 441, 1.0655898106047832e-29), + (0.8, 341, 6.025478373732215e-78), + ] + ) + def test_rdist_sf(self, x, c, ref): + assert_allclose(stats.rdist.sf(x, c), ref, rtol=5e-14) + + +class TestTrapezoid: + def test_reduces_to_triang(self): + modes = [0, 0.3, 0.5, 1] + for mode in modes: + x = [0, mode, 1] + assert_almost_equal(stats.trapezoid.pdf(x, mode, mode), + stats.triang.pdf(x, mode)) + assert_almost_equal(stats.trapezoid.cdf(x, mode, mode), + stats.triang.cdf(x, mode)) + + def test_reduces_to_uniform(self): + x = np.linspace(0, 1, 10) + assert_almost_equal(stats.trapezoid.pdf(x, 0, 1), stats.uniform.pdf(x)) + assert_almost_equal(stats.trapezoid.cdf(x, 0, 1), stats.uniform.cdf(x)) + + def test_cases(self): + # edge cases + assert_almost_equal(stats.trapezoid.pdf(0, 0, 0), 2) + assert_almost_equal(stats.trapezoid.pdf(1, 1, 1), 2) + assert_almost_equal(stats.trapezoid.pdf(0.5, 0, 0.8), + 1.11111111111111111) + assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 1.0), + 1.11111111111111111) + + # straightforward case + assert_almost_equal(stats.trapezoid.pdf(0.1, 0.2, 0.8), 0.625) + assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 0.8), 1.25) + assert_almost_equal(stats.trapezoid.pdf(0.9, 0.2, 0.8), 0.625) + + assert_almost_equal(stats.trapezoid.cdf(0.1, 0.2, 0.8), 0.03125) + assert_almost_equal(stats.trapezoid.cdf(0.2, 0.2, 0.8), 0.125) + assert_almost_equal(stats.trapezoid.cdf(0.5, 0.2, 0.8), 0.5) + assert_almost_equal(stats.trapezoid.cdf(0.9, 0.2, 0.8), 0.96875) + assert_almost_equal(stats.trapezoid.cdf(1.0, 0.2, 0.8), 1.0) + + def test_moments_and_entropy(self): + # issue #11795: improve precision of trapezoid stats + # Apply formulas from Wikipedia for the following parameters: + a, b, c, d = -3, -1, 2, 3 # => 1/3, 5/6, -3, 6 + p1, p2, loc, scale = (b-a) / (d-a), (c-a) / (d-a), a, d-a + h = 2 / (d+c-b-a) + + def moment(n): + return (h * ((d**(n+2) - c**(n+2)) / (d-c) + - (b**(n+2) - a**(n+2)) / (b-a)) / + (n+1) / (n+2)) + + mean = moment(1) + var = moment(2) - mean**2 + entropy = 0.5 * (d-c+b-a) / (d+c-b-a) + np.log(0.5 * (d+c-b-a)) + assert_almost_equal(stats.trapezoid.mean(p1, p2, loc, scale), + mean, decimal=13) + assert_almost_equal(stats.trapezoid.var(p1, p2, loc, scale), + var, decimal=13) + assert_almost_equal(stats.trapezoid.entropy(p1, p2, loc, scale), + entropy, decimal=13) + + # Check boundary cases where scipy d=0 or d=1. 
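+        # (Shapes (0, 0) collapse the trapezoid to a triangle with mode at
+        # the left edge, whose mean on [-3, 3] is (-3 + 3 - 3)/3 = -1;
+        # shapes (0, 1) collapse it to the uniform distribution on [-3, 3],
+        # with mean 0 and variance scale**2/12 = 3.)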
+ assert_almost_equal(stats.trapezoid.mean(0, 0, -3, 6), -1, decimal=13) + assert_almost_equal(stats.trapezoid.mean(0, 1, -3, 6), 0, decimal=13) + assert_almost_equal(stats.trapezoid.var(0, 1, -3, 6), 3, decimal=13) + + def test_trapezoid_vect(self): + # test that array-valued shapes and arguments are handled + c = np.array([0.1, 0.2, 0.3]) + d = np.array([0.5, 0.6])[:, None] + x = np.array([0.15, 0.25, 0.9]) + v = stats.trapezoid.pdf(x, c, d) + + cc, dd, xx = np.broadcast_arrays(c, d, x) + + res = np.empty(xx.size, dtype=xx.dtype) + ind = np.arange(xx.size) + for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()): + res[i] = stats.trapezoid.pdf(x1, c1, d1) + + assert_allclose(v, res.reshape(v.shape), atol=1e-15) + + # Check that the stats() method supports vector arguments. + v = np.asarray(stats.trapezoid.stats(c, d, moments="mvsk")) + cc, dd = np.broadcast_arrays(c, d) + res = np.empty((cc.size, 4)) # 4 stats returned per value + ind = np.arange(cc.size) + for i, c1, d1 in zip(ind, cc.ravel(), dd.ravel()): + res[i] = stats.trapezoid.stats(c1, d1, moments="mvsk") + + assert_allclose(v, res.T.reshape(v.shape), atol=1e-15) + + def test_trapz(self): + # Basic test for alias + x = np.linspace(0, 1, 10) + assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x)) + + +class TestTriang: + def test_edge_cases(self): + with np.errstate(all='raise'): + assert_equal(stats.triang.pdf(0, 0), 2.) + assert_equal(stats.triang.pdf(0.5, 0), 1.) + assert_equal(stats.triang.pdf(1, 0), 0.) + + assert_equal(stats.triang.pdf(0, 1), 0) + assert_equal(stats.triang.pdf(0.5, 1), 1.) + assert_equal(stats.triang.pdf(1, 1), 2) + + assert_equal(stats.triang.cdf(0., 0.), 0.) + assert_equal(stats.triang.cdf(0.5, 0.), 0.75) + assert_equal(stats.triang.cdf(1.0, 0.), 1.0) + + assert_equal(stats.triang.cdf(0., 1.), 0.) + assert_equal(stats.triang.cdf(0.5, 1.), 0.25) + assert_equal(stats.triang.cdf(1., 1.), 1) + + +class TestMaxwell: + + # reference values were computed with wolfram alpha + # erfc(x/sqrt(2)) + sqrt(2/pi) * x * e^(-x^2/2) + + @pytest.mark.parametrize("x, ref", + [(20, 2.2138865931011177e-86), + (0.01, 0.999999734046458435)]) + def test_sf(self, x, ref): + assert_allclose(stats.maxwell.sf(x), ref, rtol=1e-14) + + # reference values were computed with wolfram alpha + # sqrt(2) * sqrt(Q^(-1)(3/2, q)) + + @pytest.mark.parametrize("q, ref", + [(0.001, 4.033142223656157022), + (0.9999847412109375, 0.0385743284050381), + (2**-55, 8.95564974719481)]) + def test_isf(self, q, ref): + assert_allclose(stats.maxwell.isf(q), ref, rtol=1e-15) + + +class TestMielke: + def test_moments(self): + k, s = 4.642, 0.597 + # n-th moment exists only if n < s + assert_equal(stats.mielke(k, s).moment(1), np.inf) + assert_equal(stats.mielke(k, 1.0).moment(1), np.inf) + assert_(np.isfinite(stats.mielke(k, 1.01).moment(1))) + + def test_burr_equivalence(self): + x = np.linspace(0.01, 100, 50) + k, s = 2.45, 5.32 + assert_allclose(stats.burr.pdf(x, s, k/s), stats.mielke.pdf(x, k, s)) + + +class TestBurr: + def test_endpoints_7491(self): + # gh-7491 + # Compute the pdf at the left endpoint dst.a. 
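+        # (As x -> 0+, burr.pdf ~ c*d*x**(c*d - 1) and burr12.pdf ~
+        # c*d*x**(c - 1), so the cases below use c*d = 1 (fisk/burr) or
+        # c = 1 (burr12), giving the finite limits in the last column.)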
+ data = [ + [stats.fisk, (1,), 1], + [stats.burr, (0.5, 2), 1], + [stats.burr, (1, 1), 1], + [stats.burr, (2, 0.5), 1], + [stats.burr12, (1, 0.5), 0.5], + [stats.burr12, (1, 1), 1.0], + [stats.burr12, (1, 2), 2.0]] + + ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data] + correct = [_correct_ for _f, _args, _correct_ in data] + assert_array_almost_equal(ans, correct) + + ans = [_f.logpdf(_f.a, *_args) for _f, _args, _ in data] + correct = [np.log(_correct_) for _f, _args, _correct_ in data] + assert_array_almost_equal(ans, correct) + + def test_burr_stats_9544(self): + # gh-9544. Test from gh-9978 + c, d = 5.0, 3 + mean, variance = stats.burr(c, d).stats() + # mean = sc.beta(3 + 1/5, 1. - 1/5) * 3 = 1.4110263... + # var = sc.beta(3 + 2 / 5, 1. - 2 / 5) * 3 - + # (sc.beta(3 + 1 / 5, 1. - 1 / 5) * 3) ** 2 + mean_hc, variance_hc = 1.4110263183925857, 0.22879948026191643 + assert_allclose(mean, mean_hc) + assert_allclose(variance, variance_hc) + + def test_burr_nan_mean_var_9544(self): + # gh-9544. Test from gh-9978 + c, d = 0.5, 3 + mean, variance = stats.burr(c, d).stats() + assert_(np.isnan(mean)) + assert_(np.isnan(variance)) + c, d = 1.5, 3 + mean, variance = stats.burr(c, d).stats() + assert_(np.isfinite(mean)) + assert_(np.isnan(variance)) + + c, d = 0.5, 3 + e1, e2, e3, e4 = stats.burr._munp(np.array([1, 2, 3, 4]), c, d) + assert_(np.isnan(e1)) + assert_(np.isnan(e2)) + assert_(np.isnan(e3)) + assert_(np.isnan(e4)) + c, d = 1.5, 3 + e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d) + assert_(np.isfinite(e1)) + assert_(np.isnan(e2)) + assert_(np.isnan(e3)) + assert_(np.isnan(e4)) + c, d = 2.5, 3 + e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d) + assert_(np.isfinite(e1)) + assert_(np.isfinite(e2)) + assert_(np.isnan(e3)) + assert_(np.isnan(e4)) + c, d = 3.5, 3 + e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d) + assert_(np.isfinite(e1)) + assert_(np.isfinite(e2)) + assert_(np.isfinite(e3)) + assert_(np.isnan(e4)) + c, d = 4.5, 3 + e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d) + assert_(np.isfinite(e1)) + assert_(np.isfinite(e2)) + assert_(np.isfinite(e3)) + assert_(np.isfinite(e4)) + + def test_burr_isf(self): + # reference values were computed via the reference distribution, e.g. + # mp.dps = 100 + # Burr(c=5, d=3).isf([0.1, 1e-10, 1e-20, 1e-40]) + c, d = 5.0, 3.0 + q = [0.1, 1e-10, 1e-20, 1e-40] + ref = [1.9469686558286508, 124.57309395989076, 12457.309396155173, + 124573093.96155174] + assert_allclose(stats.burr.isf(q, c, d), ref, rtol=1e-14) + + +class TestBurr12: + + @pytest.mark.parametrize('scale, expected', + [(1.0, 2.3283064359965952e-170), + (3.5, 5.987114417447875e-153)]) + def test_delta_cdf(self, scale, expected): + # Expected value computed with mpmath: + # + # def burr12sf(x, c, d, scale): + # x = mpmath.mpf(x) + # c = mpmath.mpf(c) + # d = mpmath.mpf(d) + # scale = mpmath.mpf(scale) + # return (mpmath.mp.one + (x/scale)**c)**(-d) + # + # >>> import mpmath + # >>> mpmath.mp.dps = 60 + # >>> float(burr12sf(2e5, 4, 8, 1) - burr12sf(4e5, 4, 8, 1)) + # 2.3283064359965952e-170 + # >>> float(burr12sf(2e5, 4, 8, 3.5) - burr12sf(4e5, 4, 8, 3.5)) + # 5.987114417447875e-153 + # + delta = stats.burr12._delta_cdf(2e5, 4e5, 4, 8, scale=scale) + assert_allclose(delta, expected, rtol=1e-13) + + def test_moments_edge(self): + # gh-18838 reported that burr12 moments could be invalid; see above. + # Check that this is resolved in an edge case where c*d == n, and + # compare the results against those produced by Mathematica, e.g. 
+ # `SinghMaddalaDistribution[2, 2, 1]` at Wolfram Alpha. + c, d = 2, 2 + mean = np.pi/4 + var = 1 - np.pi**2/16 + skew = np.pi**3/(32*var**1.5) + kurtosis = np.nan + ref = [mean, var, skew, kurtosis] + res = stats.burr12(c, d).stats('mvsk') + assert_allclose(res, ref, rtol=1e-14) + + +class TestStudentizedRange: + # For alpha = .05, .01, and .001, and for each value of + # v = [1, 3, 10, 20, 120, inf], a Q was picked from each table for + # k = [2, 8, 14, 20]. + + # these arrays are written with `k` as column, and `v` as rows. + # Q values are taken from table 3: + # https://www.jstor.org/stable/2237810 + q05 = [17.97, 45.40, 54.33, 59.56, + 4.501, 8.853, 10.35, 11.24, + 3.151, 5.305, 6.028, 6.467, + 2.950, 4.768, 5.357, 5.714, + 2.800, 4.363, 4.842, 5.126, + 2.772, 4.286, 4.743, 5.012] + q01 = [90.03, 227.2, 271.8, 298.0, + 8.261, 15.64, 18.22, 19.77, + 4.482, 6.875, 7.712, 8.226, + 4.024, 5.839, 6.450, 6.823, + 3.702, 5.118, 5.562, 5.827, + 3.643, 4.987, 5.400, 5.645] + q001 = [900.3, 2272, 2718, 2980, + 18.28, 34.12, 39.69, 43.05, + 6.487, 9.352, 10.39, 11.03, + 5.444, 7.313, 7.966, 8.370, + 4.772, 6.039, 6.448, 6.695, + 4.654, 5.823, 6.191, 6.411] + qs = np.concatenate((q05, q01, q001)) + ps = [.95, .99, .999] + vs = [1, 3, 10, 20, 120, np.inf] + ks = [2, 8, 14, 20] + + data = list(zip(product(ps, vs, ks), qs)) + + # A small selection of large-v cases generated with R's `ptukey` + # Each case is in the format (q, k, v, r_result) + r_data = [ + (0.1, 3, 9001, 0.002752818526842), + (1, 10, 1000, 0.000526142388912), + (1, 3, np.inf, 0.240712641229283), + (4, 3, np.inf, 0.987012338626815), + (1, 10, np.inf, 0.000519869467083), + ] + + def test_cdf_against_tables(self): + for pvk, q in self.data: + p_expected, v, k = pvk + res_p = stats.studentized_range.cdf(q, k, v) + assert_allclose(res_p, p_expected, rtol=1e-4) + + @pytest.mark.slow + def test_ppf_against_tables(self): + for pvk, q_expected in self.data: + p, v, k = pvk + res_q = stats.studentized_range.ppf(p, k, v) + assert_allclose(res_q, q_expected, rtol=5e-4) + + path_prefix = os.path.dirname(__file__) + relative_path = "data/studentized_range_mpmath_ref.json" + with open(os.path.join(path_prefix, relative_path)) as file: + pregenerated_data = json.load(file) + + @pytest.mark.parametrize("case_result", pregenerated_data["cdf_data"]) + def test_cdf_against_mp(self, case_result): + src_case = case_result["src_case"] + mp_result = case_result["mp_result"] + qkv = src_case["q"], src_case["k"], src_case["v"] + res = stats.studentized_range.cdf(*qkv) + + assert_allclose(res, mp_result, + atol=src_case["expected_atol"], + rtol=src_case["expected_rtol"]) + + @pytest.mark.parametrize("case_result", pregenerated_data["pdf_data"]) + def test_pdf_against_mp(self, case_result): + src_case = case_result["src_case"] + mp_result = case_result["mp_result"] + qkv = src_case["q"], src_case["k"], src_case["v"] + res = stats.studentized_range.pdf(*qkv) + + assert_allclose(res, mp_result, + atol=src_case["expected_atol"], + rtol=src_case["expected_rtol"]) + + @pytest.mark.slow + @pytest.mark.xfail_on_32bit("intermittent RuntimeWarning: invalid value.") + @pytest.mark.parametrize("case_result", pregenerated_data["moment_data"]) + def test_moment_against_mp(self, case_result): + src_case = case_result["src_case"] + mp_result = case_result["mp_result"] + mkv = src_case["m"], src_case["k"], src_case["v"] + + # Silence invalid value encountered warnings. Actual problems will be + # caught by the result comparison. 
+ with np.errstate(invalid='ignore'): + res = stats.studentized_range.moment(*mkv) + + assert_allclose(res, mp_result, + atol=src_case["expected_atol"], + rtol=src_case["expected_rtol"]) + + def test_pdf_integration(self): + k, v = 3, 10 + # Test whether PDF integration is 1 like it should be. + res = quad(stats.studentized_range.pdf, 0, np.inf, args=(k, v)) + assert_allclose(res[0], 1) + + @pytest.mark.xslow + def test_pdf_against_cdf(self): + k, v = 3, 10 + + # Test whether the integrated PDF matches the CDF using cumulative + # integration. Use a small step size to reduce error due to the + # summation. This is slow, but tests the results well. + x = np.arange(0, 10, step=0.01) + + y_cdf = stats.studentized_range.cdf(x, k, v)[1:] + y_pdf_raw = stats.studentized_range.pdf(x, k, v) + y_pdf_cumulative = cumulative_trapezoid(y_pdf_raw, x) + + # Because of error caused by the summation, use a relatively large rtol + assert_allclose(y_pdf_cumulative, y_cdf, rtol=1e-4) + + @pytest.mark.parametrize("r_case_result", r_data) + def test_cdf_against_r(self, r_case_result): + # Test large `v` values using R + q, k, v, r_res = r_case_result + with np.errstate(invalid='ignore'): + res = stats.studentized_range.cdf(q, k, v) + assert_allclose(res, r_res) + + @pytest.mark.slow + @pytest.mark.xfail_on_32bit("intermittent RuntimeWarning: invalid value.") + def test_moment_vectorization(self): + # Test moment broadcasting. Calls `_munp` directly because + # `rv_continuous.moment` is broken at time of writing. See gh-12192 + + # Silence invalid value encountered warnings. Actual problems will be + # caught by the result comparison. + with np.errstate(invalid='ignore'): + m = stats.studentized_range._munp([1, 2], [4, 5], [10, 11]) + + assert_allclose(m.shape, (2,)) + + with pytest.raises(ValueError, match="...could not be broadcast..."): + stats.studentized_range._munp(1, [4, 5], [10, 11, 12]) + + @pytest.mark.xslow + def test_fitstart_valid(self): + with suppress_warnings() as sup, np.errstate(invalid="ignore"): + # the integration warning message may differ + sup.filter(IntegrationWarning) + k, df, _, _ = stats.studentized_range._fitstart([1, 2, 3]) + assert_(stats.studentized_range._argcheck(k, df)) + + def test_infinite_df(self): + # Check that the CDF and PDF infinite and normal integrators + # roughly match for a high df case + res = stats.studentized_range.pdf(3, 10, np.inf) + res_finite = stats.studentized_range.pdf(3, 10, 99999) + assert_allclose(res, res_finite, atol=1e-4, rtol=1e-4) + + res = stats.studentized_range.cdf(3, 10, np.inf) + res_finite = stats.studentized_range.cdf(3, 10, 99999) + assert_allclose(res, res_finite, atol=1e-4, rtol=1e-4) + + def test_df_cutoff(self): + # Test that the CDF and PDF properly switch integrators at df=100,000. + # The infinite integrator should be different enough that it fails + # an allclose assertion. Also sanity check that using the same + # integrator does pass the allclose with a 1-df difference, which + # should be tiny. 
+ + res = stats.studentized_range.pdf(3, 10, 100000) + res_finite = stats.studentized_range.pdf(3, 10, 99999) + res_sanity = stats.studentized_range.pdf(3, 10, 99998) + assert_raises(AssertionError, assert_allclose, res, res_finite, + atol=1e-6, rtol=1e-6) + assert_allclose(res_finite, res_sanity, atol=1e-6, rtol=1e-6) + + res = stats.studentized_range.cdf(3, 10, 100000) + res_finite = stats.studentized_range.cdf(3, 10, 99999) + res_sanity = stats.studentized_range.cdf(3, 10, 99998) + assert_raises(AssertionError, assert_allclose, res, res_finite, + atol=1e-6, rtol=1e-6) + assert_allclose(res_finite, res_sanity, atol=1e-6, rtol=1e-6) + + def test_clipping(self): + # The result of this computation was -9.9253938401489e-14 on some + # systems. The correct result is very nearly zero, but should not be + # negative. + q, k, v = 34.6413996195345746, 3, 339 + p = stats.studentized_range.sf(q, k, v) + assert_allclose(p, 0, atol=1e-10) + assert p >= 0 + + +def test_540_567(): + # test for nan returned in tickets 540, 567 + assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126, + decimal=10, err_msg='test_540_567') + assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846, + decimal=10, err_msg='test_540_567') + assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309, + scale=0.204423758009), + 0.98353464004309321, + decimal=10, err_msg='test_540_567') + + +def test_regression_ticket_1326(): + # adjust to avoid nan with 0*log(0) + assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14) + + +def test_regression_tukey_lambda(): + # Make sure that Tukey-Lambda distribution correctly handles + # non-positive lambdas. + x = np.linspace(-5.0, 5.0, 101) + + with np.errstate(divide='ignore'): + for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]: + p = stats.tukeylambda.pdf(x, lam) + assert_((p != 0.0).all()) + assert_(~np.isnan(p).all()) + + lam = np.array([[-1.0], [0.0], [2.0]]) + p = stats.tukeylambda.pdf(x, lam) + + assert_(~np.isnan(p).all()) + assert_((p[0] != 0.0).all()) + assert_((p[1] != 0.0).all()) + assert_((p[2] != 0.0).any()) + assert_((p[2] == 0.0).any()) + + +@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") +def test_regression_ticket_1421(): + assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__) + assert_('pmf(x,' in stats.poisson.__doc__) + + +def test_nan_arguments_gh_issue_1362(): + with np.errstate(invalid='ignore'): + assert_(np.isnan(stats.t.logcdf(1, np.nan))) + assert_(np.isnan(stats.t.cdf(1, np.nan))) + assert_(np.isnan(stats.t.logsf(1, np.nan))) + assert_(np.isnan(stats.t.sf(1, np.nan))) + assert_(np.isnan(stats.t.pdf(1, np.nan))) + assert_(np.isnan(stats.t.logpdf(1, np.nan))) + assert_(np.isnan(stats.t.ppf(1, np.nan))) + assert_(np.isnan(stats.t.isf(1, np.nan))) + + assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5))) + assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5))) + + +def test_frozen_fit_ticket_1536(): + np.random.seed(5678) + true = np.array([0.25, 0., 0.5]) + x = stats.lognorm.rvs(true[0], true[1], true[2], size=100) + + with np.errstate(divide='ignore'): + params = np.array(stats.lognorm.fit(x, floc=0.)) + + assert_almost_equal(params, true, decimal=2) + + 
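+    # Each call below fixes a different parameter at its true value; the
+    # remaining free parameters are estimated by MLE and should still
+    # recover `true`.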
+    params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
+    assert_almost_equal(params, true, decimal=2)
+
+    params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
+    assert_almost_equal(params, true, decimal=2)
+
+    params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
+    assert_almost_equal(params, true, decimal=2)
+
+    np.random.seed(5678)
+    loc = 1
+    floc = 0.9
+    x = stats.norm.rvs(loc, 2., size=100)
+    params = np.array(stats.norm.fit(x, floc=floc))
+    expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
+    assert_almost_equal(params, expected, decimal=4)
+
+
+def test_regression_ticket_1530():
+    # Check that the starting value works for the Cauchy distribution fit.
+    np.random.seed(654321)
+    rvs = stats.cauchy.rvs(size=100)
+    params = stats.cauchy.fit(rvs)
+    expected = (0.045, 1.142)
+    assert_almost_equal(params, expected, decimal=1)
+
+
+def test_gh_pr_4806():
+    # Check starting values for the Cauchy distribution fit.
+    np.random.seed(1234)
+    x = np.random.randn(42)
+    for offset in 10000.0, 1222333444.0:
+        loc, scale = stats.cauchy.fit(x + offset)
+        assert_allclose(loc, offset, atol=1.0)
+        assert_allclose(scale, 0.6, atol=1.0)
+
+
+def test_tukeylambda_stats_ticket_1545():
+    # Some tests for the variance and kurtosis of the Tukey Lambda distr.
+    # See test_tukeylambda_stats.py for more tests.
+
+    mv = stats.tukeylambda.stats(0, moments='mvsk')
+    # Known exact values:
+    expected = [0, np.pi**2/3, 0, 1.2]
+    assert_almost_equal(mv, expected, decimal=10)
+
+    mv = stats.tukeylambda.stats(3.13, moments='mvsk')
+    # 'expected' computed with mpmath.
+    expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
+    assert_almost_equal(mv, expected, decimal=10)
+
+    mv = stats.tukeylambda.stats(0.14, moments='mvsk')
+    # 'expected' computed with mpmath.
+    expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
+    assert_almost_equal(mv, expected, decimal=10)
+
+
+def test_poisson_logpmf_ticket_1436():
+    assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
+
+
+def test_powerlaw_stats():
+    """Test the powerlaw stats function.
+
+    This unit test is also a regression test for ticket 1548.
+
+    The exact values are:
+    mean:
+        mu = a / (a + 1)
+    variance:
+        sigma**2 = a / ((a + 2) * (a + 1) ** 2)
+    skewness:
+        One formula (see https://en.wikipedia.org/wiki/Skewness) is
+            gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
+        A short calculation shows that E[X**k] is a / (a + k), so gamma_1
+        can be implemented as
+            n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
+            d = sqrt(a/((a+2)*(a+1)**2)) ** 3
+            gamma_1 = n/d
+        Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
+        one gets the more concise formula:
+            gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
+    kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)
+        The excess kurtosis is
+            gamma_2 = mu_4 / sigma**4 - 3
+        A bit of calculus and algebra (sympy helps) shows that
+            mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
+        so
+            gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
+        which can be rearranged to
+            gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
+    """
+    cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
+             (2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
+    for a, exact_mvsk in cases:
+        mvsk = stats.powerlaw.stats(a, moments="mvsk")
+        assert_array_almost_equal(mvsk, exact_mvsk)
+
+
+def test_powerlaw_edge():
+    # Regression test for gh-3986.
+    p = stats.powerlaw.logpdf(0, 1)
+    assert_equal(p, 0.0)
+
+
+def test_exponpow_edge():
+    # Regression test for gh-3982.
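+    # (exponpow.pdf(x, b) = b*x**(b-1) * exp(1 + x**b - exp(x**b)), so at
+    # x = 0 the factor b*x**(b-1) controls the limit: inf for b < 1, b for
+    # b = 1, and 0 for b > 1 -- exactly the cases asserted below.)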
+ p = stats.exponpow.logpdf(0, 1) + assert_equal(p, 0.0) + + # Check pdf and logpdf at x = 0 for other values of b. + p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5]) + assert_equal(p, [np.inf, 1.0, 0.0]) + p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5]) + assert_equal(p, [np.inf, 0.0, -np.inf]) + + +def test_gengamma_edge(): + # Regression test for gh-3985. + p = stats.gengamma.pdf(0, 1, 1) + assert_equal(p, 1.0) + + +@pytest.mark.parametrize("a, c, ref, tol", + [(1500000.0, 1, 8.529426144018633, 1e-15), + (1e+30, 1, 35.95771492811536, 1e-15), + (1e+100, 1, 116.54819318290696, 1e-15), + (3e3, 1, 5.422011196659015, 1e-13), + (3e6, -1e100, -236.29663213396054, 1e-15), + (3e60, 1e-100, 1.3925371786831085e+102, 1e-15)]) +def test_gengamma_extreme_entropy(a, c, ref, tol): + # The reference values were calculated with mpmath: + # from mpmath import mp + # mp.dps = 500 + # + # def gen_entropy(a, c): + # a, c = mp.mpf(a), mp.mpf(c) + # val = mp.digamma(a) + # h = (a * (mp.one - val) + val/c + mp.loggamma(a) - mp.log(abs(c))) + # return float(h) + assert_allclose(stats.gengamma.entropy(a, c), ref, rtol=tol) + + +def test_gengamma_endpoint_with_neg_c(): + p = stats.gengamma.pdf(0, 1, -1) + assert p == 0.0 + logp = stats.gengamma.logpdf(0, 1, -1) + assert logp == -np.inf + + +def test_gengamma_munp(): + # Regression tests for gh-4724. + p = stats.gengamma._munp(-2, 200, 1.) + assert_almost_equal(p, 1./199/198) + + p = stats.gengamma._munp(-2, 10, 1.) + assert_almost_equal(p, 1./9/8) + + +def test_ksone_fit_freeze(): + # Regression test for ticket #1638. + d = np.array( + [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649, + -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294, + 0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048, + 0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724, + 0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662, + 0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391, + -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725, + -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199, + -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746, + -0.06037974, 0.37670779, -0.21684405]) + + with np.errstate(invalid='ignore'): + with suppress_warnings() as sup: + sup.filter(IntegrationWarning, + "The maximum number of subdivisions .50. has been " + "achieved.") + sup.filter(RuntimeWarning, + "floating point number truncated to an integer") + stats.ksone.fit(d) + + +def test_norm_logcdf(): + # Test precision of the logcdf of the normal distribution. + # This precision was enhanced in ticket 1614. 
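+    # (The complex-step checks further below use the approximation
+    # f'(x) ~= Im(f(x + i*h))/h, which avoids subtractive cancellation,
+    # together with the identity d(logcdf)/dx = pdf/cdf
+    # = exp(logpdf - logcdf).)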
+ x = -np.asarray(list(range(0, 120, 4))) + # Values from R + expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300, + -131.69539607, -203.91715537, -292.09872100, -396.25241451, + -516.38564863, -652.50322759, -804.60844201, -972.70364403, + -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068, + -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493, + -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522, + -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548, + -6277.63751711, -6733.67260303] + + assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8) + + # also test the complex-valued code path + assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8) + + # test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf) + deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag + deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x)) + assert_allclose(deriv, deriv_expected, atol=1e-10) + + +def test_levy_cdf_ppf(): + # Test levy.cdf, including small arguments. + x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001]) + + # Expected values were calculated separately with mpmath. + # E.g. + # >>> mpmath.mp.dps = 100 + # >>> x = mpmath.mp.mpf('0.01') + # >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x))) + expected = np.array([0.9747728793699604, + 0.3173105078629141, + 0.1572992070502851, + 0.0015654022580025495, + 1.523970604832105e-23, + 1.795832784800726e-219]) + + y = stats.levy.cdf(x) + assert_allclose(y, expected, rtol=1e-10) + + # ppf(expected) should get us back to x. + xx = stats.levy.ppf(expected) + assert_allclose(xx, x, rtol=1e-13) + + +def test_levy_sf(): + # Large values, far into the tail of the distribution. + x = np.array([1e15, 1e25, 1e35, 1e50]) + # Expected values were calculated with mpmath. + expected = np.array([2.5231325220201597e-08, + 2.52313252202016e-13, + 2.52313252202016e-18, + 7.978845608028653e-26]) + y = stats.levy.sf(x) + assert_allclose(y, expected, rtol=1e-14) + + +# The expected values for levy.isf(p) were calculated with mpmath. +# For loc=0 and scale=1, the inverse SF can be computed with +# +# import mpmath +# +# def levy_invsf(p): +# return 1/(2*mpmath.erfinv(p)**2) +# +# For example, with mpmath.mp.dps set to 60, float(levy_invsf(1e-20)) +# returns 6.366197723675814e+39. +# +@pytest.mark.parametrize('p, expected_isf', + [(1e-20, 6.366197723675814e+39), + (1e-8, 6366197723675813.0), + (0.375, 4.185810119346273), + (0.875, 0.42489442055310134), + (0.999, 0.09235685880262713), + (0.9999999962747097, 0.028766845244146945)]) +def test_levy_isf(p, expected_isf): + x = stats.levy.isf(p) + assert_allclose(x, expected_isf, atol=5e-15) + + +def test_levy_l_sf(): + # Test levy_l.sf for small arguments. + x = np.array([-0.016, -0.01, -0.005, -0.0015]) + # Expected values were calculated with mpmath. + expected = np.array([2.6644463892359302e-15, + 1.523970604832107e-23, + 2.0884875837625492e-45, + 5.302850374626878e-147]) + y = stats.levy_l.sf(x) + assert_allclose(y, expected, rtol=1e-13) + + +def test_levy_l_isf(): + # Test roundtrip sf(isf(p)), including a small input value. 
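+    # (levy_l is the reflection of levy about 0, so for x < 0,
+    # levy_l.sf(x) = levy.cdf(-x) = erfc(sqrt(-1/(2*x))); the roundtrip
+    # exercises both directions of that relation.)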
+ p = np.array([3.0e-15, 0.25, 0.99]) + x = stats.levy_l.isf(p) + q = stats.levy_l.sf(x) + assert_allclose(q, p, rtol=5e-14) + + +def test_hypergeom_interval_1802(): + # these two had endless loops + assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757), + (152.0, 197.0)) + assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757), + (152.0, 197.0)) + # this was working also before + assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757), + (153.0, 196.0)) + + # degenerate case .a == .b + assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8) + assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8) + + +def test_distribution_too_many_args(): + np.random.seed(1234) + + # Check that a TypeError is raised when too many args are given to a method + # Regression test for ticket 1815. + x = np.linspace(0.1, 0.7, num=5) + assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0) + assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0) + assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5) + assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5) + assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5) + + # These should not give errors + stats.gamma.pdf(x, 2, 3) # loc=3 + stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4 + stats.gamma.stats(2., 3) + stats.gamma.stats(2., 3, 4) + stats.gamma.stats(2., 3, 4, 'mv') + stats.gamma.rvs(2., 3, 4, 5) + stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.) 
+ + # Also for a discrete distribution + stats.geom.pmf(x, 2, loc=3) # no error, loc=3 + assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4) + assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4) + + # And for distributions with 0, 2 and 3 args respectively + assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0) + assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0) + assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1) + assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0) + assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5) + stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale + + +def test_ncx2_tails_ticket_955(): + # Trac #955 -- check that the cdf computed by special functions + # matches the integrated pdf + a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02) + b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02) + assert_allclose(a, b, rtol=1e-3, atol=0) + + +def test_ncx2_tails_pdf(): + # ncx2.pdf does not return nans in extreme tails(example from gh-1577) + # NB: this is to check that nan_to_num is not needed in ncx2.pdf + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) + assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0) + logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2) + + assert_(np.isneginf(logval).all()) + + # Verify logpdf has extended precision when pdf underflows to 0 + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) + assert_equal(stats.ncx2.pdf(10000, 3, 12), 0) + assert_allclose(stats.ncx2.logpdf(10000, 3, 12), -4662.444377524883) + + +@pytest.mark.parametrize('method, expected', [ + ('cdf', np.array([2.497951336e-09, 3.437288941e-10])), + ('pdf', np.array([1.238579980e-07, 1.710041145e-08])), + ('logpdf', np.array([-15.90413011, -17.88416331])), + ('ppf', np.array([4.865182052, 7.017182271])) +]) +def test_ncx2_zero_nc(method, expected): + # gh-5441 + # ncx2 with nc=0 is identical to chi2 + # Comparison to R (v3.5.1) + # > options(digits=10) + # > pchisq(0.1, df=10, ncp=c(0,4)) + # > dchisq(0.1, df=10, ncp=c(0,4)) + # > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE) + # > qchisq(0.1, df=10, ncp=c(0,4)) + + result = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10) + assert_allclose(result, expected, atol=1e-15) + + +def test_ncx2_zero_nc_rvs(): + # gh-5441 + # ncx2 with nc=0 is identical to chi2 + result = stats.ncx2.rvs(df=10, nc=0, random_state=1) + expected = stats.chi2.rvs(df=10, random_state=1) + assert_allclose(result, expected, atol=1e-15) + + +def test_ncx2_gh12731(): + # test that gh-12731 is resolved; previously these were all 0.5 + nc = 10**np.arange(5, 10) + assert_equal(stats.ncx2.cdf(1e4, df=1, nc=nc), 0) + + +def test_ncx2_gh8665(): + # test that gh-8665 is resolved; previously this tended to nonzero value + x = np.array([4.99515382e+00, 1.07617327e+01, 2.31854502e+01, + 4.99515382e+01, 1.07617327e+02, 2.31854502e+02, + 4.99515382e+02, 1.07617327e+03, 2.31854502e+03, + 4.99515382e+03, 1.07617327e+04, 2.31854502e+04, + 4.99515382e+04]) + nu, lam = 20, 499.51538166556196 + + sf = stats.ncx2.sf(x, df=nu, nc=lam) + # computed in R. 
Couldn't find a survival function
+    # implementation, so 1 - pchisq is used instead:
+    # options(digits=16)
+    # x <- c(4.99515382e+00, 1.07617327e+01, 2.31854502e+01, 4.99515382e+01,
+    #        1.07617327e+02, 2.31854502e+02, 4.99515382e+02, 1.07617327e+03,
+    #        2.31854502e+03, 4.99515382e+03, 1.07617327e+04, 2.31854502e+04,
+    #        4.99515382e+04)
+    # nu <- 20
+    # lam <- 499.51538166556196
+    # 1 - pchisq(x, df = nu, ncp = lam)
+    sf_expected = [1.0000000000000000, 1.0000000000000000, 1.0000000000000000,
+                   1.0000000000000000, 1.0000000000000000, 0.9999999999999888,
+                   0.6646525582135460, 0.0000000000000000, 0.0000000000000000,
+                   0.0000000000000000, 0.0000000000000000, 0.0000000000000000,
+                   0.0000000000000000]
+    assert_allclose(sf, sf_expected, atol=1e-12)
+
+
+def test_ncx2_gh11777():
+    # regression test for gh-11777:
+    # At high values of degrees of freedom df, ensure the pdf of ncx2 does
+    # not get clipped to zero when the non-centrality parameter is
+    # sufficiently less than df
+    df = 6700
+    nc = 5300
+    x = np.linspace(stats.ncx2.ppf(0.001, df, nc),
+                    stats.ncx2.ppf(0.999, df, nc), num=10000)
+    ncx2_pdf = stats.ncx2.pdf(x, df, nc)
+    gauss_approx = stats.norm.pdf(x, df + nc, np.sqrt(2 * df + 4 * nc))
+    # use a huge tolerance as we're only looking for an obvious discrepancy
+    assert_allclose(ncx2_pdf, gauss_approx, atol=1e-4)
+
+
+# Expected values for foldcauchy.sf were computed with mpmath:
+#
+# from mpmath import mp
+# mp.dps = 60
+# def foldcauchy_sf(x, c):
+#     x = mp.mpf(x)
+#     c = mp.mpf(c)
+#     return mp.one - (mp.atan(x - c) + mp.atan(x + c))/mp.pi
+#
+# E.g.
+#
+# >>> float(foldcauchy_sf(2, 1))
+# 0.35241638234956674
+#
+@pytest.mark.parametrize('x, c, expected',
+                         [(2, 1, 0.35241638234956674),
+                          (2, 2, 0.5779791303773694),
+                          (1e13, 1, 6.366197723675813e-14),
+                          (2e16, 1, 3.183098861837907e-17),
+                          (1e13, 2e11, 6.368745221764519e-14),
+                          (0.125, 200, 0.999998010612169)])
+def test_foldcauchy_sf(x, c, expected):
+    sf = stats.foldcauchy.sf(x, c)
+    assert_allclose(sf, expected, 2e-15)
+
+
+# The same mpmath code shown in the comments above test_foldcauchy_sf()
+# is used to create these expected values.
+@pytest.mark.parametrize('x, expected',
+                         [(2, 0.2951672353008665),
+                          (1e13, 6.366197723675813e-14),
+                          (2e16, 3.183098861837907e-17),
+                          (5e80, 1.2732395447351629e-81)])
+def test_halfcauchy_sf(x, expected):
+    sf = stats.halfcauchy.sf(x)
+    assert_allclose(sf, expected, 2e-15)
+
+
+# Expected value computed with mpmath:
+#     expected = mp.cot(mp.pi*p/2)
+@pytest.mark.parametrize('p, expected',
+                         [(0.9999995, 7.853981633329977e-07),
+                          (0.975, 0.039290107007669675),
+                          (0.5, 1.0),
+                          (0.01, 63.65674116287158),
+                          (1e-14, 63661977236758.13),
+                          (5e-80, 1.2732395447351627e+79)])
+def test_halfcauchy_isf(p, expected):
+    x = stats.halfcauchy.isf(p)
+    assert_allclose(x, expected)
+
+
+def test_foldnorm_zero():
+    # Parameter value c=0 was not enabled, see gh-2399.
+    rv = stats.foldnorm(0, scale=1)
+    assert_equal(rv.cdf(0), 0)  # rv.cdf(0) previously resulted in: nan
+
+
+# Expected values for foldnorm.sf were computed with mpmath:
+#
+# from mpmath import mp
+# mp.dps = 60
+# def foldnorm_sf(x, c):
+#     x = mp.mpf(x)
+#     c = mp.mpf(c)
+#     return mp.ncdf(-x+c) + mp.ncdf(-x-c)
+#
+# E.g.
+# +# >>> float(foldnorm_sf(2, 1)) +# 0.16000515196308715 +# +@pytest.mark.parametrize('x, c, expected', + [(2, 1, 0.16000515196308715), + (20, 1, 8.527223952630977e-81), + (10, 15, 0.9999997133484281), + (25, 15, 7.619853024160525e-24)]) +def test_foldnorm_sf(x, c, expected): + sf = stats.foldnorm.sf(x, c) + assert_allclose(sf, expected, 1e-14) + + +def test_stats_shapes_argcheck(): + # stats method was failing for vector shapes if some of the values + # were outside of the allowed range, see gh-2678 + mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a` + mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5) + mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2) + assert_equal(mv2_augmented, mv3) + + # -1 is not a legal shape parameter + mv3 = stats.lognorm.stats([2, 2.4, -1]) + mv2 = stats.lognorm.stats([2, 2.4]) + mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2) + assert_equal(mv2_augmented, mv3) + + # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix. + # stats method with multiple shape parameters is not properly vectorized + # anyway, so some distributions may or may not fail. + + +# Test subclassing distributions w/ explicit shapes + +class _distr_gen(stats.rv_continuous): + def _pdf(self, x, a): + return 42 + + +class _distr2_gen(stats.rv_continuous): + def _cdf(self, x, a): + return 42 * a + x + + +class _distr3_gen(stats.rv_continuous): + def _pdf(self, x, a, b): + return a + b + + def _cdf(self, x, a): + # Different # of shape params from _pdf, to be able to check that + # inspection catches the inconsistency. + return 42 * a + x + + +class _distr6_gen(stats.rv_continuous): + # Two shape parameters (both _pdf and _cdf defined, consistent shapes.) + def _pdf(self, x, a, b): + return a*x + b + + def _cdf(self, x, a, b): + return 42 * a + x + + +class TestSubclassingExplicitShapes: + # Construct a distribution w/ explicit shapes parameter and test it. 
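+    # A minimal sketch of the mechanism under test, using the _distr_gen
+    # subclass defined above: the `shapes` string declares the named shape
+    # arguments that the public methods parse and forward to _pdf, e.g.
+    #
+    #     dist = _distr_gen(name='dummy', shapes='a')
+    #     dist.pdf(1, a=1)   # `a` is routed through to _pdf(self, x, a)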
+ + def test_correct_shapes(self): + dummy_distr = _distr_gen(name='dummy', shapes='a') + assert_equal(dummy_distr.pdf(1, a=1), 42) + + def test_wrong_shapes_1(self): + dummy_distr = _distr_gen(name='dummy', shapes='A') + assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1)) + + def test_wrong_shapes_2(self): + dummy_distr = _distr_gen(name='dummy', shapes='a, b, c') + dct = dict(a=1, b=2, c=3) + assert_raises(TypeError, dummy_distr.pdf, 1, **dct) + + def test_shapes_string(self): + # shapes must be a string + dct = dict(name='dummy', shapes=42) + assert_raises(TypeError, _distr_gen, **dct) + + def test_shapes_identifiers_1(self): + # shapes must be a comma-separated list of valid python identifiers + dct = dict(name='dummy', shapes='(!)') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_identifiers_2(self): + dct = dict(name='dummy', shapes='4chan') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_identifiers_3(self): + dct = dict(name='dummy', shapes='m(fti)') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_identifiers_nodefaults(self): + dct = dict(name='dummy', shapes='a=2') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_args(self): + dct = dict(name='dummy', shapes='*args') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_kwargs(self): + dct = dict(name='dummy', shapes='**kwargs') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_keywords(self): + # python keywords cannot be used for shape parameters + dct = dict(name='dummy', shapes='a, b, c, lambda') + assert_raises(SyntaxError, _distr_gen, **dct) + + def test_shapes_signature(self): + # test explicit shapes which agree w/ the signature of _pdf + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, a): + return stats.norm._pdf(x) * a + + dist = _dist_gen(shapes='a') + assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2) + + def test_shapes_signature_inconsistent(self): + # test explicit shapes which do not agree w/ the signature of _pdf + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, a): + return stats.norm._pdf(x) * a + + dist = _dist_gen(shapes='a, b') + assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2)) + + def test_star_args(self): + # test _pdf with only starargs + # NB: **kwargs of pdf will never reach _pdf + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, *args): + extra_kwarg = args[0] + return stats.norm._pdf(x) * extra_kwarg + + dist = _dist_gen(shapes='extra_kwarg') + assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33) + assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33) + assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33)) + + def test_star_args_2(self): + # test _pdf with named & starargs + # NB: **kwargs of pdf will never reach _pdf + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, offset, *args): + extra_kwarg = args[0] + return stats.norm._pdf(x) * extra_kwarg + offset + + dist = _dist_gen(shapes='offset, extra_kwarg') + assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33), + stats.norm.pdf(0.5)*33 + 111) + assert_equal(dist.pdf(0.5, 111, 33), + stats.norm.pdf(0.5)*33 + 111) + + def test_extra_kwarg(self): + # **kwargs to _pdf are ignored. + # this is a limitation of the framework (_pdf(x, *goodargs)) + class _distr_gen(stats.rv_continuous): + def _pdf(self, x, *args, **kwargs): + # _pdf should handle *args, **kwargs itself. Here "handling" + # is ignoring *args and looking for ``extra_kwarg`` and using + # that. 
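+ # (Note: the public pdf() turns shape keywords into positional + # arguments and calls self._pdf(x, *goodargs), so the value 3 passed + # as extra_kwarg in the assertion below arrives in *args, never in + # **kwargs; the pop() falls back to its default of 1 and the result + # equals the plain stats.norm pdf.)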
+ extra_kwarg = kwargs.pop('extra_kwarg', 1) + return stats.norm._pdf(x) * extra_kwarg + + dist = _distr_gen(shapes='extra_kwarg') + assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1)) + + def test_shapes_empty_string(self): + # shapes='' is equivalent to shapes=None + class _dist_gen(stats.rv_continuous): + def _pdf(self, x): + return stats.norm.pdf(x) + + dist = _dist_gen(shapes='') + assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5)) + + +class TestSubclassingNoShapes: + # Construct a distribution w/o explicit shapes parameter and test it. + + def test_only__pdf(self): + dummy_distr = _distr_gen(name='dummy') + assert_equal(dummy_distr.pdf(1, a=1), 42) + + def test_only__cdf(self): + # _pdf is determined from _cdf by taking numerical derivative + dummy_distr = _distr2_gen(name='dummy') + assert_almost_equal(dummy_distr.pdf(1, a=1), 1) + + @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped") + def test_signature_inspection(self): + # check that _pdf signature inspection works correctly, and is used in + # the class docstring + dummy_distr = _distr_gen(name='dummy') + assert_equal(dummy_distr.numargs, 1) + assert_equal(dummy_distr.shapes, 'a') + res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)', + dummy_distr.__doc__) + assert_(len(res) == 1) + + @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped") + def test_signature_inspection_2args(self): + # same for 2 shape params and both _pdf and _cdf defined + dummy_distr = _distr6_gen(name='dummy') + assert_equal(dummy_distr.numargs, 2) + assert_equal(dummy_distr.shapes, 'a, b') + res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)', + dummy_distr.__doc__) + assert_(len(res) == 1) + + def test_signature_inspection_2args_incorrect_shapes(self): + # both _pdf and _cdf defined, but shapes are inconsistent: raises + assert_raises(TypeError, _distr3_gen, name='dummy') + + def test_defaults_raise(self): + # default arguments should raise + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, a=42): + return 42 + assert_raises(TypeError, _dist_gen, **dict(name='dummy')) + + def test_starargs_raise(self): + # without explicit shapes, *args are not allowed + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, a, *args): + return 42 + assert_raises(TypeError, _dist_gen, **dict(name='dummy')) + + def test_kwargs_raise(self): + # without explicit shapes, **kwargs are not allowed + class _dist_gen(stats.rv_continuous): + def _pdf(self, x, a, **kwargs): + return 42 + assert_raises(TypeError, _dist_gen, **dict(name='dummy')) + + +@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped") +def test_docstrings(): + badones = [r',\s*,', r'\(\s*,', r'^\s*:'] + for distname in stats.__all__: + dist = getattr(stats, distname) + if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)): + for regex in badones: + assert_(re.search(regex, dist.__doc__) is None) + + +def test_infinite_input(): + assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0) + assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1) + + +def test_lomax_accuracy(): + # regression test for gh-4033 + p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1) + assert_allclose(p, 1e-100) + + +def test_truncexpon_accuracy(): + # regression test for gh-4035 + p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1) + assert_allclose(p, 1e-100) + + +def test_rayleigh_accuracy(): + # regression test for gh-4034 + p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1) + assert_almost_equal(p, 9.0, decimal=15) + + +def 
test_genextreme_give_no_warnings(): + """regression test for gh-6219""" + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + stats.genextreme.cdf(.5, 0) + stats.genextreme.pdf(.5, 0) + stats.genextreme.ppf(.5, 0) + stats.genextreme.logpdf(-np.inf, 0.0) + number_of_warnings_thrown = len(w) + assert_equal(number_of_warnings_thrown, 0) + + +def test_genextreme_entropy(): + # regression test for gh-5181 + euler_gamma = 0.5772156649015329 + + h = stats.genextreme.entropy(-1.0) + assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14) + + h = stats.genextreme.entropy(0) + assert_allclose(h, euler_gamma + 1, rtol=1e-14) + + h = stats.genextreme.entropy(1.0) + assert_equal(h, 1) + + h = stats.genextreme.entropy(-2.0, scale=10) + assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14) + + h = stats.genextreme.entropy(10) + assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14) + + h = stats.genextreme.entropy(-10) + assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14) + + +def test_genextreme_sf_isf(): + # Expected values were computed using mpmath: + # + # import mpmath + # + # def mp_genextreme_sf(x, xi, mu=0, sigma=1): + # # Formula from wikipedia, which has a sign convention for xi that + # # is the opposite of scipy's shape parameter. + # if xi != 0: + # t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi) + # else: + # t = mpmath.exp(-(x - mu)/sigma) + # return 1 - mpmath.exp(-t) + # + # >>> mpmath.mp.dps = 1000 + # >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125")) + # >>> float(s) + # 1.6777205262585625e-57 + # >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125")) + # >>> float(s) + # 1.52587890625e-21 + # >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0")) + # >>> float(s) + # 0.00034218086528426593 + + x = 1e8 + s = stats.genextreme.sf(x, -0.125) + assert_allclose(s, 1.6777205262585625e-57) + x2 = stats.genextreme.isf(s, -0.125) + assert_allclose(x2, x) + + x = 7.98 + s = stats.genextreme.sf(x, 0.125) + assert_allclose(s, 1.52587890625e-21) + x2 = stats.genextreme.isf(s, 0.125) + assert_allclose(x2, x) + + x = 7.98 + s = stats.genextreme.sf(x, 0) + assert_allclose(s, 0.00034218086528426593) + x2 = stats.genextreme.isf(s, 0) + assert_allclose(x2, x) + + +def test_burr12_ppf_small_arg(): + prob = 1e-16 + quantile = stats.burr12.ppf(prob, 2, 3) + # The expected quantile was computed using mpmath: + # >>> import mpmath + # >>> mpmath.mp.dps = 100 + # >>> prob = mpmath.mpf('1e-16') + # >>> c = mpmath.mpf(2) + # >>> d = mpmath.mpf(3) + # >>> float(((1-prob)**(-1/d) - 1)**(1/c)) + # 5.7735026918962575e-09 + assert_allclose(quantile, 5.7735026918962575e-09) + + +def test_crystalball_function(): + """ + All values are calculated using the independent implementation of the + ROOT framework (see https://root.cern.ch/). + Corresponding ROOT code is given in the comments. 
+ """ + X = np.linspace(-5.0, 5.0, 21)[:-1] + + # for(float x = -5.0; x < 5.0; x+=0.5) + # std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", "; + calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0) + expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645, + 0.059618, 0.0811467, 0.116851, 0.18258, 0.265652, + 0.301023, 0.265652, 0.18258, 0.097728, 0.0407391, + 0.013226, 0.00334407, 0.000658486, 0.000100982, + 1.20606e-05]) + assert_allclose(expected, calculated, rtol=0.001) + + # for(float x = -5.0; x < 5.0; x+=0.5) + # std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", "; + calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0) + expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121, + 0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752, + 0.345928, 0.391987, 0.345928, 0.237752, 0.12726, + 0.0530497, 0.0172227, 0.00435458, 0.000857469, + 0.000131497, 1.57051e-05]) + assert_allclose(expected, calculated, rtol=0.001) + + # for(float x = -5.0; x < 5.0; x+=0.5) { + # std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5); + # std::cout << ", "; + # } + calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0) + expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249, + 0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944, + 0.172964, 0.189964, 0.195994, 0.189964, 0.172964, + 0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866, + 0.0265249]) + assert_allclose(expected, calculated, rtol=0.001) + + # for(float x = -5.0; x < 5.0; x+=0.5) + # std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", "; + calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0) + expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258, + 0.208663, 0.24344, 0.292128, 0.36516, 0.478254, + 0.622723, 0.767192, 0.880286, 0.94959, 0.982834, + 0.995314, 0.998981, 0.999824, 0.999976, 0.999997]) + assert_allclose(expected, calculated, rtol=0.001) + + # for(float x = -5.0; x < 5.0; x+=0.5) + # std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", "; + calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0) + expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682, + 0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323, + 0.320592, 0.508717, 0.696841, 0.844111, 0.934357, + 0.977646, 0.993899, 0.998674, 0.999771, 0.999969, + 0.999997]) + assert_allclose(expected, calculated, rtol=0.001) + + # for(float x = -5.0; x < 5.0; x+=0.5) { + # std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5); + # std::cout << ", "; + # } + calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0) + expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945, + 0.0830763, 0.121242, 0.173323, 0.24011, 0.320592, + 0.411731, 0.508717, 0.605702, 0.696841, 0.777324, + 0.844111, 0.896192, 0.934357, 0.960639, 0.977646]) + assert_allclose(expected, calculated, rtol=0.001) + + +def test_crystalball_function_moments(): + """ + All values are calculated using the pdf formula and the integrate function + of Mathematica + """ + # The Last two (alpha, n) pairs test the special case n == alpha**2 + beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0]) + m = np.array([3.0, 3.0, 2.0, 4.0, 9.0]) + + # The distribution should be correctly normalised + expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0]) + calculated_0th_moment = stats.crystalball._munp(0, beta, m) + assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001) + + # calculated using wolframalpha.com + # e.g. 
for beta = 2 and m = 3 we calculate the norm like this: + # integrate exp(-x^2/2) from -2 to infinity + + # integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2 + norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455]) + + a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174]) + expected_1st_moment = a / norm + calculated_1st_moment = stats.crystalball._munp(1, beta, m) + assert_allclose(expected_1st_moment, calculated_1st_moment, rtol=0.001) + + a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908]) + expected_2nd_moment = a / norm + calculated_2nd_moment = stats.crystalball._munp(2, beta, m) + assert_allclose(expected_2nd_moment, calculated_2nd_moment, rtol=0.001) + + a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668]) + expected_3rd_moment = a / norm + calculated_3rd_moment = stats.crystalball._munp(3, beta, m) + assert_allclose(expected_3rd_moment, calculated_3rd_moment, rtol=0.001) + + a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468]) + expected_4th_moment = a / norm + calculated_4th_moment = stats.crystalball._munp(4, beta, m) + assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001) + + a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086]) + expected_5th_moment = a / norm + calculated_5th_moment = stats.crystalball._munp(5, beta, m) + assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001) + + +def test_crystalball_entropy(): + # regression test for gh-13602 + cb = stats.crystalball(2, 3) + res1 = cb.entropy() + # -20000 and 30 stand in for negative and positive infinity, respectively + lo, hi, N = -20000, 30, 200000 + x = np.linspace(lo, hi, N) + res2 = trapezoid(entr(cb.pdf(x)), x) + assert_allclose(res1, res2, rtol=1e-7) + + +def test_invweibull_fit(): + """ + Test fitting invweibull to data. + + Here is the same calculation in R: + + > library(evd) + > library(fitdistrplus) + > x = c(1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99) + > result = fitdist(x, 'frechet', control=list(reltol=1e-13), + + fix.arg=list(loc=0), start=list(shape=2, scale=3)) + > result + Fitting of the distribution ' frechet ' by maximum likelihood + Parameters: + estimate Std. Error + shape 1.048482 0.2261815 + scale 3.099456 0.8292887 + Fixed parameters: + value + loc 0 + + """ + + def optimizer(func, x0, args=(), disp=0): + return fmin(func, x0, args=args, disp=disp, xtol=1e-12, ftol=1e-12) + + x = np.array([1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99]) + c, loc, scale = stats.invweibull.fit(x, floc=0, optimizer=optimizer) + assert_allclose(c, 1.048482, rtol=5e-6) + assert loc == 0 + assert_allclose(scale, 3.099456, rtol=5e-6) + + +# Expected values were computed with mpmath. +@pytest.mark.parametrize('x, c, expected', + [(3, 1.5, 0.175064510070713299327), + (2000, 1.5, 1.11802773877318715787e-5), + (2000, 9.25, 2.92060308832269637092e-31), + (1e15, 1.5, 3.16227766016837933199884e-23)]) +def test_invweibull_sf(x, c, expected): + computed = stats.invweibull.sf(x, c) + assert_allclose(computed, expected, rtol=1e-15) + + +# Expected values were computed with mpmath.
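+# A sketch of how such values can be obtained (assuming invweibull's +# standard form, cdf(x, c) = exp(-x**-c), so sf(x, c) = -expm1(-x**-c) +# and isf(p, c) = (-log(1 - p))**(-1/c)): +# +# from mpmath import mp +# mp.dps = 60 +# def invweibull_isf(p, c): +# p, c = mp.mpf(p), mp.mpf(c) +# return (-mp.log(1 - p))**(-1/c) +# +# >>> float(invweibull_isf(0.5, 2.5)) +# 1.1578966983646818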
+@pytest.mark.parametrize('p, c, expected', + [(0.5, 2.5, 1.15789669836468183976), + (3e-18, 5, 3195.77171838060906447)]) +def test_invweibull_isf(p, c, expected): + computed = stats.invweibull.isf(p, c) + assert_allclose(computed, expected, rtol=1e-15) + + +@pytest.mark.parametrize( + 'df1,df2,x', + [(2, 2, [-0.5, 0.2, 1.0, 2.3]), + (4, 11, [-0.5, 0.2, 1.0, 2.3]), + (7, 17, [1, 2, 3, 4, 5])] +) +def test_ncf_edge_case(df1, df2, x): + # Test for edge case described in gh-11660. + # The non-central Fisher distribution with nc = 0 + # should be the same as the Fisher distribution. + nc = 0 + expected_cdf = stats.f.cdf(x, df1, df2) + calculated_cdf = stats.ncf.cdf(x, df1, df2, nc) + assert_allclose(expected_cdf, calculated_cdf, rtol=1e-14) + + # If ncf_gen._skip_pdf is ever used instead of the generic pdf, + # this additional test will be useful. + expected_pdf = stats.f.pdf(x, df1, df2) + calculated_pdf = stats.ncf.pdf(x, df1, df2, nc) + assert_allclose(expected_pdf, calculated_pdf, rtol=1e-6) + + +def test_ncf_variance(): + # Regression test for gh-10658 (incorrect variance formula for ncf). + # The correct value of ncf.var(2, 6, 4), 42.75, can be verified with, for + # example, Wolfram Alpha with the expression + # Variance[NoncentralFRatioDistribution[2, 6, 4]] + # or with the implementation of the noncentral F distribution in the C++ + # library Boost. + v = stats.ncf.var(2, 6, 4) + assert_allclose(v, 42.75, rtol=1e-14) + + +def test_ncf_cdf_spotcheck(): + # Regression test for gh-15582 testing against values from R/MATLAB + # Generate check_val from R or MATLAB as follows: + # R: pf(20, df1 = 6, df2 = 33, ncp = 30.4) = 0.998921 + # MATLAB: ncfcdf(20, 6, 33, 30.4) = 0.998921 + scipy_val = stats.ncf.cdf(20, 6, 33, 30.4) + check_val = 0.998921 + assert_allclose(check_val, np.round(scipy_val, decimals=6)) + + +@pytest.mark.skipif(sys.maxsize <= 2**32, + reason="On some 32-bit platforms the warning is not raised") +def test_ncf_ppf_issue_17026(): + # Regression test for gh-17026 + x = np.linspace(0, 1, 600) + x[0] = 1e-16 + par = (0.1, 2, 5, 0, 1) + with pytest.warns(RuntimeWarning): + q = stats.ncf.ppf(x, *par) + q0 = [stats.ncf.ppf(xi, *par) for xi in x] + assert_allclose(q, q0) + + +class TestHistogram: + def setup_method(self): + np.random.seed(1234) + + # We have 8 bins + # [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9) + # But actually np.histogram will put the last 9 also in the [8,9) bin! + # Therefore there is a slight difference below for the last bin, from + # what you might have expected. + histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8) + self.template = stats.rv_histogram(histogram) + + data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123) + norm_histogram = np.histogram(data, bins=50) + self.norm_template = stats.rv_histogram(norm_histogram) + + def test_pdf(self): + values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, + 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5]) + pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0, + 2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0, + 4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0, + 4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0, + 3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0]) + assert_allclose(self.template.pdf(values), pdf_values) + + # Test explicitly the corner cases: + # As stated above the pdf in the bin [8,9) is greater than + # one would naively expect because np.histogram put the 9 + # into the [8,9) bin.
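+ # (For example, np.histogram([8, 9], bins=1, range=(8, 9)) counts + # both samples: the right edge of the last bin is inclusive.)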
+ assert_almost_equal(self.template.pdf(8.0), 3.0/25.0) + assert_almost_equal(self.template.pdf(8.5), 3.0/25.0) + # 9 is outside our defined bins [8,9), hence the pdf is already 0 + # for a continuous distribution this is fine, because a single point + # carries zero probability mass! + assert_almost_equal(self.template.pdf(9.0), 0.0/25.0) + assert_almost_equal(self.template.pdf(10.0), 0.0/25.0) + + x = np.linspace(-2, 2, 10) + assert_allclose(self.norm_template.pdf(x), + stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1) + + def test_cdf_ppf(self): + values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, + 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5]) + cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0, + 1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0, + 6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0, + 15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0, + 22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0]) + assert_allclose(self.template.cdf(values), cdf_values) + # First three and last two values in cdf_values are not unique + assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1]) + + # Test that cdf and ppf are inverse functions + x = np.linspace(1.0, 9.0, 100) + assert_allclose(self.template.ppf(self.template.cdf(x)), x) + x = np.linspace(0.0, 1.0, 100) + assert_allclose(self.template.cdf(self.template.ppf(x)), x) + + x = np.linspace(-2, 2, 10) + assert_allclose(self.norm_template.cdf(x), + stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1) + + def test_rvs(self): + N = 10000 + sample = self.template.rvs(size=N, random_state=123) + assert_equal(np.sum(sample < 1.0), 0.0) + assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2) + assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2) + assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1) + assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1) + assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1) + assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1) + assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05) + assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05) + assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05) + assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05) + assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05) + assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05) + assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05) + assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05) + assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05) + assert_equal(np.sum(sample > 9.0), 0.0) + + def test_munp(self): + for n in range(4): + assert_allclose(self.norm_template._munp(n), + stats.norm(1.0, 2.5).moment(n), rtol=0.05) + + def test_entropy(self): + assert_allclose(self.norm_template.entropy(), + stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05) + + +def test_histogram_non_uniform(): + # Tests that rv_histogram works even for non-uniform bin widths + counts, bins = ([1, 1], [0, 1, 1001]) + + dist = stats.rv_histogram((counts, bins), density=False) + np.testing.assert_allclose(dist.pdf([0.5, 200]), [0.5, 0.0005]) + assert dist.median() == 1 + + dist = stats.rv_histogram((counts, bins), density=True) + np.testing.assert_allclose(dist.pdf([0.5, 200]), 1/1001) + assert dist.median() == 1001/2 + + # Omitting density produces a warning for non-uniform bins... + message = "Bin widths are not constant. Assuming..."
+ with pytest.warns(RuntimeWarning, match=message): + dist = stats.rv_histogram((counts, bins)) + assert dist.median() == 1001/2 # default is like `density=True` + + # ... but not for uniform bins + dist = stats.rv_histogram((counts, [0, 1, 2])) + assert dist.median() == 1 + + +class TestLogUniform: + def test_alias(self): + # This test makes sure that "reciprocal" and "loguniform" are + # aliases of the same distribution and that both are log-uniform + rng = np.random.default_rng(98643218961) + rv = stats.loguniform(10 ** -3, 10 ** 0) + rvs = rv.rvs(size=10000, random_state=rng) + + rng = np.random.default_rng(98643218961) + rv2 = stats.reciprocal(10 ** -3, 10 ** 0) + rvs2 = rv2.rvs(size=10000, random_state=rng) + + assert_allclose(rvs2, rvs) + + vals, _ = np.histogram(np.log10(rvs), bins=10) + assert 900 <= vals.min() <= vals.max() <= 1100 + assert np.abs(np.median(vals) - 1000) <= 10 + + @pytest.mark.parametrize("method", ['mle', 'mm']) + def test_fit_override(self, method): + # loguniform is overparameterized, so check that fit override enforces + # scale=1 unless fscale is provided by the user + rng = np.random.default_rng(98643218961) + rvs = stats.loguniform.rvs(0.1, 1, size=1000, random_state=rng) + + a, b, loc, scale = stats.loguniform.fit(rvs, method=method) + assert scale == 1 + + a, b, loc, scale = stats.loguniform.fit(rvs, fscale=2, method=method) + assert scale == 2 + + def test_overflow(self): + # original formulation had overflow issues; check that this is resolved + # Extensive accuracy tests elsewhere, no need to test all methods + rng = np.random.default_rng(7136519550773909093) + a, b = 1e-200, 1e200 + dist = stats.loguniform(a, b) + + # test roundtrip error + cdf = rng.uniform(0, 1, size=1000) + assert_allclose(dist.cdf(dist.ppf(cdf)), cdf) + rvs = dist.rvs(size=1000) + assert_allclose(dist.ppf(dist.cdf(rvs)), rvs) + + # test a property of the pdf (and that there is no overflow) + x = 10.**np.arange(-200, 200) + pdf = dist.pdf(x) # no overflow + assert_allclose(pdf[:-1]/pdf[1:], 10) + + # check munp against wikipedia reference + mean = (b - a)/(np.log(b) - np.log(a)) + assert_allclose(dist.mean(), mean) + + +class TestArgus: + def test_argus_rvs_large_chi(self): + # test that the algorithm can handle large values of chi + x = stats.argus.rvs(50, size=500, random_state=325) + assert_almost_equal(stats.argus(50).mean(), x.mean(), decimal=4) + + @pytest.mark.parametrize('chi, random_state', [ + [0.1, 325], # chi <= 0.5: rejection method case 1 + [1.3, 155], # 0.5 < chi <= 1.8: rejection method case 2 + [3.5, 135] # chi > 1.8: transform conditional Gamma distribution + ]) + def test_rvs(self, chi, random_state): + x = stats.argus.rvs(chi, size=500, random_state=random_state) + _, p = stats.kstest(x, "argus", (chi, )) + assert_(p > 0.05) + + @pytest.mark.parametrize('chi', [1e-9, 1e-6]) + def test_rvs_small_chi(self, chi): + # test for gh-11699 => rejection method case 1 can even handle chi=0 + # the CDF of the distribution for chi=0 is 1 - (1 - x**2)**(3/2) + # test rvs against distribution of limit chi=0 + r = stats.argus.rvs(chi, size=500, random_state=890981) + _, p = stats.kstest(r, lambda x: 1 - (1 - x**2)**(3/2)) + assert_(p > 0.05) + + # Expected values were computed with mpmath. 
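+ # A sketch of that computation (assuming the known closed form of the + # ARGUS mean, E[X] = sqrt(pi/8)*chi*exp(-chi**2/4)*I1(chi**2/4)/Psi(chi), + # where Psi(chi) = ncdf(chi) - chi*npdf(chi) - 1/2 uses the standard + # normal cdf and pdf): + # + # from mpmath import mp + # mp.dps = 60 + # def argus_mean(chi): + # chi = mp.mpf(chi) + # psi = mp.ncdf(chi) - chi*mp.npdf(chi) - mp.mpf('0.5') + # return (mp.sqrt(mp.pi/8) * chi * mp.exp(-chi**2/4) + # * mp.besseli(1, chi**2/4) / psi) + # + # >>> float(argus_mean(1)) + # 0.6187026683551835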
+ @pytest.mark.parametrize('chi, expected_mean', + [(1, 0.6187026683551835), + (10, 0.984805536783744), + (40, 0.9990617659702923), + (60, 0.9995831885165300), + (99, 0.9998469348663028)]) + def test_mean(self, chi, expected_mean): + m = stats.argus.mean(chi, scale=1) + assert_allclose(m, expected_mean, rtol=1e-13) + + # Expected values were computed with mpmath. + @pytest.mark.parametrize('chi, expected_var, rtol', + [(1, 0.05215651254197807, 1e-13), + (10, 0.00015805472008165595, 1e-11), + (40, 5.877763210262901e-07, 1e-8), + (60, 1.1590179389611416e-07, 1e-8), + (99, 1.5623277006064666e-08, 1e-8)]) + def test_var(self, chi, expected_var, rtol): + v = stats.argus.var(chi, scale=1) + assert_allclose(v, expected_var, rtol=rtol) + + # Expected values were computed with mpmath (code: see gh-13370). + @pytest.mark.parametrize('chi, expected, rtol', + [(0.9, 0.07646314974436118, 1e-14), + (0.5, 0.015429797891863365, 1e-14), + (0.1, 0.0001325825293278049, 1e-14), + (0.01, 1.3297677078224565e-07, 1e-15), + (1e-3, 1.3298072023958999e-10, 1e-14), + (1e-4, 1.3298075973486862e-13, 1e-14), + (1e-6, 1.32980760133771e-19, 1e-14), + (1e-9, 1.329807601338109e-28, 1e-15)]) + def test_argus_phi_small_chi(self, chi, expected, rtol): + assert_allclose(_argus_phi(chi), expected, rtol=rtol) + + # Expected values were computed with mpmath (code: see gh-13370). + @pytest.mark.parametrize( + 'chi, expected', + [(0.5, (0.28414073302940573, 1.2742227939992954, 1.2381254688255896)), + (0.2, (0.296172952995264, 1.2951290588110516, 1.1865767100877576)), + (0.1, (0.29791447523536274, 1.29806307956989, 1.1793168289857412)), + (0.01, (0.2984904104866452, 1.2990283628160553, 1.1769268414080531)), + (1e-3, (0.298496172925224, 1.2990380082487925, 1.176902956021053)), + (1e-4, (0.29849623054991836, 1.2990381047023793, 1.1769027171686324)), + (1e-6, (0.2984962311319278, 1.2990381056765605, 1.1769027147562232)), + (1e-9, (0.298496231131986, 1.299038105676658, 1.1769027147559818))]) + def test_pdf_small_chi(self, chi, expected): + x = np.array([0.1, 0.5, 0.9]) + assert_allclose(stats.argus.pdf(x, chi), expected, rtol=1e-13) + + # Expected values were computed with mpmath (code: see gh-13370). + @pytest.mark.parametrize( + 'chi, expected', + [(0.5, (0.9857660526895221, 0.6616565930168475, 0.08796070398429937)), + (0.2, (0.9851555052359501, 0.6514666238985464, 0.08362690023746594)), + (0.1, (0.9850670974995661, 0.6500061310508574, 0.08302050640683846)), + (0.01, (0.9850378582451867, 0.6495239242251358, 0.08282109244852445)), + (1e-3, (0.9850375656906663, 0.6495191015522573, 0.08281910005231098)), + (1e-4, (0.9850375627651049, 0.6495190533254682, 0.08281908012852317)), + (1e-6, (0.9850375627355568, 0.6495190528383777, 0.08281907992729293)), + (1e-9, (0.9850375627355538, 0.649519052838329, 0.0828190799272728))]) + def test_sf_small_chi(self, chi, expected): + x = np.array([0.1, 0.5, 0.9]) + assert_allclose(stats.argus.sf(x, chi), expected, rtol=1e-14) + + # Expected values were computed with mpmath (code: see gh-13370). 
+ @pytest.mark.parametrize( + 'chi, expected', + [(0.5, (0.0142339473104779, 0.3383434069831524, 0.9120392960157007)), + (0.2, (0.014844494764049919, 0.34853337610145363, 0.916373099762534)), + (0.1, (0.014932902500433911, 0.34999386894914264, 0.9169794935931616)), + (0.01, (0.014962141754813293, 0.35047607577486417, 0.9171789075514756)), + (1e-3, (0.01496243430933372, 0.35048089844774266, 0.917180899947689)), + (1e-4, (0.014962437234895118, 0.3504809466745317, 0.9171809198714769)), + (1e-6, (0.01496243726444329, 0.3504809471616223, 0.9171809200727071)), + (1e-9, (0.014962437264446245, 0.350480947161671, 0.9171809200727272))]) + def test_cdf_small_chi(self, chi, expected): + x = np.array([0.1, 0.5, 0.9]) + assert_allclose(stats.argus.cdf(x, chi), expected, rtol=1e-12) + + # Expected values were computed with mpmath (code: see gh-13370). + @pytest.mark.parametrize( + 'chi, expected, rtol', + [(0.5, (0.5964284712757741, 0.052890651988588604), 1e-12), + (0.101, (0.5893490968089076, 0.053017469847275685), 1e-11), + (0.1, (0.5893431757009437, 0.05301755449499372), 1e-13), + (0.01, (0.5890515677940915, 0.05302167905837031), 1e-13), + (1e-3, (0.5890486520005177, 0.053021719862088104), 1e-13), + (1e-4, (0.5890486228426105, 0.0530217202700811), 1e-13), + (1e-6, (0.5890486225481156, 0.05302172027420182), 1e-13), + (1e-9, (0.5890486225480862, 0.05302172027420224), 1e-13)]) + def test_stats_small_chi(self, chi, expected, rtol): + val = stats.argus.stats(chi, moments='mv') + assert_allclose(val, expected, rtol=rtol) + + +class TestNakagami: + + def test_logpdf(self): + # Test nakagami logpdf for an input where the PDF is smaller + # than can be represented with 64 bit floating point. + # The expected value of logpdf was computed with mpmath: + # + # def logpdf(x, nu): + # x = mpmath.mpf(x) + # nu = mpmath.mpf(nu) + # return (mpmath.log(2) + nu*mpmath.log(nu) - + # mpmath.loggamma(nu) + (2*nu - 1)*mpmath.log(x) - + # nu*x**2) + # + nu = 2.5 + x = 25 + logp = stats.nakagami.logpdf(x, nu) + assert_allclose(logp, -1546.9253055607549) + + def test_sf_isf(self): + # Test nakagami sf and isf when the survival function + # value is very small. + # The expected value of the survival function was computed + # with mpmath: + # + # def sf(x, nu): + # x = mpmath.mpf(x) + # nu = mpmath.mpf(nu) + # return mpmath.gammainc(nu, nu*x*x, regularized=True) + # + nu = 2.5 + x0 = 5.0 + sf = stats.nakagami.sf(x0, nu) + assert_allclose(sf, 2.736273158588307e-25, rtol=1e-13) + # Check round trip back to x0. 
+ x1 = stats.nakagami.isf(sf, nu) + assert_allclose(x1, x0, rtol=1e-13) + + @pytest.mark.parametrize("m, ref", + [(5, -0.097341814372152), + (0.5, 0.7257913526447274), + (10, -0.43426184310934907)]) + def test_entropy(self, m, ref): + # from sympy import * + # from mpmath import mp + # import numpy as np + # v, x = symbols('v, x', real=True, positive=True) + # pdf = 2 * v ** v / gamma(v) * x ** (2 * v - 1) * exp(-v * x ** 2) + # h = simplify(simplify(integrate(-pdf * log(pdf), (x, 0, oo)))) + # entropy = lambdify(v, h, 'mpmath') + # mp.dps = 200 + # nu = 5 + # ref = np.float64(entropy(mp.mpf(nu))) + # print(ref) + assert_allclose(stats.nakagami.entropy(m), ref, rtol=1.1e-14) + + @pytest.mark.parametrize("m, ref", + [(1e-100, -5.0e+99), (1e-10, -4999999965.442979), + (9.999e6, -7.333206478668433), (1.001e7, -7.3337562313259825), + (1e10, -10.787134112333835), (1e100, -114.40346329705756)]) + def test_extreme_nu(self, m, ref): + assert_allclose(stats.nakagami.entropy(m), ref) + + def test_entropy_overflow(self): + assert np.isfinite(stats.nakagami._entropy(1e100)) + assert np.isfinite(stats.nakagami._entropy(1e-100)) + + @pytest.mark.parametrize("nu, ref", + [(1e10, 0.9999999999875), + (1e3, 0.9998750078173821), + (1e-10, 1.772453850659802e-05)]) + def test_mean(self, nu, ref): + # reference values were computed with mpmath + # from mpmath import mp + # mp.dps = 500 + # nu = mp.mpf(1e10) + # float(mp.rf(nu, mp.mpf(0.5))/mp.sqrt(nu)) + assert_allclose(stats.nakagami.mean(nu), ref, rtol=1e-12) + + @pytest.mark.xfail(reason="Fit of nakagami not reliable, see gh-10908.") + @pytest.mark.parametrize('nu', [1.6, 2.5, 3.9]) + @pytest.mark.parametrize('loc', [25.0, 10, 35]) + @pytest.mark.parametrize('scale', [13, 5, 20]) + def test_fit(self, nu, loc, scale): + # Regression test for gh-13396 (21/27 cases failed previously) + # The first tuple of the parameters' values is discussed in gh-10908 + N = 100 + samples = stats.nakagami.rvs(size=N, nu=nu, loc=loc, + scale=scale, random_state=1337) + nu_est, loc_est, scale_est = stats.nakagami.fit(samples) + assert_allclose(nu_est, nu, rtol=0.2) + assert_allclose(loc_est, loc, rtol=0.2) + assert_allclose(scale_est, scale, rtol=0.2) + + def dlogl_dnu(nu, loc, scale): + return (N * (1 + np.log(nu) - polygamma(0, nu)) + + 2 * np.sum(np.log((samples - loc) / scale)) + - np.sum(((samples - loc) / scale)**2)) + + def dlogl_dloc(nu, loc, scale): + return ((-2*nu + 1) * np.sum(1/(samples - loc)) + + 2*nu/scale**2 * np.sum(samples - loc)) + + def dlogl_dscale(nu, loc, scale): + return (- 2 * N * nu / scale + + 2 * nu / scale ** 3 * np.sum((samples - loc) ** 2)) + + assert_allclose(dlogl_dnu(nu_est, loc_est, scale_est), 0, atol=1e-3) + assert_allclose(dlogl_dloc(nu_est, loc_est, scale_est), 0, atol=1e-3) + assert_allclose(dlogl_dscale(nu_est, loc_est, scale_est), 0, atol=1e-3) + + @pytest.mark.parametrize('loc', [25.0, 10, 35]) + @pytest.mark.parametrize('scale', [13, 5, 20]) + def test_fit_nu(self, loc, scale): + # For nu = 0.5, we have analytical values for + # the MLE of the loc and the scale + nu = 0.5 + n = 100 + samples = stats.nakagami.rvs(size=n, nu=nu, loc=loc, + scale=scale, random_state=1337) + nu_est, loc_est, scale_est = stats.nakagami.fit(samples, f0=nu) + + # Analytical values + loc_theo = np.min(samples) + scale_theo = np.sqrt(np.mean((samples - loc_est) ** 2)) + + assert_allclose(nu_est, nu, rtol=1e-7) + assert_allclose(loc_est, loc_theo, rtol=1e-7) + assert_allclose(scale_est, scale_theo, rtol=1e-7) + + +class TestWrapCauchy: + + def
test_cdf_shape_broadcasting(self): + # Regression test for gh-13791. + # Check that wrapcauchy.cdf broadcasts the shape parameter + # correctly. + c = np.array([[0.03, 0.25], [0.5, 0.75]]) + x = np.array([[1.0], [4.0]]) + p = stats.wrapcauchy.cdf(x, c) + assert p.shape == (2, 2) + scalar_values = [stats.wrapcauchy.cdf(x1, c1) + for (x1, c1) in np.nditer((x, c))] + assert_allclose(p.ravel(), scalar_values, rtol=1e-13) + + def test_cdf_center(self): + p = stats.wrapcauchy.cdf(np.pi, 0.03) + assert_allclose(p, 0.5, rtol=1e-14) + + def test_cdf(self): + x1 = 1.0 # less than pi + x2 = 4.0 # greater than pi + c = 0.75 + p = stats.wrapcauchy.cdf([x1, x2], c) + cr = (1 + c)/(1 - c) + assert_allclose(p[0], np.arctan(cr*np.tan(x1/2))/np.pi) + assert_allclose(p[1], 1 - np.arctan(cr*np.tan(np.pi - x2/2))/np.pi) + + +def test_rvs_no_size_error(): + # _rvs methods must have parameter `size`; see gh-11394 + class rvs_no_size_gen(stats.rv_continuous): + def _rvs(self): + return 1 + + rvs_no_size = rvs_no_size_gen(name='rvs_no_size') + + with assert_raises(TypeError, match=r"_rvs\(\) got (an|\d) unexpected"): + rvs_no_size.rvs() + + +@pytest.mark.parametrize('distname, args', invdistdiscrete + invdistcont) +def test_support_gh13294_regression(distname, args): + if distname in skip_test_support_gh13294_regression: + pytest.skip(f"skipping test for the support method for " + f"distribution {distname}.") + dist = getattr(stats, distname) + # test support method with invalid arguments + if isinstance(dist, stats.rv_continuous): + # test with valid scale + if len(args) != 0: + a0, b0 = dist.support(*args) + assert_equal(a0, np.nan) + assert_equal(b0, np.nan) + # test with invalid scale + # For some distributions that take no shape parameters, only + # the invalid-scale case occurs, so it is implicitly tested + # by this test case. + loc1, scale1 = 0, -1 + a1, b1 = dist.support(*args, loc1, scale1) + assert_equal(a1, np.nan) + assert_equal(b1, np.nan) + else: + a, b = dist.support(*args) + assert_equal(a, np.nan) + assert_equal(b, np.nan) + + +def test_support_broadcasting_gh13294_regression(): + a0, b0 = stats.norm.support([0, 0, 0, 1], [1, 1, 1, -1]) + ex_a0 = np.array([-np.inf, -np.inf, -np.inf, np.nan]) + ex_b0 = np.array([np.inf, np.inf, np.inf, np.nan]) + assert_equal(a0, ex_a0) + assert_equal(b0, ex_b0) + assert a0.shape == ex_a0.shape + assert b0.shape == ex_b0.shape + + a1, b1 = stats.norm.support([], []) + ex_a1, ex_b1 = np.array([]), np.array([]) + assert_equal(a1, ex_a1) + assert_equal(b1, ex_b1) + assert a1.shape == ex_a1.shape + assert b1.shape == ex_b1.shape + + a2, b2 = stats.norm.support([0, 0, 0, 1], [-1]) + ex_a2 = np.array(4*[np.nan]) + ex_b2 = np.array(4*[np.nan]) + assert_equal(a2, ex_a2) + assert_equal(b2, ex_b2) + assert a2.shape == ex_a2.shape + assert b2.shape == ex_b2.shape + + +def test_stats_broadcasting_gh14953_regression(): + # test case in gh14953 + loc = [0., 0.] + scale = [[1.], [2.], [3.]] + assert_equal(stats.norm.var(loc, scale), [[1., 1.], [4., 4.], [9., 9.]]) + # test some edge cases + loc = np.empty((0, )) + scale = np.empty((1, 0)) + assert stats.norm.var(loc, scale).shape == (1, 0) + + +# Check a few values of the cosine distribution's cdf, sf, ppf and +# isf methods. Expected values were computed with mpmath.
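+# +# A sketch of that computation (the cosine distribution has pdf +# (1 + cos(x))/(2*pi) on [-pi, pi], so cdf(x) = (pi + x + sin(x))/(2*pi) +# and, by symmetry, sf(x) = cdf(-x)): +# +# from mpmath import mp +# mp.dps = 60 +# def cosine_cdf(x): +# x = mp.mpf(x) +# return (mp.pi + x + mp.sin(x))/(2*mp.pi) +# +# >>> float(cosine_cdf(-3.14159)) +# 4.956444476505336e-19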
+ +@pytest.mark.parametrize('x, expected', + [(-3.14159, 4.956444476505336e-19), + (3.14, 0.9999999998928399)]) +def test_cosine_cdf_sf(x, expected): + assert_allclose(stats.cosine.cdf(x), expected) + assert_allclose(stats.cosine.sf(-x), expected) + + +@pytest.mark.parametrize('p, expected', + [(1e-6, -3.1080612413765905), + (1e-17, -3.141585429601399), + (0.975, 2.1447547020964923)]) +def test_cosine_ppf_isf(p, expected): + assert_allclose(stats.cosine.ppf(p), expected) + assert_allclose(stats.cosine.isf(p), -expected) + + +def test_cosine_logpdf_endpoints(): + logp = stats.cosine.logpdf([-np.pi, np.pi]) + # reference value calculated using mpmath assuming `np.cos(-1)` is four + # floating point numbers too high. See gh-18382. + assert_array_less(logp, -37.18838327496655) + + +def test_distr_params_lists(): + # distribution objects are extra distributions added in + # test_discrete_basic. All other distributions are strings (names) + # and so we only choose those to compare whether both lists match. + discrete_distnames = {name for name, _ in distdiscrete + if isinstance(name, str)} + invdiscrete_distnames = {name for name, _ in invdistdiscrete} + assert discrete_distnames == invdiscrete_distnames + + cont_distnames = {name for name, _ in distcont} + invcont_distnames = {name for name, _ in invdistcont} + assert cont_distnames == invcont_distnames + + +def test_moment_order_4(): + # gh-13655 reported that if a distribution has a `_stats` method that + # accepts the `moments` parameter, then if the distribution's `moment` + # method is called with `order=4`, the faster/more accurate`_stats` gets + # called, but the results aren't used, and the generic `_munp` method is + # called to calculate the moment anyway. This tests that the issue has + # been fixed. + # stats.skewnorm._stats accepts the `moments` keyword + stats.skewnorm._stats(a=0, moments='k') # no failure = has `moments` + # When `moment` is called, `_stats` is used, so the moment is very accurate + # (exactly equal to Pearson's kurtosis of the normal distribution, 3) + assert stats.skewnorm.moment(order=4, a=0) == 3.0 + # At the time of gh-13655, skewnorm._munp() used the generic method + # to compute its result, which was inefficient and not very accurate. + # At that time, the following assertion would fail. skewnorm._munp() + # has since been made more accurate and efficient, so now this test + # is expected to pass. + assert stats.skewnorm._munp(4, 0) == 3.0 + + +class TestRelativisticBW: + @pytest.fixture + def ROOT_pdf_sample_data(self): + """Sample data points for pdf computed with CERN's ROOT + + See - https://root.cern/ + + Uses ROOT.TMath.BreitWignerRelativistic, available in ROOT + versions 6.27+ + + pdf calculated for Z0 Boson, W Boson, and Higgs Boson for + x in `np.linspace(0, 200, 401)`. 
+ """ + data = np.load( + Path(__file__).parent / + 'data/rel_breitwigner_pdf_sample_data_ROOT.npy' + ) + data = np.rec.fromarrays(data.T, names='x,pdf,rho,gamma') + return data + + @pytest.mark.parametrize( + "rho,gamma,rtol", [ + (36.545206797050334, 2.4952, 5e-14), # Z0 Boson + (38.55107913669065, 2.085, 1e-14), # W Boson + (96292.3076923077, 0.0013, 5e-13), # Higgs Boson + ] + ) + def test_pdf_against_ROOT(self, ROOT_pdf_sample_data, rho, gamma, rtol): + data = ROOT_pdf_sample_data[ + (ROOT_pdf_sample_data['rho'] == rho) + & (ROOT_pdf_sample_data['gamma'] == gamma) + ] + x, pdf = data['x'], data['pdf'] + assert_allclose( + pdf, stats.rel_breitwigner.pdf(x, rho, scale=gamma), rtol=rtol + ) + + @pytest.mark.parametrize("rho, Gamma, rtol", [ + (36.545206797050334, 2.4952, 5e-13), # Z0 Boson + (38.55107913669065, 2.085, 5e-13), # W Boson + (96292.3076923077, 0.0013, 5e-10), # Higgs Boson + ] + ) + def test_pdf_against_simple_implementation(self, rho, Gamma, rtol): + # reference implementation straight from formulas on Wikipedia [1] + def pdf(E, M, Gamma): + gamma = np.sqrt(M**2 * (M**2 + Gamma**2)) + k = (2 * np.sqrt(2) * M * Gamma * gamma + / (np.pi * np.sqrt(M**2 + gamma))) + return k / ((E**2 - M**2)**2 + M**2*Gamma**2) + # get reasonable values at which to evaluate the CDF + p = np.linspace(0.05, 0.95, 10) + x = stats.rel_breitwigner.ppf(p, rho, scale=Gamma) + res = stats.rel_breitwigner.pdf(x, rho, scale=Gamma) + ref = pdf(x, rho*Gamma, Gamma) + assert_allclose(res, ref, rtol=rtol) + + @pytest.mark.parametrize( + "rho,gamma", [ + pytest.param( + 36.545206797050334, 2.4952, marks=pytest.mark.slow + ), # Z0 Boson + pytest.param( + 38.55107913669065, 2.085, marks=pytest.mark.xslow + ), # W Boson + pytest.param( + 96292.3076923077, 0.0013, marks=pytest.mark.xslow + ), # Higgs Boson + ] + ) + def test_fit_floc(self, rho, gamma): + """Tests fit for cases where floc is set. + + `rel_breitwigner` has special handling for these cases. + """ + seed = 6936804688480013683 + rng = np.random.default_rng(seed) + data = stats.rel_breitwigner.rvs( + rho, scale=gamma, size=1000, random_state=rng + ) + fit = stats.rel_breitwigner.fit(data, floc=0) + assert_allclose((fit[0], fit[2]), (rho, gamma), rtol=2e-1) + assert fit[1] == 0 + # Check again with fscale set. + fit = stats.rel_breitwigner.fit(data, floc=0, fscale=gamma) + assert_allclose(fit[0], rho, rtol=1e-2) + assert (fit[1], fit[2]) == (0, gamma) + + +class TestJohnsonSU: + @pytest.mark.parametrize("case", [ # a, b, loc, scale, m1, m2, g1, g2 + (-0.01, 1.1, 0.02, 0.0001, 0.02000137427557091, + 2.1112742956578063e-08, 0.05989781342460999, 20.36324408592951-3), + (2.554395574161155, 2.2482281679651965, 0, 1, -1.54215386737391, + 0.7629882028469993, -1.256656139406788, 6.303058419339775-3)]) + def test_moment_gh18071(self, case): + # gh-18071 reported an IntegrationWarning emitted by johnsonsu.stats + # Check that the warning is no longer emitted and that the values + # are accurate compared against results from Mathematica. + # Reference values from Mathematica, e.g. 
+ # Mean[JohnsonDistribution["SU",-0.01, 1.1, 0.02, 0.0001]] + res = stats.johnsonsu.stats(*case[:4], moments='mvsk') + assert_allclose(res, case[4:], rtol=1e-14) + + +class TestTruncPareto: + def test_pdf(self): + # PDF is that of the truncated pareto distribution + b, c = 1.8, 5.3 + x = np.linspace(1.8, 5.3) + res = stats.truncpareto(b, c).pdf(x) + ref = stats.pareto(b).pdf(x) / stats.pareto(b).cdf(c) + assert_allclose(res, ref) + + @pytest.mark.parametrize('fix_loc', [True, False]) + @pytest.mark.parametrize('fix_scale', [True, False]) + @pytest.mark.parametrize('fix_b', [True, False]) + @pytest.mark.parametrize('fix_c', [True, False]) + def test_fit(self, fix_loc, fix_scale, fix_b, fix_c): + + rng = np.random.default_rng(6747363148258237171) + b, c, loc, scale = 1.8, 5.3, 1, 2.5 + dist = stats.truncpareto(b, c, loc=loc, scale=scale) + data = dist.rvs(size=500, random_state=rng) + + kwds = {} + if fix_loc: + kwds['floc'] = loc + if fix_scale: + kwds['fscale'] = scale + if fix_b: + kwds['f0'] = b + if fix_c: + kwds['f1'] = c + + if fix_loc and fix_scale and fix_b and fix_c: + message = "All parameters fixed. There is nothing to optimize." + with pytest.raises(RuntimeError, match=message): + stats.truncpareto.fit(data, **kwds) + else: + _assert_less_or_close_loglike(stats.truncpareto, data, **kwds) + + +class TestKappa3: + def test_sf(self): + # During development of gh-18822, we found that the override of + # kappa3.sf could experience overflow where the version in main did + # not. Check that this does not happen in final implementation. + sf0 = 1 - stats.kappa3.cdf(0.5, 1e5) + sf1 = stats.kappa3.sf(0.5, 1e5) + assert_allclose(sf1, sf0) + + +# Cases are (distribution name, log10 of smallest probability mass to test, +# log10 of the complement of the largest probability mass to test, atol, +# rtol). None uses default values. +@pytest.mark.parametrize("case", [("kappa3", None, None, None, None), + ("loglaplace", None, None, None, None), + ("lognorm", None, None, None, None), + ("lomax", None, None, None, None), + ("pareto", None, None, None, None),]) +def test_sf_isf_overrides(case): + # Test that SF is the inverse of ISF. Supplements + # `test_continuous_basic.check_sf_isf` for distributions with overridden + # `sf` and `isf` methods. + distname, lp1, lp2, atol, rtol = case + + lpm = np.log10(0.5) # log10 of the probability mass at the median + lp1 = lp1 or -290 + lp2 = lp2 or -14 + atol = atol or 0 + rtol = rtol or 1e-12 + dist = getattr(stats, distname) + params = dict(distcont)[distname] + dist_frozen = dist(*params) + + # Test (very deep) right tail to median. We can benchmark with random + # (loguniform) points, but strictly logspaced points are fine for tests. 
+ ref = np.logspace(lp1, lpm) + res = dist_frozen.sf(dist_frozen.isf(ref)) + assert_allclose(res, ref, atol=atol, rtol=rtol) + + # test median to left tail + ref = 1 - np.logspace(lp2, lpm, 20) + res = dist_frozen.sf(dist_frozen.isf(ref)) + assert_allclose(res, ref, atol=atol, rtol=rtol) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_entropy.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..a25da83952b702da117ffbe11d0a9ded84a49f7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_entropy.py @@ -0,0 +1,286 @@ +import numpy as np +from numpy.testing import assert_equal, assert_allclose +# avoid new uses of the following; prefer assert/np.testing.assert_allclose +from numpy.testing import (assert_, assert_almost_equal, + assert_array_almost_equal) + +import pytest +from pytest import raises as assert_raises +import scipy.stats as stats + + +class TestEntropy: + def test_entropy_positive(self): + # See ticket #497 + pk = [0.5, 0.2, 0.3] + qk = [0.1, 0.25, 0.65] + eself = stats.entropy(pk, pk) + edouble = stats.entropy(pk, qk) + assert_(0.0 == eself) + assert_(edouble >= 0.0) + + def test_entropy_base(self): + pk = np.ones(16, float) + S = stats.entropy(pk, base=2.) + assert_(abs(S - 4.) < 1.e-5) + + qk = np.ones(16, float) + qk[:8] = 2. + S = stats.entropy(pk, qk) + S2 = stats.entropy(pk, qk, base=2.) + assert_(abs(S/S2 - np.log(2.)) < 1.e-5) + + def test_entropy_zero(self): + # Test for PR-479 + assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278, + decimal=12) + + def test_entropy_2d(self): + pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] + qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]] + assert_array_almost_equal(stats.entropy(pk, qk), + [0.1933259, 0.18609809]) + + def test_entropy_2d_zero(self): + pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] + qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]] + assert_array_almost_equal(stats.entropy(pk, qk), + [np.inf, 0.18609809]) + + pk[0][0] = 0.0 + assert_array_almost_equal(stats.entropy(pk, qk), + [0.17403988, 0.18609809]) + + def test_entropy_base_2d_nondefault_axis(self): + pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] + assert_array_almost_equal(stats.entropy(pk, axis=1), + [0.63651417, 0.63651417, 0.66156324]) + + def test_entropy_2d_nondefault_axis(self): + pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] + qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]] + assert_array_almost_equal(stats.entropy(pk, qk, axis=1), + [0.231049, 0.231049, 0.127706]) + + def test_entropy_raises_value_error(self): + pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] + qk = [[0.1, 0.2], [0.6, 0.3]] + assert_raises(ValueError, stats.entropy, pk, qk) + + def test_base_entropy_with_axis_0_is_equal_to_default(self): + pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] + assert_array_almost_equal(stats.entropy(pk, axis=0), + stats.entropy(pk)) + + def test_entropy_with_axis_0_is_equal_to_default(self): + pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] + qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]] + assert_array_almost_equal(stats.entropy(pk, qk, axis=0), + stats.entropy(pk, qk)) + + def test_base_entropy_transposed(self): + pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]) + assert_array_almost_equal(stats.entropy(pk.T).T, + stats.entropy(pk, axis=1)) + + def test_entropy_transposed(self): + pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]) + qk = np.array([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]) + assert_array_almost_equal(stats.entropy(pk.T, qk.T).T, + stats.entropy(pk, qk, 
axis=1)) + + def test_entropy_broadcasting(self): + np.random.seed(0) + x = np.random.rand(3) + y = np.random.rand(2, 1) + res = stats.entropy(x, y, axis=-1) + assert_equal(res[0], stats.entropy(x, y[0])) + assert_equal(res[1], stats.entropy(x, y[1])) + + def test_entropy_shape_mismatch(self): + x = np.random.rand(10, 1, 12) + y = np.random.rand(11, 2) + message = "Array shapes are incompatible for broadcasting." + with pytest.raises(ValueError, match=message): + stats.entropy(x, y) + + def test_input_validation(self): + x = np.random.rand(10) + message = "`base` must be a positive number." + with pytest.raises(ValueError, match=message): + stats.entropy(x, base=-2) + + +class TestDifferentialEntropy: + """ + Vasicek results are compared with the R package vsgoftest. + + # library(vsgoftest) + # + # samp <- c() + # entropy.estimate(x = samp, window = ) + + """ + + def test_differential_entropy_vasicek(self): + + random_state = np.random.RandomState(0) + values = random_state.standard_normal(100) + + entropy = stats.differential_entropy(values, method='vasicek') + assert_allclose(entropy, 1.342551, rtol=1e-6) + + entropy = stats.differential_entropy(values, window_length=1, + method='vasicek') + assert_allclose(entropy, 1.122044, rtol=1e-6) + + entropy = stats.differential_entropy(values, window_length=8, + method='vasicek') + assert_allclose(entropy, 1.349401, rtol=1e-6) + + def test_differential_entropy_vasicek_2d_nondefault_axis(self): + random_state = np.random.RandomState(0) + values = random_state.standard_normal((3, 100)) + + entropy = stats.differential_entropy(values, axis=1, method='vasicek') + assert_allclose( + entropy, + [1.342551, 1.341826, 1.293775], + rtol=1e-6, + ) + + entropy = stats.differential_entropy(values, axis=1, window_length=1, + method='vasicek') + assert_allclose( + entropy, + [1.122044, 1.102944, 1.129616], + rtol=1e-6, + ) + + entropy = stats.differential_entropy(values, axis=1, window_length=8, + method='vasicek') + assert_allclose( + entropy, + [1.349401, 1.338514, 1.292332], + rtol=1e-6, + ) + + def test_differential_entropy_raises_value_error(self): + random_state = np.random.RandomState(0) + values = random_state.standard_normal((3, 100)) + + error_str = ( + r"Window length \({window_length}\) must be positive and less " + r"than half the sample size \({sample_size}\)." + ) + + sample_size = values.shape[1] + + for window_length in {-1, 0, sample_size//2, sample_size}: + + formatted_error_str = error_str.format( + window_length=window_length, + sample_size=sample_size, + ) + + with assert_raises(ValueError, match=formatted_error_str): + stats.differential_entropy( + values, + window_length=window_length, + axis=1, + ) + + def test_base_differential_entropy_with_axis_0_is_equal_to_default(self): + random_state = np.random.RandomState(0) + values = random_state.standard_normal((100, 3)) + + entropy = stats.differential_entropy(values, axis=0) + default_entropy = stats.differential_entropy(values) + assert_allclose(entropy, default_entropy) + + def test_base_differential_entropy_transposed(self): + random_state = np.random.RandomState(0) + values = random_state.standard_normal((3, 100)) + + assert_allclose( + stats.differential_entropy(values.T).T, + stats.differential_entropy(values, axis=1), + ) + + def test_input_validation(self): + x = np.random.rand(10) + + message = "`base` must be a positive number or `None`." + with pytest.raises(ValueError, match=message): + stats.differential_entropy(x, base=-2) + + message = "`method` must be one of..."
+ with pytest.raises(ValueError, match=message): + stats.differential_entropy(x, method='ekki-ekki') + + @pytest.mark.parametrize('method', ['vasicek', 'van es', + 'ebrahimi', 'correa']) + def test_consistency(self, method): + # test that method is a consistent estimator + n = 10000 if method == 'correa' else 1000000 + rvs = stats.norm.rvs(size=n, random_state=0) + expected = stats.norm.entropy() + res = stats.differential_entropy(rvs, method=method) + assert_allclose(res, expected, rtol=0.005) + + # values from differential_entropy reference [6], table 1, n=50, m=7 + norm_rmse_std_cases = { # method: (RMSE, STD) + 'vasicek': (0.198, 0.109), + 'van es': (0.212, 0.110), + 'correa': (0.135, 0.112), + 'ebrahimi': (0.128, 0.109) + } + + @pytest.mark.parametrize('method, expected', + list(norm_rmse_std_cases.items())) + def test_norm_rmse_std(self, method, expected): + # test that the RMSE and standard deviation of the estimators match + # the values given in differential_entropy reference [6]. + # Incidentally, also tests vectorization. + reps, n, m = 10000, 50, 7 + rmse_expected, std_expected = expected + rvs = stats.norm.rvs(size=(reps, n), random_state=0) + true_entropy = stats.norm.entropy() + res = stats.differential_entropy(rvs, window_length=m, + method=method, axis=-1) + assert_allclose(np.sqrt(np.mean((res - true_entropy)**2)), + rmse_expected, atol=0.005) + assert_allclose(np.std(res), std_expected, atol=0.002) + + # values from differential_entropy reference [6], table 2, n=50, m=7 + expon_rmse_std_cases = { # method: (RMSE, STD) + 'vasicek': (0.194, 0.148), + 'van es': (0.179, 0.149), + 'correa': (0.155, 0.152), + 'ebrahimi': (0.151, 0.148) + } + + @pytest.mark.parametrize('method, expected', + list(expon_rmse_std_cases.items())) + def test_expon_rmse_std(self, method, expected): + # test that the RMSE and standard deviation of the estimators match + # the values given in differential_entropy reference [6]. + # Incidentally, also tests vectorization.
+ reps, n, m = 10000, 50, 7 + rmse_expected, std_expected = expected + rvs = stats.expon.rvs(size=(reps, n), random_state=0) + true_entropy = stats.expon.entropy() + res = stats.differential_entropy(rvs, window_length=m, + method=method, axis=-1) + assert_allclose(np.sqrt(np.mean((res - true_entropy)**2)), + rmse_expected, atol=0.005) + assert_allclose(np.std(res), std_expected, atol=0.002) + + @pytest.mark.parametrize('n, method', [(8, 'van es'), + (12, 'ebrahimi'), + (1001, 'vasicek')]) + def test_method_auto(self, n, method): + rvs = stats.norm.rvs(size=(n,), random_state=0) + res1 = stats.differential_entropy(rvs) + res2 = stats.differential_entropy(rvs, method=method) + assert res1 == res2 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_fast_gen_inversion.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_fast_gen_inversion.py new file mode 100644 index 0000000000000000000000000000000000000000..3369af9691501fcf5b0fb1bde06d82afceacae40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_fast_gen_inversion.py @@ -0,0 +1,430 @@ +import pytest +import warnings +import numpy as np +from numpy.testing import (assert_array_equal, assert_allclose, + suppress_warnings) +from copy import deepcopy +from scipy.stats.sampling import FastGeneratorInversion +from scipy import stats + + +def test_bad_args(): + # loc and scale must be scalar + with pytest.raises(ValueError, match="loc must be scalar"): + FastGeneratorInversion(stats.norm(loc=(1.2, 1.3))) + with pytest.raises(ValueError, match="scale must be scalar"): + FastGeneratorInversion(stats.norm(scale=[1.5, 5.7])) + + with pytest.raises(ValueError, match="'test' cannot be used to seed"): + FastGeneratorInversion(stats.norm(), random_state="test") + + msg = "Each of the 1 shape parameters must be a scalar" + with pytest.raises(ValueError, match=msg): + FastGeneratorInversion(stats.gamma([1.3, 2.5])) + + with pytest.raises(ValueError, match="`dist` must be a frozen"): + FastGeneratorInversion("xy") + + with pytest.raises(ValueError, match="Distribution 'truncnorm' is not"): + FastGeneratorInversion(stats.truncnorm(1.3, 4.5)) + + +def test_random_state(): + # fixed seed + gen = FastGeneratorInversion(stats.norm(), random_state=68734509) + x1 = gen.rvs(size=10) + gen.random_state = 68734509 + x2 = gen.rvs(size=10) + assert_array_equal(x1, x2) + + # Generator + urng = np.random.default_rng(20375857) + gen = FastGeneratorInversion(stats.norm(), random_state=urng) + x1 = gen.rvs(size=10) + gen.random_state = np.random.default_rng(20375857) + x2 = gen.rvs(size=10) + assert_array_equal(x1, x2) + + # RandomState + urng = np.random.RandomState(2364) + gen = FastGeneratorInversion(stats.norm(), random_state=urng) + x1 = gen.rvs(size=10) + gen.random_state = np.random.RandomState(2364) + x2 = gen.rvs(size=10) + assert_array_equal(x1, x2) + + # if evaluate_error is called, it must not interfere with the random_state + # used by rvs + gen = FastGeneratorInversion(stats.norm(), random_state=68734509) + x1 = gen.rvs(size=10) + _ = gen.evaluate_error(size=5) # this will generate 5 uniform rvs + x2 = gen.rvs(size=10) + gen.random_state = 68734509 + x3 = gen.rvs(size=20) + assert_array_equal(x2, x3[10:]) + + +dists_with_params = [ + ("alpha", (3.5,)), + ("anglit", ()), + ("argus", (3.5,)), + ("argus", (5.1,)), + ("beta", (1.5, 0.9)), + ("cosine", ()), + ("betaprime", (2.5, 3.3)), + ("bradford", (1.2,)), + ("burr", (1.3, 2.4)), + ("burr12", (0.7, 1.2)), + ("cauchy", ()), + ("chi2", (3.5,)), + ("chi", 
(4.5,)), + ("crystalball", (0.7, 1.2)), + ("expon", ()), + ("gamma", (1.5,)), + ("gennorm", (2.7,)), + ("gumbel_l", ()), + ("gumbel_r", ()), + ("hypsecant", ()), + ("invgauss", (3.1,)), + ("invweibull", (1.5,)), + ("laplace", ()), + ("logistic", ()), + ("maxwell", ()), + ("moyal", ()), + ("norm", ()), + ("pareto", (1.3,)), + ("powerlaw", (7.6,)), + ("rayleigh", ()), + ("semicircular", ()), + ("t", (5.7,)), + ("wald", ()), + ("weibull_max", (2.4,)), + ("weibull_min", (1.2,)), +] + + +@pytest.mark.parametrize(("distname, args"), dists_with_params) +def test_rvs_and_ppf(distname, args): + # check sample against rvs generated by rv_continuous + urng = np.random.default_rng(9807324628097097) + rng1 = getattr(stats, distname)(*args) + rvs1 = rng1.rvs(size=500, random_state=urng) + rng2 = FastGeneratorInversion(rng1, random_state=urng) + rvs2 = rng2.rvs(size=500) + assert stats.cramervonmises_2samp(rvs1, rvs2).pvalue > 0.01 + + # check ppf + q = [0.001, 0.1, 0.5, 0.9, 0.999] + assert_allclose(rng1.ppf(q), rng2.ppf(q), atol=1e-10) + + +@pytest.mark.parametrize(("distname, args"), dists_with_params) +def test_u_error(distname, args): + # check sample against rvs generated by rv_continuous + dist = getattr(stats, distname)(*args) + with suppress_warnings() as sup: + # filter the warnings thrown by UNU.RAN + sup.filter(RuntimeWarning) + rng = FastGeneratorInversion(dist) + u_error, x_error = rng.evaluate_error( + size=10_000, random_state=9807324628097097, x_error=False + ) + assert u_error <= 1e-10 + + +@pytest.mark.xfail(reason="geninvgauss CDF is not accurate") +def test_geninvgauss_uerror(): + dist = stats.geninvgauss(3.2, 1.5) + rng = FastGeneratorInversion(dist) + err = rng.evaluate_error(size=10_000, random_state=67982) + assert err[0] < 1e-10 + +# TODO: add more distributions +@pytest.mark.parametrize(("distname, args"), [("beta", (0.11, 0.11))]) +def test_error_extreme_params(distname, args): + # take extreme parameters where u-error might not be below the tolerance + # due to limitations of floating point arithmetic + with suppress_warnings() as sup: + # filter the warnings thrown by UNU.RAN for such extreme parameters + sup.filter(RuntimeWarning) + dist = getattr(stats, distname)(*args) + rng = FastGeneratorInversion(dist) + u_error, x_error = rng.evaluate_error( + size=10_000, random_state=980732462809709732623, x_error=True + ) + if u_error >= 2.5 * 1e-10: + assert x_error < 1e-9 + + +def test_evaluate_error_inputs(): + gen = FastGeneratorInversion(stats.norm()) + with pytest.raises(ValueError, match="size must be an integer"): + gen.evaluate_error(size=3.5) + with pytest.raises(ValueError, match="size must be an integer"): + gen.evaluate_error(size=(3, 3)) + + +def test_rvs_ppf_loc_scale(): + loc, scale = 3.5, 2.3 + dist = stats.norm(loc=loc, scale=scale) + rng = FastGeneratorInversion(dist, random_state=1234) + r = rng.rvs(size=1000) + r_rescaled = (r - loc) / scale + assert stats.cramervonmises(r_rescaled, "norm").pvalue > 0.01 + q = [0.001, 0.1, 0.5, 0.9, 0.999] + assert_allclose(rng._ppf(q), rng.ppf(q), atol=1e-10) + + +def test_domain(): + # only a basic check that the domain argument is passed to the + # UNU.RAN generators + rng = FastGeneratorInversion(stats.norm(), domain=(-1, 1)) + r = rng.rvs(size=100) + assert -1 <= r.min() < r.max() <= 1 + + # if loc and scale are used, new domain is loc + scale*domain + loc, scale = 3.5, 1.3 + dist = stats.norm(loc=loc, scale=scale) + rng = FastGeneratorInversion(dist, domain=(-1.5, 2)) + r = rng.rvs(size=100) + lb, ub = loc - scale * 1.5, 
loc + scale * 2 + assert lb <= r.min() < r.max() <= ub + + +@pytest.mark.parametrize(("distname, args, expected"), + [("beta", (3.5, 2.5), (0, 1)), + ("norm", (), (-np.inf, np.inf))]) +def test_support(distname, args, expected): + # test that the support is updated if truncation and loc/scale are applied + # use beta distribution since it is a transformed betaprime distribution, + # so it is important that the correct support is considered + # (i.e., the support of beta is (0,1), while betaprime is (0, inf)) + dist = getattr(stats, distname)(*args) + rng = FastGeneratorInversion(dist) + assert_array_equal(rng.support(), expected) + rng.loc = 1 + rng.scale = 2 + assert_array_equal(rng.support(), 1 + 2*np.array(expected)) + + +@pytest.mark.parametrize(("distname, args"), + [("beta", (3.5, 2.5)), ("norm", ())]) +def test_support_truncation(distname, args): + # similar test for truncation + dist = getattr(stats, distname)(*args) + rng = FastGeneratorInversion(dist, domain=(0.5, 0.7)) + assert_array_equal(rng.support(), (0.5, 0.7)) + rng.loc = 1 + rng.scale = 2 + assert_array_equal(rng.support(), (1 + 2 * 0.5, 1 + 2 * 0.7)) + + +def test_domain_shift_truncation(): + # center of norm is zero, it should be shifted to the left endpoint of + # domain. if this was not the case, PINV in UNURAN would raise a warning + # as the center is not inside the domain + with warnings.catch_warnings(): + warnings.simplefilter("error") + rng = FastGeneratorInversion(stats.norm(), domain=(1, 2)) + r = rng.rvs(size=100) + assert 1 <= r.min() < r.max() <= 2 + + +def test_non_rvs_methods_with_domain(): + # as a first step, compare truncated normal against stats.truncnorm + rng = FastGeneratorInversion(stats.norm(), domain=(2.3, 3.2)) + trunc_norm = stats.truncnorm(2.3, 3.2) + # take values that are inside and outside the domain + x = (2.0, 2.4, 3.0, 3.4) + p = (0.01, 0.5, 0.99) + assert_allclose(rng._cdf(x), trunc_norm.cdf(x)) + assert_allclose(rng._ppf(p), trunc_norm.ppf(p)) + loc, scale = 2, 3 + rng.loc = 2 + rng.scale = 3 + trunc_norm = stats.truncnorm(2.3, 3.2, loc=loc, scale=scale) + x = np.array(x) * scale + loc + assert_allclose(rng._cdf(x), trunc_norm.cdf(x)) + assert_allclose(rng._ppf(p), trunc_norm.ppf(p)) + + # do another sanity check with beta distribution + # in that case, it is important to use the correct domain since beta + # is a transformation of betaprime which has a different support + rng = FastGeneratorInversion(stats.beta(2.5, 3.5), domain=(0.3, 0.7)) + rng.loc = 2 + rng.scale = 2.5 + # the support is (2.75, 3.75) (2 + 2.5 * 0.3, 2 + 2.5 * 0.7) + assert_array_equal(rng.support(), (2.75, 3.75)) + x = np.array([2.74, 2.76, 3.74, 3.76]) + # the cdf needs to be 0 to the left and 1 to the right of the domain + y_cdf = rng._cdf(x) + assert_array_equal((y_cdf[0], y_cdf[3]), (0, 1)) + assert np.min(y_cdf[1:3]) > 0 + # ppf needs to map 0 and 1 to the boundaries + assert_allclose(rng._ppf(y_cdf), (2.75, 2.76, 3.74, 3.75)) + + +def test_non_rvs_methods_without_domain(): + norm_dist = stats.norm() + rng = FastGeneratorInversion(norm_dist) + x = np.linspace(-3, 3, num=10) + p = (0.01, 0.5, 0.99) + assert_allclose(rng._cdf(x), norm_dist.cdf(x)) + assert_allclose(rng._ppf(p), norm_dist.ppf(p)) + loc, scale = 0.5, 1.3 + rng.loc = loc + rng.scale = scale + norm_dist = stats.norm(loc=loc, scale=scale) + assert_allclose(rng._cdf(x), norm_dist.cdf(x)) + assert_allclose(rng._ppf(p), norm_dist.ppf(p)) + +@pytest.mark.parametrize(("domain, x"), + [(None, 0.5), + ((0, 1), 0.5), + ((0, 1), 1.5)]) +def test_scalar_inputs(domain, x): + """
pdf, cdf etc should map scalar values to scalars. check with and + w/o domain since domain impacts pdf, cdf etc + Take x inside and outside of domain """ + rng = FastGeneratorInversion(stats.norm(), domain=domain) + assert np.isscalar(rng._cdf(x)) + assert np.isscalar(rng._ppf(0.5)) + + +def test_domain_argus_large_chi(): + # for large chi, the Gamma distribution is used and the domain has to be + # transformed. this is a test to ensure that the transformation works + chi, lb, ub = 5.5, 0.25, 0.75 + rng = FastGeneratorInversion(stats.argus(chi), domain=(lb, ub)) + rng.random_state = 4574 + r = rng.rvs(size=500) + assert lb <= r.min() < r.max() <= ub + # perform goodness of fit test with conditional cdf + cdf = stats.argus(chi).cdf + prob = cdf(ub) - cdf(lb) + assert stats.cramervonmises(r, lambda x: cdf(x) / prob).pvalue > 0.05 + + +def test_setting_loc_scale(): + rng = FastGeneratorInversion(stats.norm(), random_state=765765864) + r1 = rng.rvs(size=1000) + rng.loc = 3.0 + rng.scale = 2.5 + r2 = rng.rvs(1000) + # rescaled r2 should be again standard normal + assert stats.cramervonmises_2samp(r1, (r2 - 3) / 2.5).pvalue > 0.05 + # reset values to default loc=0, scale=1 + rng.loc = 0 + rng.scale = 1 + r2 = rng.rvs(1000) + assert stats.cramervonmises_2samp(r1, r2).pvalue > 0.05 + + +def test_ignore_shape_range(): + msg = "No generator is defined for the shape parameters" + with pytest.raises(ValueError, match=msg): + rng = FastGeneratorInversion(stats.t(0.03)) + rng = FastGeneratorInversion(stats.t(0.03), ignore_shape_range=True) + # we can ignore the recommended range of shape parameters + # but u-error can be expected to be too large in that case + u_err, _ = rng.evaluate_error(size=1000, random_state=234) + assert u_err >= 1e-6 + +@pytest.mark.xfail_on_32bit( + "NumericalInversePolynomial.qrvs fails for Win 32-bit" +) +class TestQRVS: + def test_input_validation(self): + gen = FastGeneratorInversion(stats.norm()) + + match = "`qmc_engine` must be an instance of..." + with pytest.raises(ValueError, match=match): + gen.qrvs(qmc_engine=0) + + match = "`d` must be consistent with dimension of `qmc_engine`." + with pytest.raises(ValueError, match=match): + gen.qrvs(d=3, qmc_engine=stats.qmc.Halton(2)) + + qrngs = [None, stats.qmc.Sobol(1, seed=0), stats.qmc.Halton(3, seed=0)] + # `size=None` should not add anything to the shape, `size=1` should + sizes = [ + (None, tuple()), + (1, (1,)), + (4, (4,)), + ((4,), (4,)), + ((2, 4), (2, 4)), + ] + # Neither `d=None` nor `d=1` should add anything to the shape + ds = [(None, tuple()), (1, tuple()), (3, (3,))] + + @pytest.mark.parametrize("qrng", qrngs) + @pytest.mark.parametrize("size_in, size_out", sizes) + @pytest.mark.parametrize("d_in, d_out", ds) + def test_QRVS_shape_consistency(self, qrng, size_in, size_out, + d_in, d_out): + gen = FastGeneratorInversion(stats.norm()) + + # If d and qrng.d are inconsistent, an error is raised + if d_in is not None and qrng is not None and qrng.d != d_in: + match = "`d` must be consistent with dimension of `qmc_engine`." 
+ with pytest.raises(ValueError, match=match): + gen.qrvs(size_in, d=d_in, qmc_engine=qrng) + return + + # Sometimes d is really determined by qrng + if d_in is None and qrng is not None and qrng.d != 1: + d_out = (qrng.d,) + + shape_expected = size_out + d_out + + qrng2 = deepcopy(qrng) + qrvs = gen.qrvs(size=size_in, d=d_in, qmc_engine=qrng) + if size_in is not None: + assert qrvs.shape == shape_expected + + if qrng2 is not None: + uniform = qrng2.random(np.prod(size_in) or 1) + qrvs2 = stats.norm.ppf(uniform).reshape(shape_expected) + assert_allclose(qrvs, qrvs2, atol=1e-12) + + def test_QRVS_size_tuple(self): + # QMCEngine samples are always of shape (n, d). When `size` is a tuple, + # we set `n = prod(size)` in the call to qmc_engine.random, transform + # the sample, and reshape it to the final dimensions. When we reshape, + # we need to be careful, because the _columns_ of the sample returned + # by a QMCEngine are "independent"-ish, but the elements within the + # columns are not. We need to make sure that this doesn't get mixed up + # by reshaping: qrvs[..., i] should remain "independent"-ish of + # qrvs[..., i+1], but the elements within qrvs[..., i] should be + # transformed from the same low-discrepancy sequence. + + gen = FastGeneratorInversion(stats.norm()) + + size = (3, 4) + d = 5 + qrng = stats.qmc.Halton(d, seed=0) + qrng2 = stats.qmc.Halton(d, seed=0) + + uniform = qrng2.random(np.prod(size)) + + qrvs = gen.qrvs(size=size, d=d, qmc_engine=qrng) + qrvs2 = stats.norm.ppf(uniform) + + for i in range(d): + sample = qrvs[..., i] + sample2 = qrvs2[:, i].reshape(size) + assert_allclose(sample, sample2, atol=1e-12) + + +def test_burr_overflow(): + # this case leads to an overflow error if math.exp is used + # in the definition of the burr pdf instead of np.exp + # a direct implementation of the PDF as x**(-c-1) / (1+x**(-c))**(d+1) + # also leads to an overflow error in the setup + args = (1.89128135, 0.30195177) + with suppress_warnings() as sup: + # filter potential overflow warning + sup.filter(RuntimeWarning) + gen = FastGeneratorInversion(stats.burr(*args)) + u_error, _ = gen.evaluate_error(random_state=4326) + assert u_error <= 1e-10 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_fit.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_fit.py new file mode 100644 index 0000000000000000000000000000000000000000..bcb776f71e35c5e603c3d54a34a22768c0bf637d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_fit.py @@ -0,0 +1,1027 @@ +import os +import numpy as np +import numpy.testing as npt +from numpy.testing import assert_allclose, assert_equal +import pytest +from scipy import stats +from scipy.optimize import differential_evolution + +from .test_continuous_basic import distcont +from scipy.stats._distn_infrastructure import FitError +from scipy.stats._distr_params import distdiscrete +from scipy.stats import goodness_of_fit + + +# this is not a proper statistical test for convergence, but only +# verifies that the estimate and true values don't differ by too much + +fit_sizes = [1000, 5000, 10000] # sample sizes to try + +thresh_percent = 0.25 # percent of true parameters for fail cut-off +thresh_min = 0.75 # minimum difference estimate - true to fail test + +mle_failing_fits = [ + 'gausshyper', + 'genexpon', + 'gengamma', + 'kappa4', + 'ksone', + 'kstwo', + 'ncf', + 'ncx2', + 'truncexpon', + 'tukeylambda', + 'vonmises', + 'levy_stable', + 'trapezoid', + 'truncweibull_min', + 'studentized_range', +] + +# The MLE fit 
method of these distributions doesn't perform well when all +# parameters are fit, so test them with the location fixed at 0. +mle_use_floc0 = [ + 'burr', + 'chi', + 'chi2', + 'mielke', + 'pearson3', + 'genhalflogistic', + 'rdist', + 'pareto', + 'powerlaw', # distfn.nnlf(est2, rvs) > distfn.nnlf(est1, rvs) otherwise + 'powerlognorm', + 'wrapcauchy', + 'rel_breitwigner', +] + +mm_failing_fits = ['alpha', 'betaprime', 'burr', 'burr12', 'cauchy', 'chi', + 'chi2', 'crystalball', 'dgamma', 'dweibull', 'f', + 'fatiguelife', 'fisk', 'foldcauchy', 'genextreme', + 'gengamma', 'genhyperbolic', 'gennorm', 'genpareto', + 'halfcauchy', 'invgamma', 'invweibull', 'jf_skew_t', + 'johnsonsu', 'kappa3', 'ksone', 'kstwo', 'levy', 'levy_l', + 'levy_stable', 'loglaplace', 'lomax', 'mielke', 'nakagami', + 'ncf', 'nct', 'ncx2', 'pareto', 'powerlognorm', 'powernorm', + 'rel_breitwigner', 'skewcauchy', 't', 'trapezoid', 'triang', + 'truncpareto', 'truncweibull_min', 'tukeylambda', + 'studentized_range'] + +# not sure if these fail, but they caused my patience to fail +mm_slow_fits = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon', + 'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb', + 'kappa4', 'kstwobign', 'recipinvgauss', + 'truncexpon', 'vonmises', 'vonmises_line'] + +failing_fits = {"MM": mm_failing_fits + mm_slow_fits, "MLE": mle_failing_fits} +fail_interval_censored = {"truncpareto"} + +# Don't run the fit test on these: +skip_fit = [ + 'erlang', # Subclass of gamma, generates a warning. + 'genhyperbolic', # too slow +] + + +def cases_test_cont_fit(): + # this tests the closeness of the estimated parameters to the true + # parameters with fit method of continuous distributions + # Note: is slow, some distributions don't converge with sample + # size <= 10000 + for distname, arg in distcont: + if distname not in skip_fit: + yield distname, arg + + +@pytest.mark.slow +@pytest.mark.parametrize('distname,arg', cases_test_cont_fit()) +@pytest.mark.parametrize('method', ["MLE", "MM"]) +def test_cont_fit(distname, arg, method): + if distname in failing_fits[method]: + # Skip failing fits unless overridden + try: + xfail = not int(os.environ['SCIPY_XFAIL']) + except Exception: + xfail = True + if xfail: + msg = "Fitting %s doesn't work reliably yet" % distname + msg += (" [Set environment variable SCIPY_XFAIL=1 to run this" + " test nevertheless.]") + pytest.xfail(msg) + + distfn = getattr(stats, distname) + + truearg = np.hstack([arg, [0.0, 1.0]]) + diffthreshold = np.max(np.vstack([truearg*thresh_percent, + np.full(distfn.numargs+2, thresh_min)]), + 0) + + for fit_size in fit_sizes: + # Note that if a fit succeeds, the other fit_sizes are skipped + np.random.seed(1234) + + with np.errstate(all='ignore'): + rvs = distfn.rvs(size=fit_size, *arg) + if method == 'MLE' and distfn.name in mle_use_floc0: + kwds = {'floc': 0} + else: + kwds = {} + # start with default values + est = distfn.fit(rvs, method=method, **kwds) + if method == 'MLE': + # Trivial test of the use of CensoredData. The fit() method + # will check that data contains no actual censored data, and + # do a regular uncensored fit. + data1 = stats.CensoredData(rvs) + est1 = distfn.fit(data1, **kwds) + msg = ('Different results fitting uncensored data wrapped as' + f' CensoredData: {distfn.name}: est={est} est1={est1}') + assert_allclose(est1, est, rtol=1e-10, err_msg=msg) + if method == 'MLE' and distname not in fail_interval_censored: + # Convert the first `nic` values in rvs to interval-censored + # values. 
The interval is small, so est2 should be close to + # est. + nic = 15 + interval = np.column_stack((rvs, rvs)) + interval[:nic, 0] *= 0.99 + interval[:nic, 1] *= 1.01 + interval.sort(axis=1) + data2 = stats.CensoredData(interval=interval) + est2 = distfn.fit(data2, **kwds) + msg = ('Different results fitting interval-censored' + f' data: {distfn.name}: est={est} est2={est2}') + assert_allclose(est2, est, rtol=0.05, err_msg=msg) + + diff = est - truearg + + # threshold for location + diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent, + thresh_min]) + + if np.any(np.isnan(est)): + raise AssertionError('nan returned in fit') + else: + if np.all(np.abs(diff) <= diffthreshold): + break + else: + txt = 'parameter: %s\n' % str(truearg) + txt += 'estimated: %s\n' % str(est) + txt += 'diff : %s\n' % str(diff) + raise AssertionError('fit not very good in %s\n' % distfn.name + txt) + + +def _check_loc_scale_mle_fit(name, data, desired, atol=None): + d = getattr(stats, name) + actual = d.fit(data)[-2:] + assert_allclose(actual, desired, atol=atol, + err_msg='poor mle fit of (loc, scale) in %s' % name) + + +def test_non_default_loc_scale_mle_fit(): + data = np.array([1.01, 1.78, 1.78, 1.78, 1.88, 1.88, 1.88, 2.00]) + _check_loc_scale_mle_fit('uniform', data, [1.01, 0.99], 1e-3) + _check_loc_scale_mle_fit('expon', data, [1.01, 0.73875], 1e-3) + + +def test_expon_fit(): + """gh-6167""" + data = [0, 0, 0, 0, 2, 2, 2, 2] + phat = stats.expon.fit(data, floc=0) + assert_allclose(phat, [0, 1.0], atol=1e-3) + + +def test_fit_error(): + data = np.concatenate([np.zeros(29), np.ones(21)]) + message = "Optimization converged to parameters that are..." + with pytest.raises(FitError, match=message), \ + pytest.warns(RuntimeWarning): + stats.beta.fit(data) + + +@pytest.mark.parametrize("dist, params", + [(stats.norm, (0.5, 2.5)), # type: ignore[attr-defined] + (stats.binom, (10, 0.3, 2))]) # type: ignore[attr-defined] +def test_nnlf_and_related_methods(dist, params): + rng = np.random.default_rng(983459824) + + if hasattr(dist, 'pdf'): + logpxf = dist.logpdf + else: + logpxf = dist.logpmf + + x = dist.rvs(*params, size=100, random_state=rng) + ref = -logpxf(x, *params).sum() + res1 = dist.nnlf(params, x) + res2 = dist._penalized_nnlf(params, x) + assert_allclose(res1, ref) + assert_allclose(res2, ref) + + +def cases_test_fit_mle(): + # These fail default test or hang + skip_basic_fit = {'argus', 'foldnorm', 'truncpareto', 'truncweibull_min', + 'ksone', 'levy_stable', 'studentized_range', 'kstwo', + 'arcsine'} + + # Please keep this list in alphabetical order... + slow_basic_fit = {'alpha', + 'betaprime', 'binom', 'bradford', 'burr12', + 'chi', 'crystalball', 'dweibull', 'exponpow', + 'f', 'fatiguelife', 'fisk', 'foldcauchy', + 'genexpon', 'genextreme', 'gennorm', 'genpareto', + 'gompertz', 'halfgennorm', 'invgauss', 'invweibull', + 'jf_skew_t', 'johnsonsb', 'johnsonsu', 'kappa3', + 'kstwobign', 'loglaplace', 'lognorm', 'lomax', 'mielke', + 'nakagami', 'nbinom', 'norminvgauss', + 'pareto', 'pearson3', 'powerlaw', 'powernorm', + 'randint', 'rdist', 'recipinvgauss', 'rice', + 't', 'uniform', 'weibull_max', 'wrapcauchy'} + + # Please keep this list in alphabetical order... 
+ xslow_basic_fit = {'beta', 'betabinom', 'burr', 'exponweib', + 'gausshyper', 'gengamma', 'genhalflogistic', + 'genhyperbolic', 'geninvgauss', + 'hypergeom', 'kappa4', 'loguniform', + 'ncf', 'nchypergeom_fisher', 'nchypergeom_wallenius', + 'nct', 'ncx2', 'nhypergeom', + 'powerlognorm', 'reciprocal', 'rel_breitwigner', + 'skellam', 'trapezoid', 'triang', 'truncnorm', + 'tukeylambda', 'zipfian'} + + for dist in dict(distdiscrete + distcont): + if dist in skip_basic_fit or not isinstance(dist, str): + reason = "tested separately" + yield pytest.param(dist, marks=pytest.mark.skip(reason=reason)) + elif dist in slow_basic_fit: + reason = "too slow (>= 0.25s)" + yield pytest.param(dist, marks=pytest.mark.slow(reason=reason)) + elif dist in xslow_basic_fit: + reason = "too slow (>= 1.0s)" + yield pytest.param(dist, marks=pytest.mark.xslow(reason=reason)) + else: + yield dist + + +def cases_test_fit_mse(): + # the first four are so slow that I'm not sure whether they would pass + skip_basic_fit = {'levy_stable', 'studentized_range', 'ksone', 'skewnorm', + 'norminvgauss', # super slow (~1 hr) but passes + 'kstwo', # very slow (~25 min) but passes + 'geninvgauss', # quite slow (~4 minutes) but passes + 'gausshyper', 'genhyperbolic', # integration warnings + 'tukeylambda', # close, but doesn't meet tolerance + 'vonmises'} # can have negative CDF; doesn't play nice + + # Please keep this list in alphabetical order... + slow_basic_fit = {'alpha', 'anglit', 'arcsine', 'betabinom', 'bradford', + 'chi', 'chi2', 'crystalball', 'dgamma', 'dweibull', + 'erlang', 'exponnorm', 'exponpow', 'exponweib', + 'fatiguelife', 'fisk', 'foldcauchy', 'foldnorm', + 'gamma', 'genexpon', 'genextreme', 'genhalflogistic', + 'genlogistic', 'genpareto', 'gompertz', + 'hypergeom', 'invweibull', 'jf_skew_t', 'johnsonsb', + 'johnsonsu', 'kappa3', 'kstwobign', + 'laplace_asymmetric', 'loggamma', 'loglaplace', + 'lognorm', 'lomax', + 'maxwell', 'mielke', 'nakagami', 'nhypergeom', + 'pareto', 'powernorm', 'randint', 'recipinvgauss', + 'semicircular', + 't', 'triang', 'truncexpon', 'truncpareto', + 'truncweibull_min', + 'uniform', 'vonmises_line', + 'wald', 'weibull_max', 'weibull_min', 'wrapcauchy'} + + # Please keep this list in alphabetical order... + xslow_basic_fit = {'beta', 'betaprime', 'burr', 'burr12', + 'f', 'gengamma', 'gennorm', + 'halfgennorm', 'invgamma', 'invgauss', + 'kappa4', 'loguniform', + 'ncf', 'nchypergeom_fisher', 'nchypergeom_wallenius', + 'nct', 'ncx2', + 'pearson3', 'powerlaw', 'powerlognorm', + 'rdist', 'reciprocal', 'rel_breitwigner', 'rice', + 'trapezoid', 'truncnorm', + 'zipfian'} + + warns_basic_fit = {'skellam'} # can remove mark after gh-14901 is resolved + + for dist in dict(distdiscrete + distcont): + if dist in skip_basic_fit or not isinstance(dist, str): + reason = "Fails. Oh well." 
+ yield pytest.param(dist, marks=pytest.mark.skip(reason=reason)) + elif dist in slow_basic_fit: + reason = "too slow (>= 0.25s)" + yield pytest.param(dist, marks=pytest.mark.slow(reason=reason)) + elif dist in xslow_basic_fit: + reason = "too slow (>= 1.0s)" + yield pytest.param(dist, marks=pytest.mark.xslow(reason=reason)) + elif dist in warns_basic_fit: + mark = pytest.mark.filterwarnings('ignore::RuntimeWarning') + yield pytest.param(dist, marks=mark) + else: + yield dist + + +def cases_test_fitstart(): + for distname, shapes in dict(distcont).items(): + if (not isinstance(distname, str) or + distname in {'studentized_range', 'recipinvgauss'}): # slow + continue + yield distname, shapes + + +@pytest.mark.parametrize('distname, shapes', cases_test_fitstart()) +def test_fitstart(distname, shapes): + dist = getattr(stats, distname) + rng = np.random.default_rng(216342614) + data = rng.random(10) + + with np.errstate(invalid='ignore', divide='ignore'): # irrelevant to test + guess = dist._fitstart(data) + + assert dist._argcheck(*guess[:-2]) + + +def assert_nlff_less_or_close(dist, data, params1, params0, rtol=1e-7, atol=0, + nlff_name='nnlf'): + nlff = getattr(dist, nlff_name) + nlff1 = nlff(params1, data) + nlff0 = nlff(params0, data) + if not (nlff1 < nlff0): + np.testing.assert_allclose(nlff1, nlff0, rtol=rtol, atol=atol) + + +class TestFit: + dist = stats.binom # type: ignore[attr-defined] + seed = 654634816187 + rng = np.random.default_rng(seed) + data = stats.binom.rvs(5, 0.5, size=100, random_state=rng) # type: ignore[attr-defined] # noqa: E501 + shape_bounds_a = [(1, 10), (0, 1)] + shape_bounds_d = {'n': (1, 10), 'p': (0, 1)} + atol = 5e-2 + rtol = 1e-2 + tols = {'atol': atol, 'rtol': rtol} + + def opt(self, *args, **kwds): + return differential_evolution(*args, seed=0, **kwds) + + def test_dist_iv(self): + message = "`dist` must be an instance of..." + with pytest.raises(ValueError, match=message): + stats.fit(10, self.data, self.shape_bounds_a) + + def test_data_iv(self): + message = "`data` must be exactly one-dimensional." + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, [[1, 2, 3]], self.shape_bounds_a) + + message = "All elements of `data` must be finite numbers." + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, [1, 2, 3, np.nan], self.shape_bounds_a) + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, [1, 2, 3, np.inf], self.shape_bounds_a) + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, ['1', '2', '3'], self.shape_bounds_a) + + def test_bounds_iv(self): + message = "Bounds provided for the following unrecognized..." + shape_bounds = {'n': (1, 10), 'p': (0, 1), '1': (0, 10)} + with pytest.warns(RuntimeWarning, match=message): + stats.fit(self.dist, self.data, shape_bounds) + + message = "Each element of a `bounds` sequence must be a tuple..." + shape_bounds = [(1, 10, 3), (0, 1)] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, shape_bounds) + + message = "Each element of `bounds` must be a tuple specifying..." + shape_bounds = [(1, 10, 3), (0, 1, 0.5)] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, shape_bounds) + shape_bounds = [1, 0] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, shape_bounds) + + message = "A `bounds` sequence must contain at least 2 elements..." 
+ shape_bounds = [(1, 10)] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, shape_bounds) + + message = "A `bounds` sequence may not contain more than 3 elements..." + bounds = [(1, 10), (1, 10), (1, 10), (1, 10)] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, bounds) + + message = "There are no values for `p` on the interval..." + shape_bounds = {'n': (1, 10), 'p': (1, 0)} + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, shape_bounds) + + message = "There are no values for `n` on the interval..." + shape_bounds = [(10, 1), (0, 1)] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, shape_bounds) + + message = "There are no integer values for `n` on the interval..." + shape_bounds = [(1.4, 1.6), (0, 1)] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, shape_bounds) + + message = "The intersection of user-provided bounds for `n`" + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data) + shape_bounds = [(-np.inf, np.inf), (0, 1)] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, shape_bounds) + + def test_guess_iv(self): + message = "Guesses provided for the following unrecognized..." + guess = {'n': 1, 'p': 0.5, '1': 255} + with pytest.warns(RuntimeWarning, match=message): + stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess) + + message = "Each element of `guess` must be a scalar..." + guess = {'n': 1, 'p': 'hi'} + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess) + guess = [1, 'f'] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess) + guess = [[1, 2]] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess) + + message = "A `guess` sequence must contain at least 2..." + guess = [1] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess) + + message = "A `guess` sequence may not contain more than 3..." + guess = [1, 2, 3, 4] + with pytest.raises(ValueError, match=message): + stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess) + + message = "Guess for parameter `n` rounded.*|Guess for parameter `p` clipped.*" + guess = {'n': 4.5, 'p': -0.5} + with pytest.warns(RuntimeWarning, match=message): + stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess) + + message = "Guess for parameter `loc` rounded..." + guess = [5, 0.5, 0.5] + with pytest.warns(RuntimeWarning, match=message): + stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess) + + message = "Guess for parameter `p` clipped..." + guess = {'n': 5, 'p': -0.5} + with pytest.warns(RuntimeWarning, match=message): + stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess) + + message = "Guess for parameter `loc` clipped..." 
+ guess = [5, 0.5, 1] + with pytest.warns(RuntimeWarning, match=message): + stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess) + + def basic_fit_test(self, dist_name, method): + + N = 5000 + dist_data = dict(distcont + distdiscrete) + rng = np.random.default_rng(self.seed) + dist = getattr(stats, dist_name) + shapes = np.array(dist_data[dist_name]) + bounds = np.empty((len(shapes) + 2, 2), dtype=np.float64) + bounds[:-2, 0] = shapes/10.**np.sign(shapes) + bounds[:-2, 1] = shapes*10.**np.sign(shapes) + bounds[-2] = (0, 10) + bounds[-1] = (1e-16, 10) + loc = rng.uniform(*bounds[-2]) + scale = rng.uniform(*bounds[-1]) + ref = list(dist_data[dist_name]) + [loc, scale] + + if getattr(dist, 'pmf', False): + ref = ref[:-1] + ref[-1] = np.floor(loc) + data = dist.rvs(*ref, size=N, random_state=rng) + bounds = bounds[:-1] + if getattr(dist, 'pdf', False): + data = dist.rvs(*ref, size=N, random_state=rng) + + with npt.suppress_warnings() as sup: + sup.filter(RuntimeWarning, "overflow encountered") + res = stats.fit(dist, data, bounds, method=method, + optimizer=self.opt) + + nlff_names = {'mle': 'nnlf', 'mse': '_penalized_nlpsf'} + nlff_name = nlff_names[method] + assert_nlff_less_or_close(dist, data, res.params, ref, **self.tols, + nlff_name=nlff_name) + + @pytest.mark.parametrize("dist_name", cases_test_fit_mle()) + def test_basic_fit_mle(self, dist_name): + self.basic_fit_test(dist_name, "mle") + + @pytest.mark.parametrize("dist_name", cases_test_fit_mse()) + def test_basic_fit_mse(self, dist_name): + self.basic_fit_test(dist_name, "mse") + + def test_arcsine(self): + # Can't guarantee that all distributions will fit all data with + # arbitrary bounds. This distribution just happens to fail above. + # Try something slightly different. + N = 1000 + rng = np.random.default_rng(self.seed) + dist = stats.arcsine + shapes = (1., 2.) + data = dist.rvs(*shapes, size=N, random_state=rng) + shape_bounds = {'loc': (0.1, 10), 'scale': (0.1, 10)} + res = stats.fit(dist, data, shape_bounds, optimizer=self.opt) + assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols) + + def test_argus(self): + # Can't guarantee that all distributions will fit all data with + # arbitrary bounds. This distribution just happens to fail above. + # Try something slightly different. + N = 1000 + rng = np.random.default_rng(self.seed) + dist = stats.argus + shapes = (1., 2., 3.) + data = dist.rvs(*shapes, size=N, random_state=rng) + shape_bounds = {'chi': (0.1, 10), 'loc': (0.1, 10), 'scale': (0.1, 10)} + res = stats.fit(dist, data, shape_bounds, optimizer=self.opt) + + assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols) + + def test_foldnorm(self): + # Can't guarantee that all distributions will fit all data with + # arbitrary bounds. This distribution just happens to fail above. + # Try something slightly different. + N = 1000 + rng = np.random.default_rng(self.seed) + dist = stats.foldnorm + shapes = (1.952125337355587, 2., 3.) + data = dist.rvs(*shapes, size=N, random_state=rng) + shape_bounds = {'c': (0.1, 10), 'loc': (0.1, 10), 'scale': (0.1, 10)} + res = stats.fit(dist, data, shape_bounds, optimizer=self.opt) + + assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols) + + def test_truncpareto(self): + # Can't guarantee that all distributions will fit all data with + # arbitrary bounds. This distribution just happens to fail above. + # Try something slightly different. 
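+ # note: the four entries of `shapes` below unpack as + # truncpareto(b, c, loc, scale), i.e. exponent b=1.8, upper truncation + # point c=5.3, loc=2.3, scale=4.1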
+ N = 1000 + rng = np.random.default_rng(self.seed) + dist = stats.truncpareto + shapes = (1.8, 5.3, 2.3, 4.1) + data = dist.rvs(*shapes, size=N, random_state=rng) + shape_bounds = [(0.1, 10)]*4 + res = stats.fit(dist, data, shape_bounds, optimizer=self.opt) + + assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols) + + def test_truncweibull_min(self): + # Can't guarantee that all distributions will fit all data with + # arbitrary bounds. This distribution just happens to fail above. + # Try something slightly different. + N = 1000 + rng = np.random.default_rng(self.seed) + dist = stats.truncweibull_min + shapes = (2.5, 0.25, 1.75, 2., 3.) + data = dist.rvs(*shapes, size=N, random_state=rng) + shape_bounds = [(0.1, 10)]*5 + res = stats.fit(dist, data, shape_bounds, optimizer=self.opt) + + assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols) + + def test_missing_shape_bounds(self): + # some distributions have a small domain w.r.t. a parameter, e.g. + # $p \in [0, 1]$ for binomial distribution + # User does not need to provide these because the intersection of the + # user's bounds (none) and the distribution's domain is finite + N = 1000 + rng = np.random.default_rng(self.seed) + + dist = stats.binom + n, p, loc = 10, 0.65, 0 + data = dist.rvs(n, p, loc=loc, size=N, random_state=rng) + shape_bounds = {'n': np.array([0, 20])} # check arrays are OK, too + res = stats.fit(dist, data, shape_bounds, optimizer=self.opt) + assert_allclose(res.params, (n, p, loc), **self.tols) + + dist = stats.bernoulli + p, loc = 0.314159, 0 + data = dist.rvs(p, loc=loc, size=N, random_state=rng) + res = stats.fit(dist, data, optimizer=self.opt) + assert_allclose(res.params, (p, loc), **self.tols) + + def test_fit_only_loc_scale(self): + # fit only loc + N = 5000 + rng = np.random.default_rng(self.seed) + + dist = stats.norm + loc, scale = 1.5, 1 + data = dist.rvs(loc=loc, size=N, random_state=rng) + loc_bounds = (0, 5) + bounds = {'loc': loc_bounds} + res = stats.fit(dist, data, bounds, optimizer=self.opt) + assert_allclose(res.params, (loc, scale), **self.tols) + + # fit only scale + loc, scale = 0, 2.5 + data = dist.rvs(scale=scale, size=N, random_state=rng) + scale_bounds = (0.01, 5) + bounds = {'scale': scale_bounds} + res = stats.fit(dist, data, bounds, optimizer=self.opt) + assert_allclose(res.params, (loc, scale), **self.tols) + + # fit only loc and scale + dist = stats.norm + loc, scale = 1.5, 2.5 + data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng) + bounds = {'loc': loc_bounds, 'scale': scale_bounds} + res = stats.fit(dist, data, bounds, optimizer=self.opt) + assert_allclose(res.params, (loc, scale), **self.tols) + + def test_everything_fixed(self): + N = 5000 + rng = np.random.default_rng(self.seed) + + dist = stats.norm + loc, scale = 1.5, 2.5 + data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng) + + # loc, scale fixed to 0, 1 by default + res = stats.fit(dist, data) + assert_allclose(res.params, (0, 1), **self.tols) + + # loc, scale explicitly fixed + bounds = {'loc': (loc, loc), 'scale': (scale, scale)} + res = stats.fit(dist, data, bounds) + assert_allclose(res.params, (loc, scale), **self.tols) + + # `n` gets fixed during polishing + dist = stats.binom + n, p, loc = 10, 0.65, 0 + data = dist.rvs(n, p, loc=loc, size=N, random_state=rng) + shape_bounds = {'n': (0, 20), 'p': (0.65, 0.65)} + res = stats.fit(dist, data, shape_bounds, optimizer=self.opt) + assert_allclose(res.params, (n, p, loc), **self.tols) + + def test_failure(self): + N = 
5000 + rng = np.random.default_rng(self.seed) + + dist = stats.nbinom + shapes = (5, 0.5) + data = dist.rvs(*shapes, size=N, random_state=rng) + + assert data.min() == 0 + # With lower bounds on location at 0.5, likelihood is zero + bounds = [(0, 30), (0, 1), (0.5, 10)] + res = stats.fit(dist, data, bounds) + message = "Optimization converged to parameter values that are" + assert res.message.startswith(message) + assert res.success is False + + @pytest.mark.xslow + def test_guess(self): + # Test that guess helps DE find the desired solution + N = 2000 + # With some seeds, `fit` doesn't need a guess + rng = np.random.default_rng(1963904448561) + dist = stats.nhypergeom + params = (20, 7, 12, 0) + bounds = [(2, 200), (0.7, 70), (1.2, 120), (0, 10)] + + data = dist.rvs(*params, size=N, random_state=rng) + + res = stats.fit(dist, data, bounds, optimizer=self.opt) + assert not np.allclose(res.params, params, **self.tols) + + res = stats.fit(dist, data, bounds, guess=params, optimizer=self.opt) + assert_allclose(res.params, params, **self.tols) + + def test_mse_accuracy_1(self): + # Test maximum spacing estimation against example from Wikipedia + # https://en.wikipedia.org/wiki/Maximum_spacing_estimation#Examples + data = [2, 4] + dist = stats.expon + bounds = {'loc': (0, 0), 'scale': (1e-8, 10)} + res_mle = stats.fit(dist, data, bounds=bounds, method='mle') + assert_allclose(res_mle.params.scale, 3, atol=1e-3) + res_mse = stats.fit(dist, data, bounds=bounds, method='mse') + assert_allclose(res_mse.params.scale, 3.915, atol=1e-3) + + def test_mse_accuracy_2(self): + # Test maximum spacing estimation against example from Wikipedia + # https://en.wikipedia.org/wiki/Maximum_spacing_estimation#Examples + rng = np.random.default_rng(9843212616816518964) + + dist = stats.uniform + n = 10 + data = dist(3, 6).rvs(size=n, random_state=rng) + bounds = {'loc': (0, 10), 'scale': (1e-8, 10)} + res = stats.fit(dist, data, bounds=bounds, method='mse') + # (loc=3.608118420015416, scale=5.509323262055043) + + x = np.sort(data) + a = (n*x[0] - x[-1])/(n - 1) + b = (n*x[-1] - x[0])/(n - 1) + ref = a, b-a # (3.6081133632151503, 5.509328130317254) + assert_allclose(res.params, ref, rtol=1e-4) + + +# Data from Matlab: https://www.mathworks.com/help/stats/lillietest.html +examgrades = [65, 61, 81, 88, 69, 89, 55, 84, 86, 84, 71, 81, 84, 81, 78, 67, + 96, 66, 73, 75, 59, 71, 69, 63, 79, 76, 63, 85, 87, 88, 80, 71, + 65, 84, 71, 75, 81, 79, 64, 65, 84, 77, 70, 75, 84, 75, 73, 92, + 90, 79, 80, 71, 73, 71, 58, 79, 73, 64, 77, 82, 81, 59, 54, 82, + 57, 79, 79, 73, 74, 82, 63, 64, 73, 69, 87, 68, 81, 73, 83, 73, + 80, 73, 73, 71, 66, 78, 64, 74, 68, 67, 75, 75, 80, 85, 74, 76, + 80, 77, 93, 70, 86, 80, 81, 83, 68, 60, 85, 64, 74, 82, 81, 77, + 66, 85, 75, 81, 69, 60, 83, 72] + + +class TestGoodnessOfFit: + + def test_gof_iv(self): + dist = stats.norm + x = [1, 2, 3] + + message = r"`dist` must be a \(non-frozen\) instance of..." + with pytest.raises(TypeError, match=message): + goodness_of_fit(stats.norm(), x) + + message = "`data` must be a one-dimensional array of numbers." + with pytest.raises(ValueError, match=message): + goodness_of_fit(dist, [[1, 2, 3]]) + + message = "`statistic` must be one of..." + with pytest.raises(ValueError, match=message): + goodness_of_fit(dist, x, statistic='mm') + + message = "`n_mc_samples` must be an integer." 
+ with pytest.raises(TypeError, match=message): + goodness_of_fit(dist, x, n_mc_samples=1000.5) + + message = "'herring' cannot be used to seed a" + with pytest.raises(ValueError, match=message): + goodness_of_fit(dist, x, random_state='herring') + + def test_against_ks(self): + rng = np.random.default_rng(8517426291317196949) + x = examgrades + known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)} + res = goodness_of_fit(stats.norm, x, known_params=known_params, + statistic='ks', random_state=rng) + ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact') + assert_allclose(res.statistic, ref.statistic) # ~0.0848 + assert_allclose(res.pvalue, ref.pvalue, atol=5e-3) # ~0.335 + + def test_against_lilliefors(self): + rng = np.random.default_rng(2291803665717442724) + x = examgrades + res = goodness_of_fit(stats.norm, x, statistic='ks', random_state=rng) + known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)} + ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact') + assert_allclose(res.statistic, ref.statistic) # ~0.0848 + assert_allclose(res.pvalue, 0.0348, atol=5e-3) + + def test_against_cvm(self): + rng = np.random.default_rng(8674330857509546614) + x = examgrades + known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)} + res = goodness_of_fit(stats.norm, x, known_params=known_params, + statistic='cvm', random_state=rng) + ref = stats.cramervonmises(x, stats.norm(**known_params).cdf) + assert_allclose(res.statistic, ref.statistic) # ~0.090 + assert_allclose(res.pvalue, ref.pvalue, atol=5e-3) # ~0.636 + + def test_against_anderson_case_0(self): + # "Case 0" is where loc and scale are known [1] + rng = np.random.default_rng(7384539336846690410) + x = np.arange(1, 101) + # loc that produced critical value of statistic found w/ root_scalar + known_params = {'loc': 45.01575354024957, 'scale': 30} + res = goodness_of_fit(stats.norm, x, known_params=known_params, + statistic='ad', random_state=rng) + assert_allclose(res.statistic, 2.492) # See [1] Table 1A 1.0 + assert_allclose(res.pvalue, 0.05, atol=5e-3) + + def test_against_anderson_case_1(self): + # "Case 1" is where scale is known and loc is fit [1] + rng = np.random.default_rng(5040212485680146248) + x = np.arange(1, 101) + # scale that produced critical value of statistic found w/ root_scalar + known_params = {'scale': 29.957112639101933} + res = goodness_of_fit(stats.norm, x, known_params=known_params, + statistic='ad', random_state=rng) + assert_allclose(res.statistic, 0.908) # See [1] Table 1B 1.1 + assert_allclose(res.pvalue, 0.1, atol=5e-3) + + def test_against_anderson_case_2(self): + # "Case 2" is where loc is known and scale is fit [1] + rng = np.random.default_rng(726693985720914083) + x = np.arange(1, 101) + # loc that produced critical value of statistic found w/ root_scalar + known_params = {'loc': 44.5680212261933} + res = goodness_of_fit(stats.norm, x, known_params=known_params, + statistic='ad', random_state=rng) + assert_allclose(res.statistic, 2.904) # See [1] Table 1B 1.2 + assert_allclose(res.pvalue, 0.025, atol=5e-3) + + def test_against_anderson_case_3(self): + # "Case 3" is where both loc and scale are fit [1] + rng = np.random.default_rng(6763691329830218206) + # c that produced critical value of statistic found w/ root_scalar + x = stats.skewnorm.rvs(1.4477847789132101, loc=1, scale=2, size=100, + random_state=rng) + res = goodness_of_fit(stats.norm, x, statistic='ad', random_state=rng) + assert_allclose(res.statistic, 0.559) # See [1] Table 1B 1.2 + 
assert_allclose(res.pvalue, 0.15, atol=5e-3) + + @pytest.mark.slow + def test_against_anderson_gumbel_r(self): + rng = np.random.default_rng(7302761058217743) + # c that produced critical value of statistic found w/ root_scalar + x = stats.genextreme(0.051896837188595134, loc=0.5, + scale=1.5).rvs(size=1000, random_state=rng) + res = goodness_of_fit(stats.gumbel_r, x, statistic='ad', + random_state=rng) + ref = stats.anderson(x, dist='gumbel_r') + assert_allclose(res.statistic, ref.critical_values[0]) + assert_allclose(res.pvalue, ref.significance_level[0]/100, atol=5e-3) + + def test_against_filliben_norm(self): + # Test against `stats.fit` ref. [7] Section 8 "Example" + rng = np.random.default_rng(8024266430745011915) + y = [6, 1, -4, 8, -2, 5, 0] + known_params = {'loc': 0, 'scale': 1} + res = stats.goodness_of_fit(stats.norm, y, known_params=known_params, + statistic="filliben", random_state=rng) + # Slight discrepancy presumably due to roundoff in Filliben's + # calculation. Using exact order statistic medians instead of + # Filliben's approximation doesn't account for it. + assert_allclose(res.statistic, 0.98538, atol=1e-4) + assert 0.75 < res.pvalue < 0.9 + + # Using R's ppcc library: + # library(ppcc) + # options(digits=16) + # x <- c(6, 1, -4, 8, -2, 5, 0) + # set.seed(100) + # ppccTest(x, "qnorm", ppos="Filliben") + # Discrepancy with R's result is similarly small: + assert_allclose(res.statistic, 0.98540957187084, rtol=2e-5) + assert_allclose(res.pvalue, 0.8875, rtol=2e-3) + + def test_filliben_property(self): + # Filliben's statistic should be independent of data location and scale + rng = np.random.default_rng(8535677809395478813) + x = rng.normal(loc=10, scale=0.5, size=100) + res = stats.goodness_of_fit(stats.norm, x, + statistic="filliben", random_state=rng) + known_params = {'loc': 0, 'scale': 1} + ref = stats.goodness_of_fit(stats.norm, x, known_params=known_params, + statistic="filliben", random_state=rng) + assert_allclose(res.statistic, ref.statistic, rtol=1e-15) + + @pytest.mark.parametrize('case', [(25, [.928, .937, .950, .958, .966]), + (50, [.959, .965, .972, .977, .981]), + (95, [.977, .979, .983, .986, .989])]) + def test_against_filliben_norm_table(self, case): + # Test against `stats.fit` ref. [7] Table 1 + rng = np.random.default_rng(504569995557928957) + n, ref = case + x = rng.random(n) + known_params = {'loc': 0, 'scale': 1} + res = stats.goodness_of_fit(stats.norm, x, known_params=known_params, + statistic="filliben", random_state=rng) + percentiles = np.array([0.005, 0.01, 0.025, 0.05, 0.1]) + res = stats.scoreatpercentile(res.null_distribution, percentiles*100) + assert_allclose(res, ref, atol=2e-3) + + @pytest.mark.slow + @pytest.mark.parametrize('case', [(5, 0.95772790260469, 0.4755), + (6, 0.95398832257958, 0.3848), + (7, 0.9432692889277, 0.2328)]) + def test_against_ppcc(self, case): + # Test against R ppcc, e.g. + # library(ppcc) + # options(digits=16) + # x <- c(0.52325412, 1.06907699, -0.36084066, 0.15305959, 0.99093194) + # set.seed(100) + # ppccTest(x, "qrayleigh", ppos="Filliben") + n, ref_statistic, ref_pvalue = case + rng = np.random.default_rng(7777775561439803116) + x = rng.normal(size=n) + res = stats.goodness_of_fit(stats.rayleigh, x, statistic="filliben", + random_state=rng) + assert_allclose(res.statistic, ref_statistic, rtol=1e-4) + assert_allclose(res.pvalue, ref_pvalue, atol=1.5e-2) + + def test_params_effects(self): + # Ensure that `guessed_params`, `fit_params`, and `known_params` have + # the intended effects.
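+ # Note: as the assertions below show, a guessed parameter only seeds the + # optimization (the fitted value may differ from the guess, and changing + # the guess changes both the fit and the null distribution), while values + # passed via fit_params or known_params are fixed in the fit result.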
+ rng = np.random.default_rng(9121950977643805391) + x = stats.skewnorm.rvs(-5.044559778383153, loc=1, scale=2, size=50, + random_state=rng) + + # Show that `guessed_params` don't fit to the guess, + # but `fit_params` and `known_params` respect the provided fit + guessed_params = {'c': 13.4} + fit_params = {'scale': 13.73} + known_params = {'loc': -13.85} + rng = np.random.default_rng(9121950977643805391) + res1 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2, + guessed_params=guessed_params, + fit_params=fit_params, + known_params=known_params, random_state=rng) + assert not np.allclose(res1.fit_result.params.c, 13.4) + assert_equal(res1.fit_result.params.scale, 13.73) + assert_equal(res1.fit_result.params.loc, -13.85) + + # Show that changing the guess changes the parameter that gets fit, + # and it changes the null distribution + guessed_params = {'c': 2} + rng = np.random.default_rng(9121950977643805391) + res2 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2, + guessed_params=guessed_params, + fit_params=fit_params, + known_params=known_params, random_state=rng) + assert not np.allclose(res2.fit_result.params.c, + res1.fit_result.params.c, rtol=1e-8) + assert not np.allclose(res2.null_distribution, + res1.null_distribution, rtol=1e-8) + assert_equal(res2.fit_result.params.scale, 13.73) + assert_equal(res2.fit_result.params.loc, -13.85) + + # If we set all parameters as fit_params and known_params, + # they're all fixed to those values, but the null distribution + # varies. + fit_params = {'c': 13.4, 'scale': 13.73} + rng = np.random.default_rng(9121950977643805391) + res3 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2, + guessed_params=guessed_params, + fit_params=fit_params, + known_params=known_params, random_state=rng) + assert_equal(res3.fit_result.params.c, 13.4) + assert_equal(res3.fit_result.params.scale, 13.73) + assert_equal(res3.fit_result.params.loc, -13.85) + assert not np.allclose(res3.null_distribution, res1.null_distribution) + + def test_custom_statistic(self): + # Test support for custom statistic function. + + # References: + # [1] Pyke, R. (1965). "Spacings". Journal of the Royal Statistical + # Society: Series B (Methodological), 27(3): 395-436. + # [2] Burrows, P. M. (1979). "Selected Percentage Points of + # Greenwood's Statistics". Journal of the Royal Statistical + # Society. Series A (General), 142(2): 256-258. + + # Use the Greenwood statistic for illustration; see [1, p.402]. + def greenwood(dist, data, *, axis): + x = np.sort(data, axis=axis) + y = dist.cdf(x) + d = np.diff(y, axis=axis, prepend=0, append=1) + return np.sum(d ** 2, axis=axis) + + # Run the Monte Carlo test with sample size = 5 on a fully specified + # null distribution, and compare the simulated quantiles to the exact + # ones given in [2, Table 1, column (n = 5)]. 
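+ # That is, with u_(1) <= ... <= u_(n) the sorted cdf values, spacings + # d_i = u_(i) - u_(i-1) (where u_(0) = 0 and u_(n+1) = 1, supplied via the + # prepend/append arguments above), greenwood returns G = sum(d_i**2).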
+ rng = np.random.default_rng(9121950977643805391) + data = stats.expon.rvs(size=5, random_state=rng) + result = goodness_of_fit(stats.expon, data, + known_params={'loc': 0, 'scale': 1}, + statistic=greenwood, random_state=rng) + p = [.01, .05, .1, .2, .3, .4, .5, .6, .7, .8, .9, .95, .99] + exact_quantiles = [ + .183863, .199403, .210088, .226040, .239947, .253677, .268422, + .285293, .306002, .334447, .382972, .432049, .547468] + simulated_quantiles = np.quantile(result.null_distribution, p) + assert_allclose(simulated_quantiles, exact_quantiles, atol=0.005) + +class TestFitResult: + def test_plot_iv(self): + rng = np.random.default_rng(1769658657308472721) + data = stats.norm.rvs(0, 1, size=100, random_state=rng) + + def optimizer(*args, **kwargs): + return differential_evolution(*args, **kwargs, seed=rng) + + bounds = [(0, 30), (0, 1)] + res = stats.fit(stats.norm, data, bounds, optimizer=optimizer) + try: + import matplotlib # noqa: F401 + message = r"`plot_type` must be one of \{'..." + with pytest.raises(ValueError, match=message): + res.plot(plot_type='llama') + except (ModuleNotFoundError, ImportError): + # Avoid trying to call MPL with numpy 2.0-dev, because that fails + # too often due to ABI mismatches and is hard to avoid. This test + # will work fine again once MPL has done a 2.0-compatible release. + if not np.__version__.startswith('2.0.0.dev0'): + message = r"matplotlib must be installed to use method `plot`." + with pytest.raises(ModuleNotFoundError, match=message): + res.plot(plot_type='llama') diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_hypotests.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_hypotests.py new file mode 100644 index 0000000000000000000000000000000000000000..d9204f0a360d24dc8d4dc776f1ea11ab12788e1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_hypotests.py @@ -0,0 +1,1879 @@ +from itertools import product + +import numpy as np +import random +import functools +import pytest +from numpy.testing import (assert_, assert_equal, assert_allclose, + assert_almost_equal) # avoid new uses +from pytest import raises as assert_raises + +import scipy.stats as stats +from scipy.stats import distributions +from scipy.stats._hypotests import (epps_singleton_2samp, cramervonmises, + _cdf_cvm, cramervonmises_2samp, + _pval_cvm_2samp_exact, barnard_exact, + boschloo_exact) +from scipy.stats._mannwhitneyu import mannwhitneyu, _mwu_state +from .common_tests import check_named_results +from scipy._lib._testutils import _TestPythranFunc + + +class TestEppsSingleton: + def test_statistic_1(self): + # first example in Goerg & Kaiser, also in original paper of + # Epps & Singleton. 
Note: values do not match exactly, the + # value of the interquartile range varies depending on how + # quantiles are computed + x = np.array([-0.35, 2.55, 1.73, 0.73, 0.35, + 2.69, 0.46, -0.94, -0.37, 12.07]) + y = np.array([-1.15, -0.15, 2.48, 3.25, 3.71, + 4.29, 5.00, 7.74, 8.38, 8.60]) + w, p = epps_singleton_2samp(x, y) + assert_almost_equal(w, 15.14, decimal=1) + assert_almost_equal(p, 0.00442, decimal=3) + + def test_statistic_2(self): + # second example in Goerg & Kaiser, again not a perfect match + x = np.array((0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 5, 5, 6, 10, + 10, 10, 10)) + y = np.array((10, 4, 0, 5, 10, 10, 0, 5, 6, 7, 10, 3, 1, 7, 0, 8, 1, + 5, 8, 10)) + w, p = epps_singleton_2samp(x, y) + assert_allclose(w, 8.900, atol=0.001) + assert_almost_equal(p, 0.06364, decimal=3) + + def test_epps_singleton_array_like(self): + np.random.seed(1234) + x, y = np.arange(30), np.arange(28) + + w1, p1 = epps_singleton_2samp(list(x), list(y)) + w2, p2 = epps_singleton_2samp(tuple(x), tuple(y)) + w3, p3 = epps_singleton_2samp(x, y) + + assert_(w1 == w2 == w3) + assert_(p1 == p2 == p3) + + def test_epps_singleton_size(self): + # raise error if less than 5 elements + x, y = (1, 2, 3, 4), np.arange(10) + assert_raises(ValueError, epps_singleton_2samp, x, y) + + def test_epps_singleton_nonfinite(self): + # raise error if there are non-finite values + x, y = (1, 2, 3, 4, 5, np.inf), np.arange(10) + assert_raises(ValueError, epps_singleton_2samp, x, y) + + def test_names(self): + x, y = np.arange(20), np.arange(30) + res = epps_singleton_2samp(x, y) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + +class TestCvm: + # the expected values of the cdfs are taken from Table 1 in + # Csorgo / Faraway: The Exact and Asymptotic Distribution of + # Cramér-von Mises Statistics, 1996. + def test_cdf_4(self): + assert_allclose( + _cdf_cvm([0.02983, 0.04111, 0.12331, 0.94251], 4), + [0.01, 0.05, 0.5, 0.999], + atol=1e-4) + + def test_cdf_10(self): + assert_allclose( + _cdf_cvm([0.02657, 0.03830, 0.12068, 0.56643], 10), + [0.01, 0.05, 0.5, 0.975], + atol=1e-4) + + def test_cdf_1000(self): + assert_allclose( + _cdf_cvm([0.02481, 0.03658, 0.11889, 1.16120], 1000), + [0.01, 0.05, 0.5, 0.999], + atol=1e-4) + + def test_cdf_inf(self): + assert_allclose( + _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204]), + [0.01, 0.05, 0.5, 0.999], + atol=1e-4) + + def test_cdf_support(self): + # cdf has support on [1/(12*n), n/3] + assert_equal(_cdf_cvm([1/(12*533), 533/3], 533), [0, 1]) + assert_equal(_cdf_cvm([1/(12*(27 + 1)), (27 + 1)/3], 27), [0, 1]) + + def test_cdf_large_n(self): + # test that asymptotic cdf and cdf for large samples are close + assert_allclose( + _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204, 100], 10000), + _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204, 100]), + atol=1e-4) + + def test_large_x(self): + # for large values of x and n, the series used to compute the cdf + # converges slowly. + # this leads to bug in R package goftest and MAPLE code that is + # the basis of the implementation in scipy + # note: cdf = 1 for x >= 1000/3 and n = 1000 + assert_(0.99999 < _cdf_cvm(333.3, 1000) < 1.0) + assert_(0.99999 < _cdf_cvm(333.3) < 1.0) + + def test_low_p(self): + # _cdf_cvm can return values larger than 1. In that case, we just + # return a p-value of zero. 
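+ # (i.e., the p-value is computed as 1 - cdf and clipped at zero, so a + # cdf value exceeding 1 must yield a p-value of exactly 0 rather than a + # negative number)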
+ n = 12 + res = cramervonmises(np.ones(n)*0.8, 'norm') + assert_(_cdf_cvm(res.statistic, n) > 1.0) + assert_equal(res.pvalue, 0) + + def test_invalid_input(self): + assert_raises(ValueError, cramervonmises, [1.5], "norm") + assert_raises(ValueError, cramervonmises, (), "norm") + + def test_values_R(self): + # compared against R package goftest, version 1.1.1 + # goftest::cvm.test(c(-1.7, 2, 0, 1.3, 4, 0.1, 0.6), "pnorm") + res = cramervonmises([-1.7, 2, 0, 1.3, 4, 0.1, 0.6], "norm") + assert_allclose(res.statistic, 0.288156, atol=1e-6) + assert_allclose(res.pvalue, 0.1453465, atol=1e-6) + + # goftest::cvm.test(c(-1.7, 2, 0, 1.3, 4, 0.1, 0.6), + # "pnorm", mean = 3, sd = 1.5) + res = cramervonmises([-1.7, 2, 0, 1.3, 4, 0.1, 0.6], "norm", (3, 1.5)) + assert_allclose(res.statistic, 0.9426685, atol=1e-6) + assert_allclose(res.pvalue, 0.002026417, atol=1e-6) + + # goftest::cvm.test(c(1, 2, 5, 1.4, 0.14, 11, 13, 0.9, 7.5), "pexp") + res = cramervonmises([1, 2, 5, 1.4, 0.14, 11, 13, 0.9, 7.5], "expon") + assert_allclose(res.statistic, 0.8421854, atol=1e-6) + assert_allclose(res.pvalue, 0.004433406, atol=1e-6) + + def test_callable_cdf(self): + x, args = np.arange(5), (1.4, 0.7) + r1 = cramervonmises(x, distributions.expon.cdf) + r2 = cramervonmises(x, "expon") + assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue)) + + r1 = cramervonmises(x, distributions.beta.cdf, args) + r2 = cramervonmises(x, "beta", args) + assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue)) + + +class TestMannWhitneyU: + def setup_method(self): + _mwu_state._recursive = True + + # All magic numbers are from R wilcox.test unless otherwise specified + # https://rdrr.io/r/stats/wilcox.test.html + + # --- Test Input Validation --- + + def test_input_validation(self): + x = np.array([1, 2]) # generic, valid inputs + y = np.array([3, 4]) + with assert_raises(ValueError, match="`x` and `y` must be of nonzero"): + mannwhitneyu([], y) + with assert_raises(ValueError, match="`x` and `y` must be of nonzero"): + mannwhitneyu(x, []) + with assert_raises(ValueError, match="`use_continuity` must be one"): + mannwhitneyu(x, y, use_continuity='ekki') + with assert_raises(ValueError, match="`alternative` must be one of"): + mannwhitneyu(x, y, alternative='ekki') + with assert_raises(ValueError, match="`axis` must be an integer"): + mannwhitneyu(x, y, axis=1.5) + with assert_raises(ValueError, match="`method` must be one of"): + mannwhitneyu(x, y, method='ekki') + + def test_auto(self): + # Test that default method ('auto') chooses intended method + + np.random.seed(1) + n = 8 # threshold to switch from exact to asymptotic + + # both inputs are smaller than threshold; should use exact + x = np.random.rand(n-1) + y = np.random.rand(n-1) + auto = mannwhitneyu(x, y) + asymptotic = mannwhitneyu(x, y, method='asymptotic') + exact = mannwhitneyu(x, y, method='exact') + assert auto.pvalue == exact.pvalue + assert auto.pvalue != asymptotic.pvalue + + # one input is smaller than threshold; should use exact + x = np.random.rand(n-1) + y = np.random.rand(n+1) + auto = mannwhitneyu(x, y) + asymptotic = mannwhitneyu(x, y, method='asymptotic') + exact = mannwhitneyu(x, y, method='exact') + assert auto.pvalue == exact.pvalue + assert auto.pvalue != asymptotic.pvalue + + # other input is smaller than threshold; should use exact + auto = mannwhitneyu(y, x) + asymptotic = mannwhitneyu(x, y, method='asymptotic') + exact = mannwhitneyu(x, y, method='exact') + assert auto.pvalue == exact.pvalue + assert auto.pvalue != asymptotic.pvalue 
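+ # (The selection rule exercised above and below, roughly: 'auto'
+ # picks the exact null distribution only when both samples are
+ # small and tie-free, i.e. something like
+ # method = 'exact' if min(m, n) <= 8 and not ties else 'asymptotic'
+ # with the precise threshold and tie handling being implementation
+ # details.)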
+ + # both inputs are larger than threshold; should use asymptotic + x = np.random.rand(n+1) + y = np.random.rand(n+1) + auto = mannwhitneyu(x, y) + asymptotic = mannwhitneyu(x, y, method='asymptotic') + exact = mannwhitneyu(x, y, method='exact') + assert auto.pvalue != exact.pvalue + assert auto.pvalue == asymptotic.pvalue + + # both inputs are smaller than threshold, but there is a tie + # should use asymptotic + x = np.random.rand(n-1) + y = np.random.rand(n-1) + y[3] = x[3] + auto = mannwhitneyu(x, y) + asymptotic = mannwhitneyu(x, y, method='asymptotic') + exact = mannwhitneyu(x, y, method='exact') + assert auto.pvalue != exact.pvalue + assert auto.pvalue == asymptotic.pvalue + + # --- Test Basic Functionality --- + + x = [210.052110, 110.190630, 307.918612] + y = [436.08811482466416, 416.37397329768191, 179.96975939463582, + 197.8118754228619, 34.038757281225756, 138.54220550921517, + 128.7769351470246, 265.92721427951852, 275.6617533155341, + 592.34083395416258, 448.73177590617018, 300.61495185038905, + 187.97508449019588] + + # This test was written for mann_whitney_u in gh-4933. + # Originally, the p-values for alternatives were swapped; + # this has been corrected and the tests have been refactored for + # compactness, but otherwise the tests are unchanged. + # R code for comparison, e.g.: + # options(digits = 16) + # x = c(210.052110, 110.190630, 307.918612) + # y = c(436.08811482466416, 416.37397329768191, 179.96975939463582, + # 197.8118754228619, 34.038757281225756, 138.54220550921517, + # 128.7769351470246, 265.92721427951852, 275.6617533155341, + # 592.34083395416258, 448.73177590617018, 300.61495185038905, + # 187.97508449019588) + # wilcox.test(x, y, alternative="g", exact=TRUE) + cases_basic = [[{"alternative": 'two-sided', "method": "asymptotic"}, + (16, 0.6865041817876)], + [{"alternative": 'less', "method": "asymptotic"}, + (16, 0.3432520908938)], + [{"alternative": 'greater', "method": "asymptotic"}, + (16, 0.7047591913255)], + [{"alternative": 'two-sided', "method": "exact"}, + (16, 0.7035714285714)], + [{"alternative": 'less', "method": "exact"}, + (16, 0.3517857142857)], + [{"alternative": 'greater', "method": "exact"}, + (16, 0.6946428571429)]] + + @pytest.mark.parametrize(("kwds", "expected"), cases_basic) + def test_basic(self, kwds, expected): + res = mannwhitneyu(self.x, self.y, **kwds) + assert_allclose(res, expected) + + cases_continuity = [[{"alternative": 'two-sided', "use_continuity": True}, + (23, 0.6865041817876)], + [{"alternative": 'less', "use_continuity": True}, + (23, 0.7047591913255)], + [{"alternative": 'greater', "use_continuity": True}, + (23, 0.3432520908938)], + [{"alternative": 'two-sided', "use_continuity": False}, + (23, 0.6377328900502)], + [{"alternative": 'less', "use_continuity": False}, + (23, 0.6811335549749)], + [{"alternative": 'greater', "use_continuity": False}, + (23, 0.3188664450251)]] + + @pytest.mark.parametrize(("kwds", "expected"), cases_continuity) + def test_continuity(self, kwds, expected): + # When x and y are interchanged, less and greater p-values should + # swap (compare to above). This wouldn't happen if the continuity + # correction were applied in the wrong direction. Note that less and + # greater p-values do not sum to 1 when continuity correction is on, + # which is what we'd expect. Also check that results match R when + # continuity correction is turned off. 
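+ # For orientation, a sketch of the asymptotic formula being
+ # exercised (assuming no ties): with mu = m*n/2 and
+ # sigma = sqrt(m*n*(m + n + 1)/12), the one-sided p-value uses
+ # z = (U - mu - 0.5)/sigma with continuity correction and
+ # z = (U - mu)/sigma without; the 0.5 shift toward the mean is why
+ # the corrected 'less' and 'greater' p-values need not sum to 1.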
+ # Note that method='asymptotic' -> exact=FALSE + # and use_continuity=False -> correct=FALSE, e.g.: + # wilcox.test(x, y, alternative="t", exact=FALSE, correct=FALSE) + res = mannwhitneyu(self.y, self.x, method='asymptotic', **kwds) + assert_allclose(res, expected) + + def test_tie_correct(self): + # Test tie correction against R's wilcox.test + # options(digits = 16) + # x = c(1, 2, 3, 4) + # y = c(1, 2, 3, 4, 5) + # wilcox.test(x, y, exact=FALSE) + x = [1, 2, 3, 4] + y0 = np.array([1, 2, 3, 4, 5]) + dy = np.array([0, 1, 0, 1, 0])*0.01 + dy2 = np.array([0, 0, 1, 0, 0])*0.01 + y = [y0-0.01, y0-dy, y0-dy2, y0, y0+dy2, y0+dy, y0+0.01] + res = mannwhitneyu(x, y, axis=-1, method="asymptotic") + U_expected = [10, 9, 8.5, 8, 7.5, 7, 6] + p_expected = [1, 0.9017048037317, 0.804080657472, 0.7086240584439, + 0.6197963884941, 0.5368784563079, 0.3912672792826] + assert_equal(res.statistic, U_expected) + assert_allclose(res.pvalue, p_expected) + + # --- Test Exact Distribution of U --- + + # These are tabulated values of the CDF of the exact distribution of + # the test statistic from pg 52 of reference [1] (Mann-Whitney Original) + pn3 = {1: [0.25, 0.5, 0.75], 2: [0.1, 0.2, 0.4, 0.6], + 3: [0.05, .1, 0.2, 0.35, 0.5, 0.65]} + pn4 = {1: [0.2, 0.4, 0.6], 2: [0.067, 0.133, 0.267, 0.4, 0.6], + 3: [0.028, 0.057, 0.114, 0.2, .314, 0.429, 0.571], + 4: [0.014, 0.029, 0.057, 0.1, 0.171, 0.243, 0.343, 0.443, 0.557]} + pm5 = {1: [0.167, 0.333, 0.5, 0.667], + 2: [0.047, 0.095, 0.19, 0.286, 0.429, 0.571], + 3: [0.018, 0.036, 0.071, 0.125, 0.196, 0.286, 0.393, 0.5, 0.607], + 4: [0.008, 0.016, 0.032, 0.056, 0.095, 0.143, + 0.206, 0.278, 0.365, 0.452, 0.548], + 5: [0.004, 0.008, 0.016, 0.028, 0.048, 0.075, 0.111, + 0.155, 0.21, 0.274, 0.345, .421, 0.5, 0.579]} + pm6 = {1: [0.143, 0.286, 0.428, 0.571], + 2: [0.036, 0.071, 0.143, 0.214, 0.321, 0.429, 0.571], + 3: [0.012, 0.024, 0.048, 0.083, 0.131, + 0.19, 0.274, 0.357, 0.452, 0.548], + 4: [0.005, 0.01, 0.019, 0.033, 0.057, 0.086, 0.129, + 0.176, 0.238, 0.305, 0.381, 0.457, 0.543], # the last element + # of the previous list, 0.543, has been modified from 0.545; + # I assume it was a typo + 5: [0.002, 0.004, 0.009, 0.015, 0.026, 0.041, 0.063, 0.089, + 0.123, 0.165, 0.214, 0.268, 0.331, 0.396, 0.465, 0.535], + 6: [0.001, 0.002, 0.004, 0.008, 0.013, 0.021, 0.032, 0.047, + 0.066, 0.09, 0.12, 0.155, 0.197, 0.242, 0.294, 0.350, + 0.409, 0.469, 0.531]} + + def test_exact_distribution(self): + # I considered parametrize. I decided against it. + p_tables = {3: self.pn3, 4: self.pn4, 5: self.pm5, 6: self.pm6} + for n, table in p_tables.items(): + for m, p in table.items(): + # check p-value against table + u = np.arange(0, len(p)) + assert_allclose(_mwu_state.cdf(k=u, m=m, n=n), p, atol=1e-3) + + # check identity CDF + SF - PMF = 1 + # ( In this implementation, SF(U) includes PMF(U) ) + u2 = np.arange(0, m*n+1) + assert_allclose(_mwu_state.cdf(k=u2, m=m, n=n) + + _mwu_state.sf(k=u2, m=m, n=n) + - _mwu_state.pmf(k=u2, m=m, n=n), 1) + + # check symmetry about mean of U, i.e. pmf(U) = pmf(m*n-U) + pmf = _mwu_state.pmf(k=u2, m=m, n=n) + assert_allclose(pmf, pmf[::-1]) + + # check symmetry w.r.t. 
interchange of m, n + pmf2 = _mwu_state.pmf(k=u2, m=n, n=m) + assert_allclose(pmf, pmf2) + + def test_asymptotic_behavior(self): + np.random.seed(0) + + # for small samples, the asymptotic test is not very accurate + x = np.random.rand(5) + y = np.random.rand(5) + res1 = mannwhitneyu(x, y, method="exact") + res2 = mannwhitneyu(x, y, method="asymptotic") + assert res1.statistic == res2.statistic + assert np.abs(res1.pvalue - res2.pvalue) > 1e-2 + + # for large samples, they agree reasonably well + x = np.random.rand(40) + y = np.random.rand(40) + res1 = mannwhitneyu(x, y, method="exact") + res2 = mannwhitneyu(x, y, method="asymptotic") + assert res1.statistic == res2.statistic + assert np.abs(res1.pvalue - res2.pvalue) < 1e-3 + + # --- Test Corner Cases --- + + def test_exact_U_equals_mean(self): + # Test U == m*n/2 with exact method + # Without special treatment, two-sided p-value > 1 because both + # one-sided p-values are > 0.5 + res_l = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="less", + method="exact") + res_g = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="greater", + method="exact") + assert_equal(res_l.pvalue, res_g.pvalue) + assert res_l.pvalue > 0.5 + + res = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="two-sided", + method="exact") + assert_equal(res, (3, 1)) + # U == m*n/2 for asymptotic case tested in test_gh_2118 + # The reason it's tricky for the asymptotic test has to do with + # continuity correction. + + cases_scalar = [[{"alternative": 'two-sided', "method": "asymptotic"}, + (0, 1)], + [{"alternative": 'less', "method": "asymptotic"}, + (0, 0.5)], + [{"alternative": 'greater', "method": "asymptotic"}, + (0, 0.977249868052)], + [{"alternative": 'two-sided', "method": "exact"}, (0, 1)], + [{"alternative": 'less', "method": "exact"}, (0, 0.5)], + [{"alternative": 'greater', "method": "exact"}, (0, 1)]] + + @pytest.mark.parametrize(("kwds", "result"), cases_scalar) + def test_scalar_data(self, kwds, result): + # just making sure scalars work + assert_allclose(mannwhitneyu(1, 2, **kwds), result) + + def test_equal_scalar_data(self): + # when two scalars are equal, there is an -0.5/0 in the asymptotic + # approximation. R gives pvalue=1.0 for alternatives 'less' and + # 'greater' but NA for 'two-sided'. I don't see why, so I don't + # see a need for a special case to match that behavior. + assert_equal(mannwhitneyu(1, 1, method="exact"), (0.5, 1)) + assert_equal(mannwhitneyu(1, 1, method="asymptotic"), (0.5, 1)) + + # without continuity correction, this becomes 0/0, which really + # is undefined + assert_equal(mannwhitneyu(1, 1, method="asymptotic", + use_continuity=False), (0.5, np.nan)) + + # --- Test Enhancements / Bug Reports --- + + @pytest.mark.parametrize("method", ["asymptotic", "exact"]) + def test_gh_12837_11113(self, method): + # Test that behavior for broadcastable nd arrays is appropriate: + # output shape is correct and all values are equal to when the test + # is performed on one pair of samples at a time. 
+ # Tests that gh-12837 and gh-11113 (requests for n-d input) + # are resolved + np.random.seed(0) + + # arrays are broadcastable except for axis = -3 + axis = -3 + m, n = 7, 10 # sample sizes + x = np.random.rand(m, 3, 8) + y = np.random.rand(6, n, 1, 8) + 0.1 + res = mannwhitneyu(x, y, method=method, axis=axis) + + shape = (6, 3, 8) # appropriate shape of outputs, given inputs + assert res.pvalue.shape == shape + assert res.statistic.shape == shape + + # move axis of test to end for simplicity + x, y = np.moveaxis(x, axis, -1), np.moveaxis(y, axis, -1) + + x = x[None, ...] # give x a zeroth dimension + assert x.ndim == y.ndim + + x = np.broadcast_to(x, shape + (m,)) + y = np.broadcast_to(y, shape + (n,)) + assert x.shape[:-1] == shape + assert y.shape[:-1] == shape + + # loop over pairs of samples + statistics = np.zeros(shape) + pvalues = np.zeros(shape) + for indices in product(*[range(i) for i in shape]): + xi = x[indices] + yi = y[indices] + temp = mannwhitneyu(xi, yi, method=method) + statistics[indices] = temp.statistic + pvalues[indices] = temp.pvalue + + np.testing.assert_equal(res.pvalue, pvalues) + np.testing.assert_equal(res.statistic, statistics) + + def test_gh_11355(self): + # Test for correct behavior with NaN/Inf in input + x = [1, 2, 3, 4] + y = [3, 6, 7, 8, 9, 3, 2, 1, 4, 4, 5] + res1 = mannwhitneyu(x, y) + + # Inf is not a problem. This is a rank test, and it's the largest value + y[4] = np.inf + res2 = mannwhitneyu(x, y) + + assert_equal(res1.statistic, res2.statistic) + assert_equal(res1.pvalue, res2.pvalue) + + # NaNs should propagate by default. + y[4] = np.nan + res3 = mannwhitneyu(x, y) + assert_equal(res3.statistic, np.nan) + assert_equal(res3.pvalue, np.nan) + + cases_11355 = [([1, 2, 3, 4], + [3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5], + 10, 0.1297704873477), + ([1, 2, 3, 4], + [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5], + 8.5, 0.08735617507695), + ([1, 2, np.inf, 4], + [3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5], + 17.5, 0.5988856695752), + ([1, 2, np.inf, 4], + [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5], + 16, 0.4687165824462), + ([1, np.inf, np.inf, 4], + [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5], + 24.5, 0.7912517950119)] + + @pytest.mark.parametrize(("x", "y", "statistic", "pvalue"), cases_11355) + def test_gh_11355b(self, x, y, statistic, pvalue): + # Test for correct behavior with NaN/Inf in input + res = mannwhitneyu(x, y, method='asymptotic') + assert_allclose(res.statistic, statistic, atol=1e-12) + assert_allclose(res.pvalue, pvalue, atol=1e-12) + + cases_9184 = [[True, "less", "asymptotic", 0.900775348204], + [True, "greater", "asymptotic", 0.1223118025635], + [True, "two-sided", "asymptotic", 0.244623605127], + [False, "less", "asymptotic", 0.8896643190401], + [False, "greater", "asymptotic", 0.1103356809599], + [False, "two-sided", "asymptotic", 0.2206713619198], + [True, "less", "exact", 0.8967698967699], + [True, "greater", "exact", 0.1272061272061], + [True, "two-sided", "exact", 0.2544122544123]] + + @pytest.mark.parametrize(("use_continuity", "alternative", + "method", "pvalue_exp"), cases_9184) + def test_gh_9184(self, use_continuity, alternative, method, pvalue_exp): + # gh-9184 might be considered a doc-only bug. Please see the + # documentation to confirm that mannwhitneyu correctly notes + # that the output statistic is that of the first sample (x). In any + # case, check the case provided there against output from R. 
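+ # (Helpful identity when comparing with R: U_x + U_y = m*n. Here
+ # m, n = 10, 5, so the expected statistic 35 for `x` corresponds
+ # to 50 - 35 = 15 for `y`.)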
+ # R code:
+ # options(digits=16)
+ # x <- c(0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
+ # y <- c(1.15, 0.88, 0.90, 0.74, 1.21)
+ # wilcox.test(x, y, alternative = "less", exact = FALSE)
+ # wilcox.test(x, y, alternative = "greater", exact = FALSE)
+ # wilcox.test(x, y, alternative = "two.sided", exact = FALSE)
+ # wilcox.test(x, y, alternative = "less", exact = FALSE,
+ # correct=FALSE)
+ # wilcox.test(x, y, alternative = "greater", exact = FALSE,
+ # correct=FALSE)
+ # wilcox.test(x, y, alternative = "two.sided", exact = FALSE,
+ # correct=FALSE)
+ # wilcox.test(x, y, alternative = "less", exact = TRUE)
+ # wilcox.test(x, y, alternative = "greater", exact = TRUE)
+ # wilcox.test(x, y, alternative = "two.sided", exact = TRUE)
+ statistic_exp = 35
+ x = (0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
+ y = (1.15, 0.88, 0.90, 0.74, 1.21)
+ res = mannwhitneyu(x, y, use_continuity=use_continuity,
+ alternative=alternative, method=method)
+ assert_equal(res.statistic, statistic_exp)
+ assert_allclose(res.pvalue, pvalue_exp)
+
+ def test_gh_6897(self):
+ # Test for correct behavior with empty input
+ with assert_raises(ValueError, match="`x` and `y` must be of nonzero"):
+ mannwhitneyu([], [])
+
+ def test_gh_4067(self):
+ # Test for correct behavior with all-NaN input; the default is to
+ # propagate NaNs
+ a = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
+ b = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
+ res = mannwhitneyu(a, b)
+ assert_equal(res.statistic, np.nan)
+ assert_equal(res.pvalue, np.nan)
+
+ # All cases checked against R wilcox.test, e.g.
+ # options(digits=16)
+ # x = c(1, 2, 3)
+ # y = c(1.5, 2.5)
+ # wilcox.test(x, y, exact=FALSE, alternative='less')
+
+ cases_2118 = [[[1, 2, 3], [1.5, 2.5], "greater", (3, 0.6135850036578)],
+ [[1, 2, 3], [1.5, 2.5], "less", (3, 0.6135850036578)],
+ [[1, 2, 3], [1.5, 2.5], "two-sided", (3, 1.0)],
+ [[1, 2, 3], [2], "greater", (1.5, 0.681324055883)],
+ [[1, 2, 3], [2], "less", (1.5, 0.681324055883)],
+ [[1, 2, 3], [2], "two-sided", (1.5, 1)],
+ [[1, 2], [1, 2], "greater", (2, 0.667497228949)],
+ [[1, 2], [1, 2], "less", (2, 0.667497228949)],
+ [[1, 2], [1, 2], "two-sided", (2, 1)]]
+
+ @pytest.mark.parametrize(["x", "y", "alternative", "expected"], cases_2118)
+ def test_gh_2118(self, x, y, alternative, expected):
+ # test cases in which U == m*n/2 when method is asymptotic;
+ # applying continuity correction could result in p-value > 1
+ res = mannwhitneyu(x, y, use_continuity=True, alternative=alternative,
+ method="asymptotic")
+ assert_allclose(res, expected, rtol=1e-12)
+
+ def test_gh19692_smaller_table(self):
+ # In gh-19692, we noted that the shape of the cache used in calculating
+ # p-values was dependent on the order of the inputs because the sample
+ # sizes n1 and n2 changed. This was indicative of unnecessary cache
+ # growth and redundant calculation. Check that this is resolved.
+ rng = np.random.default_rng(7600451795963068007)
+ x = rng.random(size=5)
+ y = rng.random(size=11)
+ _mwu_state._fmnks = -np.ones((1, 1, 1)) # reset cache
+ stats.mannwhitneyu(x, y, method='exact')
+ shape = _mwu_state._fmnks.shape
+ assert shape[0] <= 6 and shape[1] <= 12 # one more than sizes
+ stats.mannwhitneyu(y, x, method='exact')
+ assert shape == _mwu_state._fmnks.shape # unchanged when sizes are reversed
+
+ # Also, we weren't exploiting the symmetry of the null distribution
+ # to its full potential. Ensure that the null distribution is not
+ # evaluated explicitly for `k > m*n/2`.
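+ # A sketch of the symmetry in question, which is a property of the
+ # null distribution itself: pmf(k; m, n) == pmf(m*n - k; m, n), so
+ # P(U >= k) equals P(U <= m*n - k) and only k <= m*n/2 ever needs
+ # to be tabulated.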
+ _mwu_state._fmnks = -np.ones((1, 1, 1)) # reset cache
+ stats.mannwhitneyu(x, 0*y, method='exact', alternative='greater')
+ shape = _mwu_state._fmnks.shape
+ assert shape[-1] == 1 # k is smallest possible
+ stats.mannwhitneyu(0*x, y, method='exact', alternative='greater')
+ assert shape == _mwu_state._fmnks.shape
+
+ @pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
+ def test_permutation_method(self, alternative):
+ rng = np.random.default_rng(7600451795963068007)
+ x = rng.random(size=(2, 5))
+ y = rng.random(size=(2, 6))
+ res = stats.mannwhitneyu(x, y, method=stats.PermutationMethod(),
+ alternative=alternative, axis=1)
+ res2 = stats.mannwhitneyu(x, y, method='exact',
+ alternative=alternative, axis=1)
+ assert_allclose(res.statistic, res2.statistic, rtol=1e-15)
+ assert_allclose(res.pvalue, res2.pvalue, rtol=1e-15)
+
+ def teardown_method(self):
+ _mwu_state._recursive = None
+
+
+class TestMannWhitneyU_iterative(TestMannWhitneyU):
+ def setup_method(self):
+ _mwu_state._recursive = False
+
+ def teardown_method(self):
+ _mwu_state._recursive = None
+
+
+@pytest.mark.xslow
+def test_mann_whitney_u_switch():
+ # Check that mannwhitneyu switches between the recursive and iterative
+ # implementations at n = 500
+
+ # ensure that recursion is not enforced
+ _mwu_state._recursive = None
+ _mwu_state._fmnks = -np.ones((1, 1, 1))
+
+ rng = np.random.default_rng(9546146887652)
+ x = rng.random(5)
+
+ # use iterative algorithm because n > 500
+ y = rng.random(501)
+ stats.mannwhitneyu(x, y, method='exact')
+ # iterative algorithm doesn't modify _mwu_state._fmnks
+ assert np.all(_mwu_state._fmnks == -1)
+
+ # use recursive algorithm because n <= 500
+ y = rng.random(500)
+ stats.mannwhitneyu(x, y, method='exact')
+
+ # recursive algorithm has modified _mwu_state._fmnks
+ assert not np.all(_mwu_state._fmnks == -1)
+
+
+class TestSomersD(_TestPythranFunc):
+ def setup_method(self):
+ self.dtypes = self.ALL_INTEGER + self.ALL_FLOAT
+ self.arguments = {0: (np.arange(10),
+ self.ALL_INTEGER + self.ALL_FLOAT),
+ 1: (np.arange(10),
+ self.ALL_INTEGER + self.ALL_FLOAT)}
+ input_array = [self.arguments[idx][0] for idx in self.arguments]
+ # In this case, self.partialfunc can simply be stats.somersd,
+ # since `alternative` is an optional argument. If it were required,
+ # we could use functools.partial to freeze its value, because
+ # we mainly test various array inputs here, not str arguments, etc.
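+ # Illustration of such freezing ('greater' is an arbitrary example
+ # value, not what this suite uses):
+ # frozen = functools.partial(stats.somersd, alternative='greater')
+ # frozen(x, y) # same as stats.somersd(x, y, alternative='greater')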
+ self.partialfunc = functools.partial(stats.somersd, + alternative='two-sided') + self.expected = self.partialfunc(*input_array) + + def pythranfunc(self, *args): + res = self.partialfunc(*args) + assert_allclose(res.statistic, self.expected.statistic, atol=1e-15) + assert_allclose(res.pvalue, self.expected.pvalue, atol=1e-15) + + def test_pythranfunc_keywords(self): + # Not specifying the optional keyword args + table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]] + res1 = stats.somersd(table) + # Specifying the optional keyword args with default value + optional_args = self.get_optional_args(stats.somersd) + res2 = stats.somersd(table, **optional_args) + # Check if the results are the same in two cases + assert_allclose(res1.statistic, res2.statistic, atol=1e-15) + assert_allclose(res1.pvalue, res2.pvalue, atol=1e-15) + + def test_like_kendalltau(self): + # All tests correspond with one in test_stats.py `test_kendalltau` + + # case without ties, con-dis equal zero + x = [5, 2, 1, 3, 6, 4, 7, 8] + y = [5, 2, 6, 3, 1, 8, 7, 4] + # Cross-check with result from SAS FREQ: + expected = (0.000000000000000, 1.000000000000000) + res = stats.somersd(x, y) + assert_allclose(res.statistic, expected[0], atol=1e-15) + assert_allclose(res.pvalue, expected[1], atol=1e-15) + + # case without ties, con-dis equal zero + x = [0, 5, 2, 1, 3, 6, 4, 7, 8] + y = [5, 2, 0, 6, 3, 1, 8, 7, 4] + # Cross-check with result from SAS FREQ: + expected = (0.000000000000000, 1.000000000000000) + res = stats.somersd(x, y) + assert_allclose(res.statistic, expected[0], atol=1e-15) + assert_allclose(res.pvalue, expected[1], atol=1e-15) + + # case without ties, con-dis close to zero + x = [5, 2, 1, 3, 6, 4, 7] + y = [5, 2, 6, 3, 1, 7, 4] + # Cross-check with result from SAS FREQ: + expected = (-0.142857142857140, 0.630326953157670) + res = stats.somersd(x, y) + assert_allclose(res.statistic, expected[0], atol=1e-15) + assert_allclose(res.pvalue, expected[1], atol=1e-15) + + # simple case without ties + x = np.arange(10) + y = np.arange(10) + # Cross-check with result from SAS FREQ: + # SAS p value is not provided. + expected = (1.000000000000000, 0) + res = stats.somersd(x, y) + assert_allclose(res.statistic, expected[0], atol=1e-15) + assert_allclose(res.pvalue, expected[1], atol=1e-15) + + # swap a couple values and a couple more + x = np.arange(10) + y = np.array([0, 2, 1, 3, 4, 6, 5, 7, 8, 9]) + # Cross-check with result from SAS FREQ: + expected = (0.911111111111110, 0.000000000000000) + res = stats.somersd(x, y) + assert_allclose(res.statistic, expected[0], atol=1e-15) + assert_allclose(res.pvalue, expected[1], atol=1e-15) + + # same in opposite direction + x = np.arange(10) + y = np.arange(10)[::-1] + # Cross-check with result from SAS FREQ: + # SAS p value is not provided. 
+ expected = (-1.000000000000000, 0) + res = stats.somersd(x, y) + assert_allclose(res.statistic, expected[0], atol=1e-15) + assert_allclose(res.pvalue, expected[1], atol=1e-15) + + # swap a couple values and a couple more + x = np.arange(10) + y = np.array([9, 7, 8, 6, 5, 3, 4, 2, 1, 0]) + # Cross-check with result from SAS FREQ: + expected = (-0.9111111111111111, 0.000000000000000) + res = stats.somersd(x, y) + assert_allclose(res.statistic, expected[0], atol=1e-15) + assert_allclose(res.pvalue, expected[1], atol=1e-15) + + # with some ties + x1 = [12, 2, 1, 12, 2] + x2 = [1, 4, 7, 1, 0] + # Cross-check with result from SAS FREQ: + expected = (-0.500000000000000, 0.304901788178780) + res = stats.somersd(x1, x2) + assert_allclose(res.statistic, expected[0], atol=1e-15) + assert_allclose(res.pvalue, expected[1], atol=1e-15) + + # with only ties in one or both inputs + # SAS will not produce an output for these: + # NOTE: No statistics are computed for x * y because x has fewer + # than 2 nonmissing levels. + # WARNING: No OUTPUT data set is produced for this table because a + # row or column variable has fewer than 2 nonmissing levels and no + # statistics are computed. + + res = stats.somersd([2, 2, 2], [2, 2, 2]) + assert_allclose(res.statistic, np.nan) + assert_allclose(res.pvalue, np.nan) + + res = stats.somersd([2, 0, 2], [2, 2, 2]) + assert_allclose(res.statistic, np.nan) + assert_allclose(res.pvalue, np.nan) + + res = stats.somersd([2, 2, 2], [2, 0, 2]) + assert_allclose(res.statistic, np.nan) + assert_allclose(res.pvalue, np.nan) + + res = stats.somersd([0], [0]) + assert_allclose(res.statistic, np.nan) + assert_allclose(res.pvalue, np.nan) + + # empty arrays provided as input + res = stats.somersd([], []) + assert_allclose(res.statistic, np.nan) + assert_allclose(res.pvalue, np.nan) + + # test unequal length inputs + x = np.arange(10.) + y = np.arange(20.) + assert_raises(ValueError, stats.somersd, x, y) + + def test_asymmetry(self): + # test that somersd is asymmetric w.r.t. 
input order and that the
+ # convention is as described: the first input is the row variable and
+ # the independent variable. Data is from Wikipedia:
+ # https://en.wikipedia.org/wiki/Somers%27_D
+ # but currently that example contradicts itself - it says X is
+ # independent yet takes D_XY
+
+ x = [1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 1, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
+ y = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+ # Cross-check with result from SAS FREQ:
+ d_cr = 0.272727272727270
+ d_rc = 0.342857142857140
+ p = 0.092891940883700 # same p-value for either direction
+ res = stats.somersd(x, y)
+ assert_allclose(res.statistic, d_cr, atol=1e-15)
+ assert_allclose(res.pvalue, p, atol=1e-4)
+ assert_equal(res.table.shape, (3, 2))
+ res = stats.somersd(y, x)
+ assert_allclose(res.statistic, d_rc, atol=1e-15)
+ assert_allclose(res.pvalue, p, atol=1e-15)
+ assert_equal(res.table.shape, (2, 3))
+
+ def test_somers_original(self):
+ # test against Somers' original paper [1]
+
+ # Table 5A
+ # Somers' convention was column IV
+ table = np.array([[8, 2], [6, 5], [3, 4], [1, 3], [2, 3]])
+ # Our convention (and that of SAS FREQ) is row IV
+ table = table.T
+ dyx = 129/340
+ assert_allclose(stats.somersd(table).statistic, dyx)
+
+ # table 7A - d_yx = 1
+ table = np.array([[25, 0], [85, 0], [0, 30]])
+ dxy, dyx = 3300/5425, 3300/3300
+ assert_allclose(stats.somersd(table).statistic, dxy)
+ assert_allclose(stats.somersd(table.T).statistic, dyx)
+
+ # table 7B - d_yx < 0
+ table = np.array([[25, 0], [0, 30], [85, 0]])
+ dyx = -1800/3300
+ assert_allclose(stats.somersd(table.T).statistic, dyx)
+
+ def test_contingency_table_with_zero_rows_cols(self):
+ # test that zero rows/cols in contingency table don't affect result
+
+ N = 100
+ shape = 4, 6
+ size = np.prod(shape)
+
+ np.random.seed(0)
+ s = stats.multinomial.rvs(N, p=np.ones(size)/size).reshape(shape)
+ res = stats.somersd(s)
+
+ s2 = np.insert(s, 2, np.zeros(shape[1]), axis=0)
+ res2 = stats.somersd(s2)
+
+ s3 = np.insert(s, 2, np.zeros(shape[0]), axis=1)
+ res3 = stats.somersd(s3)
+
+ s4 = np.insert(s2, 2, np.zeros(shape[0]+1), axis=1)
+ res4 = stats.somersd(s4)
+
+ # Cross-check with result from SAS FREQ:
+ assert_allclose(res.statistic, -0.116981132075470, atol=1e-15)
+ assert_allclose(res.statistic, res2.statistic)
+ assert_allclose(res.statistic, res3.statistic)
+ assert_allclose(res.statistic, res4.statistic)
+
+ assert_allclose(res.pvalue, 0.156376448188150, atol=1e-15)
+ assert_allclose(res.pvalue, res2.pvalue)
+ assert_allclose(res.pvalue, res3.pvalue)
+ assert_allclose(res.pvalue, res4.pvalue)
+
+ def test_invalid_contingency_tables(self):
+ N = 100
+ shape = 4, 6
+ size = np.prod(shape)
+
+ np.random.seed(0)
+ # start with a valid contingency table
+ s = stats.multinomial.rvs(N, p=np.ones(size)/size).reshape(shape)
+
+ s5 = s - 2
+ message = "All elements of the contingency table must be non-negative"
+ with assert_raises(ValueError, match=message):
+ stats.somersd(s5)
+
+ s6 = s + 0.01
+ message = "All elements of the contingency table must be integer"
+ with assert_raises(ValueError, match=message):
+ stats.somersd(s6)
+
+ message = ("At least two elements of the contingency "
+ "table must be nonzero.")
+ with assert_raises(ValueError, match=message):
+ stats.somersd([[]])
+
+ with assert_raises(ValueError, match=message):
+ stats.somersd([[1]])
+
+ s7 = np.zeros((3, 3))
+ with assert_raises(ValueError, match=message):
+ stats.somersd(s7)
+
+ s7[0, 1] = 1
+ with assert_raises(ValueError, match=message):
+ stats.somersd(s7)
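+
+ # For reference when reading the SAS/Wikipedia cross-checks in this
+ # class: with N_c concordant pairs, N_d discordant pairs, and T_y
+ # pairs tied on y only, the usual convention is
+ # D_yx = (N_c - N_d) / (N_c + N_d + T_y)
+ # i.e. pairs tied on the independent variable x are discarded.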
+ + def test_only_ranks_matter(self): + # only ranks of input data should matter + x = [1, 2, 3] + x2 = [-1, 2.1, np.inf] + y = [3, 2, 1] + y2 = [0, -0.5, -np.inf] + res = stats.somersd(x, y) + res2 = stats.somersd(x2, y2) + assert_equal(res.statistic, res2.statistic) + assert_equal(res.pvalue, res2.pvalue) + + def test_contingency_table_return(self): + # check that contingency table is returned + x = np.arange(10) + y = np.arange(10) + res = stats.somersd(x, y) + assert_equal(res.table, np.eye(10)) + + def test_somersd_alternative(self): + # Test alternative parameter, asymptotic method (due to tie) + + # Based on scipy.stats.test_stats.TestCorrSpearman2::test_alternative + x1 = [1, 2, 3, 4, 5] + x2 = [5, 6, 7, 8, 7] + + # strong positive correlation + expected = stats.somersd(x1, x2, alternative="two-sided") + assert expected.statistic > 0 + + # rank correlation > 0 -> large "less" p-value + res = stats.somersd(x1, x2, alternative="less") + assert_equal(res.statistic, expected.statistic) + assert_allclose(res.pvalue, 1 - (expected.pvalue / 2)) + + # rank correlation > 0 -> small "greater" p-value + res = stats.somersd(x1, x2, alternative="greater") + assert_equal(res.statistic, expected.statistic) + assert_allclose(res.pvalue, expected.pvalue / 2) + + # reverse the direction of rank correlation + x2.reverse() + + # strong negative correlation + expected = stats.somersd(x1, x2, alternative="two-sided") + assert expected.statistic < 0 + + # rank correlation < 0 -> large "greater" p-value + res = stats.somersd(x1, x2, alternative="greater") + assert_equal(res.statistic, expected.statistic) + assert_allclose(res.pvalue, 1 - (expected.pvalue / 2)) + + # rank correlation < 0 -> small "less" p-value + res = stats.somersd(x1, x2, alternative="less") + assert_equal(res.statistic, expected.statistic) + assert_allclose(res.pvalue, expected.pvalue / 2) + + with pytest.raises(ValueError, match="`alternative` must be..."): + stats.somersd(x1, x2, alternative="ekki-ekki") + + @pytest.mark.parametrize("positive_correlation", (False, True)) + def test_somersd_perfect_correlation(self, positive_correlation): + # Before the addition of `alternative`, perfect correlation was + # treated as a special case. Now it is treated like any other case, but + # make sure there are no divide by zero warnings or associated errors + + x1 = np.arange(10) + x2 = x1 if positive_correlation else np.flip(x1) + expected_statistic = 1 if positive_correlation else -1 + + # perfect correlation -> small "two-sided" p-value (0) + res = stats.somersd(x1, x2, alternative="two-sided") + assert res.statistic == expected_statistic + assert res.pvalue == 0 + + # rank correlation > 0 -> large "less" p-value (1) + res = stats.somersd(x1, x2, alternative="less") + assert res.statistic == expected_statistic + assert res.pvalue == (1 if positive_correlation else 0) + + # rank correlation > 0 -> small "greater" p-value (0) + res = stats.somersd(x1, x2, alternative="greater") + assert res.statistic == expected_statistic + assert res.pvalue == (0 if positive_correlation else 1) + + def test_somersd_large_inputs_gh18132(self): + # Test that large inputs where potential overflows could occur give + # the expected output. This is tested in the case of binary inputs. + # See gh-18126. 
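+ # Rough magnitude argument for why overflow is plausible here: with
+ # n = 10**6 observations there are n*(n - 1)/2 ~ 5e11 pairs, far
+ # beyond the 32-bit integer range (~2.1e9), so pair counts must be
+ # accumulated with 64-bit (or float) arithmetic.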
+ + # generate lists of random classes 1-2 (binary) + classes = [1, 2] + n_samples = 10 ** 6 + random.seed(6272161) + x = random.choices(classes, k=n_samples) + y = random.choices(classes, k=n_samples) + + # get value to compare with: sklearn output + # from sklearn import metrics + # val_auc_sklearn = metrics.roc_auc_score(x, y) + # # convert to the Gini coefficient (Gini = (AUC*2)-1) + # val_sklearn = 2 * val_auc_sklearn - 1 + val_sklearn = -0.001528138777036947 + + # calculate the Somers' D statistic, which should be equal to the + # result of val_sklearn until approximately machine precision + val_scipy = stats.somersd(x, y).statistic + assert_allclose(val_sklearn, val_scipy, atol=1e-15) + + +class TestBarnardExact: + """Some tests to show that barnard_exact() works correctly.""" + + @pytest.mark.parametrize( + "input_sample,expected", + [ + ([[43, 40], [10, 39]], (3.555406779643, 0.000362832367)), + ([[100, 2], [1000, 5]], (-1.776382925679, 0.135126970878)), + ([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)), + ([[5, 1], [10, 10]], (1.449486150679, 0.156277546306)), + ([[5, 15], [20, 20]], (-1.851640199545, 0.066363501421)), + ([[5, 16], [20, 25]], (-1.609639949352, 0.116984852192)), + ([[10, 5], [10, 1]], (-1.449486150679, 0.177536588915)), + ([[5, 0], [1, 4]], (2.581988897472, 0.013671875000)), + ([[0, 1], [3, 2]], (-1.095445115010, 0.509667991877)), + ([[0, 2], [6, 4]], (-1.549193338483, 0.197019618792)), + ([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)), + ], + ) + def test_precise(self, input_sample, expected): + """The expected values have been generated by R, using a resolution + for the nuisance parameter of 1e-6 : + ```R + library(Barnard) + options(digits=10) + barnard.test(43, 40, 10, 39, dp=1e-6, pooled=TRUE) + ``` + """ + res = barnard_exact(input_sample) + statistic, pvalue = res.statistic, res.pvalue + assert_allclose([statistic, pvalue], expected) + + @pytest.mark.parametrize( + "input_sample,expected", + [ + ([[43, 40], [10, 39]], (3.920362887717, 0.000289470662)), + ([[100, 2], [1000, 5]], (-1.139432816087, 0.950272080594)), + ([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)), + ([[5, 1], [10, 10]], (1.622375939458, 0.150599922226)), + ([[5, 15], [20, 20]], (-1.974771239528, 0.063038448651)), + ([[5, 16], [20, 25]], (-1.722122973346, 0.133329494287)), + ([[10, 5], [10, 1]], (-1.765469659009, 0.250566655215)), + ([[5, 0], [1, 4]], (5.477225575052, 0.007812500000)), + ([[0, 1], [3, 2]], (-1.224744871392, 0.509667991877)), + ([[0, 2], [6, 4]], (-1.732050807569, 0.197019618792)), + ([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)), + ], + ) + def test_pooled_param(self, input_sample, expected): + """The expected values have been generated by R, using a resolution + for the nuisance parameter of 1e-6 : + ```R + library(Barnard) + options(digits=10) + barnard.test(43, 40, 10, 39, dp=1e-6, pooled=FALSE) + ``` + """ + res = barnard_exact(input_sample, pooled=False) + statistic, pvalue = res.statistic, res.pvalue + assert_allclose([statistic, pvalue], expected) + + def test_raises(self): + # test we raise an error for wrong input number of nuisances. + error_msg = ( + "Number of points `n` must be strictly positive, found 0" + ) + with assert_raises(ValueError, match=error_msg): + barnard_exact([[1, 2], [3, 4]], n=0) + + # test we raise an error for wrong shape of input. + error_msg = "The input `table` must be of shape \\(2, 2\\)." 
+ with assert_raises(ValueError, match=error_msg):
+ barnard_exact(np.arange(6).reshape(2, 3))
+
+ # Test that all values must be nonnegative
+ error_msg = "All values in `table` must be nonnegative."
+ with assert_raises(ValueError, match=error_msg):
+ barnard_exact([[-1, 2], [3, 4]])
+
+ # Test value error on wrong alternative param
+ error_msg = (
+ "`alternative` should be one of {'two-sided', 'less', 'greater'},"
+ " found .*"
+ )
+ with assert_raises(ValueError, match=error_msg):
+ barnard_exact([[1, 2], [3, 4]], "not-correct")
+
+ @pytest.mark.parametrize(
+ "input_sample,expected",
+ [
+ ([[0, 0], [4, 3]], (1.0, 0)),
+ ],
+ )
+ def test_edge_cases(self, input_sample, expected):
+ res = barnard_exact(input_sample)
+ statistic, pvalue = res.statistic, res.pvalue
+ assert_equal(pvalue, expected[0])
+ assert_equal(statistic, expected[1])
+
+ @pytest.mark.parametrize(
+ "input_sample,expected",
+ [
+ ([[0, 5], [0, 10]], (1.0, np.nan)),
+ ([[5, 0], [10, 0]], (1.0, np.nan)),
+ ],
+ )
+ def test_row_or_col_zero(self, input_sample, expected):
+ res = barnard_exact(input_sample)
+ statistic, pvalue = res.statistic, res.pvalue
+ assert_equal(pvalue, expected[0])
+ assert_equal(statistic, expected[1])
+
+ @pytest.mark.parametrize(
+ "input_sample,expected",
+ [
+ ([[2, 7], [8, 2]], (-2.518474945157, 0.009886140845)),
+ ([[7, 200], [300, 8]], (-21.320036698460, 0.0)),
+ ([[21, 28], [1957, 6]], (-30.489638143953, 0.0)),
+ ],
+ )
+ @pytest.mark.parametrize("alternative", ["greater", "less"])
+ def test_less_greater(self, input_sample, expected, alternative):
+ """The expected values have been generated by R, using a resolution
+ for the nuisance parameter of 1e-6:
+ ```R
+ library(Barnard)
+ options(digits=10)
+ a = barnard.test(2, 7, 8, 2, dp=1e-6, pooled=TRUE)
+ a$p.value[1]
+ ```
+ In this test, we are using the "one-sided" return value `a$p.value[1]`
+ to test our pvalue.
+ """ + expected_stat, less_pvalue_expect = expected + + if alternative == "greater": + input_sample = np.array(input_sample)[:, ::-1] + expected_stat = -expected_stat + + res = barnard_exact(input_sample, alternative=alternative) + statistic, pvalue = res.statistic, res.pvalue + assert_allclose( + [statistic, pvalue], [expected_stat, less_pvalue_expect], atol=1e-7 + ) + + +class TestBoschlooExact: + """Some tests to show that boschloo_exact() works correctly.""" + + ATOL = 1e-7 + + @pytest.mark.parametrize( + "input_sample,expected", + [ + ([[2, 7], [8, 2]], (0.01852173, 0.009886142)), + ([[5, 1], [10, 10]], (0.9782609, 0.9450994)), + ([[5, 16], [20, 25]], (0.08913823, 0.05827348)), + ([[10, 5], [10, 1]], (0.1652174, 0.08565611)), + ([[5, 0], [1, 4]], (1, 1)), + ([[0, 1], [3, 2]], (0.5, 0.34375)), + ([[2, 7], [8, 2]], (0.01852173, 0.009886142)), + ([[7, 12], [8, 3]], (0.06406797, 0.03410916)), + ([[10, 24], [25, 37]], (0.2009359, 0.1512882)), + ], + ) + def test_less(self, input_sample, expected): + """The expected values have been generated by R, using a resolution + for the nuisance parameter of 1e-8 : + ```R + library(Exact) + options(digits=10) + data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE) + a = exact.test(data, method="Boschloo", alternative="less", + tsmethod="central", np.interval=TRUE, beta=1e-8) + ``` + """ + res = boschloo_exact(input_sample, alternative="less") + statistic, pvalue = res.statistic, res.pvalue + assert_allclose([statistic, pvalue], expected, atol=self.ATOL) + + @pytest.mark.parametrize( + "input_sample,expected", + [ + ([[43, 40], [10, 39]], (0.0002875544, 0.0001615562)), + ([[2, 7], [8, 2]], (0.9990149, 0.9918327)), + ([[5, 1], [10, 10]], (0.1652174, 0.09008534)), + ([[5, 15], [20, 20]], (0.9849087, 0.9706997)), + ([[5, 16], [20, 25]], (0.972349, 0.9524124)), + ([[5, 0], [1, 4]], (0.02380952, 0.006865367)), + ([[0, 1], [3, 2]], (1, 1)), + ([[0, 2], [6, 4]], (1, 1)), + ([[2, 7], [8, 2]], (0.9990149, 0.9918327)), + ([[7, 12], [8, 3]], (0.9895302, 0.9771215)), + ([[10, 24], [25, 37]], (0.9012936, 0.8633275)), + ], + ) + def test_greater(self, input_sample, expected): + """The expected values have been generated by R, using a resolution + for the nuisance parameter of 1e-8 : + ```R + library(Exact) + options(digits=10) + data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE) + a = exact.test(data, method="Boschloo", alternative="greater", + tsmethod="central", np.interval=TRUE, beta=1e-8) + ``` + """ + res = boschloo_exact(input_sample, alternative="greater") + statistic, pvalue = res.statistic, res.pvalue + assert_allclose([statistic, pvalue], expected, atol=self.ATOL) + + @pytest.mark.parametrize( + "input_sample,expected", + [ + ([[43, 40], [10, 39]], (0.0002875544, 0.0003231115)), + ([[2, 7], [8, 2]], (0.01852173, 0.01977228)), + ([[5, 1], [10, 10]], (0.1652174, 0.1801707)), + ([[5, 16], [20, 25]], (0.08913823, 0.116547)), + ([[5, 0], [1, 4]], (0.02380952, 0.01373073)), + ([[0, 1], [3, 2]], (0.5, 0.6875)), + ([[2, 7], [8, 2]], (0.01852173, 0.01977228)), + ([[7, 12], [8, 3]], (0.06406797, 0.06821831)), + ], + ) + def test_two_sided(self, input_sample, expected): + """The expected values have been generated by R, using a resolution + for the nuisance parameter of 1e-8 : + ```R + library(Exact) + options(digits=10) + data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE) + a = exact.test(data, method="Boschloo", alternative="two.sided", + tsmethod="central", np.interval=TRUE, beta=1e-8) + ``` + """ + res = boschloo_exact(input_sample, alternative="two-sided", 
n=64)
+ # Need n = 64 for 32-bit Python
+ statistic, pvalue = res.statistic, res.pvalue
+ assert_allclose([statistic, pvalue], expected, atol=self.ATOL)
+
+ def test_raises(self):
+ # test that we raise an error for an invalid number of nuisance
+ # parameter points
+ error_msg = (
+ "Number of points `n` must be strictly positive, found 0"
+ )
+ with assert_raises(ValueError, match=error_msg):
+ boschloo_exact([[1, 2], [3, 4]], n=0)
+
+ # test that we raise an error for a wrongly shaped input
+ error_msg = "The input `table` must be of shape \\(2, 2\\)."
+ with assert_raises(ValueError, match=error_msg):
+ boschloo_exact(np.arange(6).reshape(2, 3))
+
+ # Test that all values must be nonnegative
+ error_msg = "All values in `table` must be nonnegative."
+ with assert_raises(ValueError, match=error_msg):
+ boschloo_exact([[-1, 2], [3, 4]])
+
+ # Test value error on wrong alternative param
+ error_msg = (
+ r"`alternative` should be one of \('two-sided', 'less', "
+ r"'greater'\), found .*"
+ )
+ with assert_raises(ValueError, match=error_msg):
+ boschloo_exact([[1, 2], [3, 4]], "not-correct")
+
+ @pytest.mark.parametrize(
+ "input_sample,expected",
+ [
+ ([[0, 5], [0, 10]], (np.nan, np.nan)),
+ ([[5, 0], [10, 0]], (np.nan, np.nan)),
+ ],
+ )
+ def test_row_or_col_zero(self, input_sample, expected):
+ res = boschloo_exact(input_sample)
+ statistic, pvalue = res.statistic, res.pvalue
+ assert_equal(pvalue, expected[0])
+ assert_equal(statistic, expected[1])
+
+ def test_two_sided_gt_1(self):
+ # Check that the returned p-value does not exceed 1 even when twice
+ # the minimum of the one-sided p-values does. See gh-15345.
+ tbl = [[1, 1], [13, 12]]
+ pl = boschloo_exact(tbl, alternative='less').pvalue
+ pg = boschloo_exact(tbl, alternative='greater').pvalue
+ assert 2*min(pl, pg) > 1
+ pt = boschloo_exact(tbl, alternative='two-sided').pvalue
+ assert pt == 1.0
+
+ @pytest.mark.parametrize("alternative", ("less", "greater"))
+ def test_against_fisher_exact(self, alternative):
+ # Check that the statistic of `boschloo_exact` is the same as the
+ # p-value of `fisher_exact` (for one-sided tests). See gh-15345.
+ tbl = [[2, 7], [8, 2]]
+ boschloo_stat = boschloo_exact(tbl, alternative=alternative).statistic
+ fisher_p = stats.fisher_exact(tbl, alternative=alternative)[1]
+ assert_allclose(boschloo_stat, fisher_p)
+
+
+class TestCvm_2samp:
+ def test_invalid_input(self):
+ y = np.arange(5)
+ msg = 'x and y must contain at least two observations.'
+ with pytest.raises(ValueError, match=msg):
+ cramervonmises_2samp([], y)
+ with pytest.raises(ValueError, match=msg):
+ cramervonmises_2samp(y, [1])
+ msg = 'method must be either auto, exact or asymptotic'
+ with pytest.raises(ValueError, match=msg):
+ cramervonmises_2samp(y, y, 'xyz')
+
+ def test_list_input(self):
+ x = [2, 3, 4, 7, 6]
+ y = [0.2, 0.7, 12, 18]
+ r1 = cramervonmises_2samp(x, y)
+ r2 = cramervonmises_2samp(np.array(x), np.array(y))
+ assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
+
+ def test_example_conover(self):
+ # Example 2 in Section 6.2 of W.J. Conover: Practical Nonparametric
+ # Statistics, 1971.
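+ # For orientation, a sketch of the statistic in Anderson's
+ # parametrization (as used by the scipy implementation, up to
+ # notation): with m = len(x), n = len(y), N = m + n, and r, s the
+ # pooled-sample ranks of x and y,
+ # U = m*sum((r - arange(1, m+1))**2) + n*sum((s - arange(1, n+1))**2)
+ # T = U/(m*n*N) - (4*m*n - 1)/(6*N)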
+ x = [7.6, 8.4, 8.6, 8.7, 9.3, 9.9, 10.1, 10.6, 11.2] + y = [5.2, 5.7, 5.9, 6.5, 6.8, 8.2, 9.1, 9.8, 10.8, 11.3, 11.5, 12.3, + 12.5, 13.4, 14.6] + r = cramervonmises_2samp(x, y) + assert_allclose(r.statistic, 0.262, atol=1e-3) + assert_allclose(r.pvalue, 0.18, atol=1e-2) + + @pytest.mark.parametrize('statistic, m, n, pval', + [(710, 5, 6, 48./462), + (1897, 7, 7, 117./1716), + (576, 4, 6, 2./210), + (1764, 6, 7, 2./1716)]) + def test_exact_pvalue(self, statistic, m, n, pval): + # the exact values are taken from Anderson: On the distribution of the + # two-sample Cramer-von-Mises criterion, 1962. + # The values are taken from Table 2, 3, 4 and 5 + assert_equal(_pval_cvm_2samp_exact(statistic, m, n), pval) + + def test_large_sample(self): + # for large samples, the statistic U gets very large + # do a sanity check that p-value is not 0, 1 or nan + np.random.seed(4367) + x = distributions.norm.rvs(size=1000000) + y = distributions.norm.rvs(size=900000) + r = cramervonmises_2samp(x, y) + assert_(0 < r.pvalue < 1) + r = cramervonmises_2samp(x, y+0.1) + assert_(0 < r.pvalue < 1) + + def test_exact_vs_asymptotic(self): + np.random.seed(0) + x = np.random.rand(7) + y = np.random.rand(8) + r1 = cramervonmises_2samp(x, y, method='exact') + r2 = cramervonmises_2samp(x, y, method='asymptotic') + assert_equal(r1.statistic, r2.statistic) + assert_allclose(r1.pvalue, r2.pvalue, atol=1e-2) + + def test_method_auto(self): + x = np.arange(20) + y = [0.5, 4.7, 13.1] + r1 = cramervonmises_2samp(x, y, method='exact') + r2 = cramervonmises_2samp(x, y, method='auto') + assert_equal(r1.pvalue, r2.pvalue) + # switch to asymptotic if one sample has more than 20 observations + x = np.arange(21) + r1 = cramervonmises_2samp(x, y, method='asymptotic') + r2 = cramervonmises_2samp(x, y, method='auto') + assert_equal(r1.pvalue, r2.pvalue) + + def test_same_input(self): + # make sure trivial edge case can be handled + # note that _cdf_cvm_inf(0) = nan. 
implementation avoids nan by + # returning pvalue=1 for very small values of the statistic + x = np.arange(15) + res = cramervonmises_2samp(x, x) + assert_equal((res.statistic, res.pvalue), (0.0, 1.0)) + # check exact p-value + res = cramervonmises_2samp(x[:4], x[:4]) + assert_equal((res.statistic, res.pvalue), (0.0, 1.0)) + + +class TestTukeyHSD: + + data_same_size = ([24.5, 23.5, 26.4, 27.1, 29.9], + [28.4, 34.2, 29.5, 32.2, 30.1], + [26.1, 28.3, 24.3, 26.2, 27.8]) + data_diff_size = ([24.5, 23.5, 26.28, 26.4, 27.1, 29.9, 30.1, 30.1], + [28.4, 34.2, 29.5, 32.2, 30.1], + [26.1, 28.3, 24.3, 26.2, 27.8]) + extreme_size = ([24.5, 23.5, 26.4], + [28.4, 34.2, 29.5, 32.2, 30.1, 28.4, 34.2, 29.5, 32.2, + 30.1], + [26.1, 28.3, 24.3, 26.2, 27.8]) + + sas_same_size = """ + Comparison LowerCL Difference UpperCL Significance + 2 - 3 0.6908830568 4.34 7.989116943 1 + 2 - 1 0.9508830568 4.6 8.249116943 1 + 3 - 2 -7.989116943 -4.34 -0.6908830568 1 + 3 - 1 -3.389116943 0.26 3.909116943 0 + 1 - 2 -8.249116943 -4.6 -0.9508830568 1 + 1 - 3 -3.909116943 -0.26 3.389116943 0 + """ + + sas_diff_size = """ + Comparison LowerCL Difference UpperCL Significance + 2 - 1 0.2679292645 3.645 7.022070736 1 + 2 - 3 0.5934764007 4.34 8.086523599 1 + 1 - 2 -7.022070736 -3.645 -0.2679292645 1 + 1 - 3 -2.682070736 0.695 4.072070736 0 + 3 - 2 -8.086523599 -4.34 -0.5934764007 1 + 3 - 1 -4.072070736 -0.695 2.682070736 0 + """ + + sas_extreme = """ + Comparison LowerCL Difference UpperCL Significance + 2 - 3 1.561605075 4.34 7.118394925 1 + 2 - 1 2.740784879 6.08 9.419215121 1 + 3 - 2 -7.118394925 -4.34 -1.561605075 1 + 3 - 1 -1.964526566 1.74 5.444526566 0 + 1 - 2 -9.419215121 -6.08 -2.740784879 1 + 1 - 3 -5.444526566 -1.74 1.964526566 0 + """ + + @pytest.mark.parametrize("data,res_expect_str,atol", + ((data_same_size, sas_same_size, 1e-4), + (data_diff_size, sas_diff_size, 1e-4), + (extreme_size, sas_extreme, 1e-10), + ), + ids=["equal size sample", + "unequal sample size", + "extreme sample size differences"]) + def test_compare_sas(self, data, res_expect_str, atol): + ''' + SAS code used to generate results for each sample: + DATA ACHE; + INPUT BRAND RELIEF; + CARDS; + 1 24.5 + ... 
+ 3 27.8 + ; + ods graphics on; ODS RTF;ODS LISTING CLOSE; + PROC ANOVA DATA=ACHE; + CLASS BRAND; + MODEL RELIEF=BRAND; + MEANS BRAND/TUKEY CLDIFF; + TITLE 'COMPARE RELIEF ACROSS MEDICINES - ANOVA EXAMPLE'; + ods output CLDiffs =tc; + proc print data=tc; + format LowerCL 17.16 UpperCL 17.16 Difference 17.16; + title "Output with many digits"; + RUN; + QUIT; + ODS RTF close; + ODS LISTING; + ''' + res_expect = np.asarray(res_expect_str.replace(" - ", " ").split()[5:], + dtype=float).reshape((6, 6)) + res_tukey = stats.tukey_hsd(*data) + conf = res_tukey.confidence_interval() + # loop over the comparisons + for i, j, l, s, h, sig in res_expect: + i, j = int(i) - 1, int(j) - 1 + assert_allclose(conf.low[i, j], l, atol=atol) + assert_allclose(res_tukey.statistic[i, j], s, atol=atol) + assert_allclose(conf.high[i, j], h, atol=atol) + assert_allclose((res_tukey.pvalue[i, j] <= .05), sig == 1) + + matlab_sm_siz = """ + 1 2 -8.2491590248597 -4.6 -0.9508409751403 0.0144483269098 + 1 3 -3.9091590248597 -0.26 3.3891590248597 0.9803107240900 + 2 3 0.6908409751403 4.34 7.9891590248597 0.0203311368795 + """ + + matlab_diff_sz = """ + 1 2 -7.02207069748501 -3.645 -0.26792930251500 0.03371498443080 + 1 3 -2.68207069748500 0.695 4.07207069748500 0.85572267328807 + 2 3 0.59347644287720 4.34 8.08652355712281 0.02259047020620 + """ + + @pytest.mark.parametrize("data,res_expect_str,atol", + ((data_same_size, matlab_sm_siz, 1e-12), + (data_diff_size, matlab_diff_sz, 1e-7)), + ids=["equal size sample", + "unequal size sample"]) + def test_compare_matlab(self, data, res_expect_str, atol): + """ + vals = [24.5, 23.5, 26.4, 27.1, 29.9, 28.4, 34.2, 29.5, 32.2, 30.1, + 26.1, 28.3, 24.3, 26.2, 27.8] + names = {'zero', 'zero', 'zero', 'zero', 'zero', 'one', 'one', 'one', + 'one', 'one', 'two', 'two', 'two', 'two', 'two'} + [p,t,stats] = anova1(vals,names,"off"); + [c,m,h,nms] = multcompare(stats, "CType","hsd"); + """ + res_expect = np.asarray(res_expect_str.split(), + dtype=float).reshape((3, 6)) + res_tukey = stats.tukey_hsd(*data) + conf = res_tukey.confidence_interval() + # loop over the comparisons + for i, j, l, s, h, p in res_expect: + i, j = int(i) - 1, int(j) - 1 + assert_allclose(conf.low[i, j], l, atol=atol) + assert_allclose(res_tukey.statistic[i, j], s, atol=atol) + assert_allclose(conf.high[i, j], h, atol=atol) + assert_allclose(res_tukey.pvalue[i, j], p, atol=atol) + + def test_compare_r(self): + """ + Testing against results and p-values from R: + from: https://www.rdocumentation.org/packages/stats/versions/3.6.2/ + topics/TukeyHSD + > require(graphics) + > summary(fm1 <- aov(breaks ~ tension, data = warpbreaks)) + > TukeyHSD(fm1, "tension", ordered = TRUE) + > plot(TukeyHSD(fm1, "tension")) + Tukey multiple comparisons of means + 95% family-wise confidence level + factor levels have been ordered + Fit: aov(formula = breaks ~ tension, data = warpbreaks) + $tension + """ + str_res = """ + diff lwr upr p adj + 2 - 3 4.722222 -4.8376022 14.28205 0.4630831 + 1 - 3 14.722222 5.1623978 24.28205 0.0014315 + 1 - 2 10.000000 0.4401756 19.55982 0.0384598 + """ + res_expect = np.asarray(str_res.replace(" - ", " ").split()[5:], + dtype=float).reshape((3, 6)) + data = ([26, 30, 54, 25, 70, 52, 51, 26, 67, + 27, 14, 29, 19, 29, 31, 41, 20, 44], + [18, 21, 29, 17, 12, 18, 35, 30, 36, + 42, 26, 19, 16, 39, 28, 21, 39, 29], + [36, 21, 24, 18, 10, 43, 28, 15, 26, + 20, 21, 24, 17, 13, 15, 15, 16, 28]) + + res_tukey = stats.tukey_hsd(*data) + conf = res_tukey.confidence_interval() + # loop over the comparisons + for i, j, 
s, l, h, p in res_expect: + i, j = int(i) - 1, int(j) - 1 + # atols are set to the number of digits present in the r result. + assert_allclose(conf.low[i, j], l, atol=1e-7) + assert_allclose(res_tukey.statistic[i, j], s, atol=1e-6) + assert_allclose(conf.high[i, j], h, atol=1e-5) + assert_allclose(res_tukey.pvalue[i, j], p, atol=1e-7) + + def test_engineering_stat_handbook(self): + ''' + Example sourced from: + https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm + ''' + group1 = [6.9, 5.4, 5.8, 4.6, 4.0] + group2 = [8.3, 6.8, 7.8, 9.2, 6.5] + group3 = [8.0, 10.5, 8.1, 6.9, 9.3] + group4 = [5.8, 3.8, 6.1, 5.6, 6.2] + res = stats.tukey_hsd(group1, group2, group3, group4) + conf = res.confidence_interval() + lower = np.asarray([ + [0, 0, 0, -2.25], + [.29, 0, -2.93, .13], + [1.13, 0, 0, .97], + [0, 0, 0, 0]]) + upper = np.asarray([ + [0, 0, 0, 1.93], + [4.47, 0, 1.25, 4.31], + [5.31, 0, 0, 5.15], + [0, 0, 0, 0]]) + + for (i, j) in [(1, 0), (2, 0), (0, 3), (1, 2), (2, 3)]: + assert_allclose(conf.low[i, j], lower[i, j], atol=1e-2) + assert_allclose(conf.high[i, j], upper[i, j], atol=1e-2) + + def test_rand_symm(self): + # test some expected identities of the results + np.random.seed(1234) + data = np.random.rand(3, 100) + res = stats.tukey_hsd(*data) + conf = res.confidence_interval() + # the confidence intervals should be negated symmetric of each other + assert_equal(conf.low, -conf.high.T) + # the `high` and `low` center diagonals should be the same since the + # mean difference in a self comparison is 0. + assert_equal(np.diagonal(conf.high), conf.high[0, 0]) + assert_equal(np.diagonal(conf.low), conf.low[0, 0]) + # statistic array should be antisymmetric with zeros on the diagonal + assert_equal(res.statistic, -res.statistic.T) + assert_equal(np.diagonal(res.statistic), 0) + # p-values should be symmetric and 1 when compared to itself + assert_equal(res.pvalue, res.pvalue.T) + assert_equal(np.diagonal(res.pvalue), 1) + + def test_no_inf(self): + with assert_raises(ValueError, match="...must be finite."): + stats.tukey_hsd([1, 2, 3], [2, np.inf], [6, 7, 3]) + + def test_is_1d(self): + with assert_raises(ValueError, match="...must be one-dimensional"): + stats.tukey_hsd([[1, 2], [2, 3]], [2, 5], [5, 23, 6]) + + def test_no_empty(self): + with assert_raises(ValueError, match="...must be greater than one"): + stats.tukey_hsd([], [2, 5], [4, 5, 6]) + + @pytest.mark.parametrize("nargs", (0, 1)) + def test_not_enough_treatments(self, nargs): + with assert_raises(ValueError, match="...more than 1 treatment."): + stats.tukey_hsd(*([[23, 7, 3]] * nargs)) + + @pytest.mark.parametrize("cl", [-.5, 0, 1, 2]) + def test_conf_level_invalid(self, cl): + with assert_raises(ValueError, match="must be between 0 and 1"): + r = stats.tukey_hsd([23, 7, 3], [3, 4], [9, 4]) + r.confidence_interval(cl) + + def test_2_args_ttest(self): + # that with 2 treatments the `pvalue` is equal to that of `ttest_ind` + res_tukey = stats.tukey_hsd(*self.data_diff_size[:2]) + res_ttest = stats.ttest_ind(*self.data_diff_size[:2]) + assert_allclose(res_ttest.pvalue, res_tukey.pvalue[0, 1]) + assert_allclose(res_ttest.pvalue, res_tukey.pvalue[1, 0]) + + +class TestPoissonMeansTest: + @pytest.mark.parametrize("c1, n1, c2, n2, p_expect", ( + # example from [1], 6. 
Illustrative examples: Example 1 + [0, 100, 3, 100, 0.0884], + [2, 100, 6, 100, 0.1749] + )) + def test_paper_examples(self, c1, n1, c2, n2, p_expect): + res = stats.poisson_means_test(c1, n1, c2, n2) + assert_allclose(res.pvalue, p_expect, atol=1e-4) + + @pytest.mark.parametrize("c1, n1, c2, n2, p_expect, alt, d", ( + # These test cases were produced by the wrapped Fortran code from the + # original authors. Additional tests were created using a slightly + # modified version of that Fortran, found at + # https://github.com/nolanbconaway/poisson-etest. + [20, 10, 20, 10, 0.9999997568929630, 'two-sided', 0], + [10, 10, 10, 10, 0.9999998403241203, 'two-sided', 0], + [50, 15, 1, 1, 0.09920321053409643, 'two-sided', .05], + [3, 100, 20, 300, 0.12202725450896404, 'two-sided', 0], + [3, 12, 4, 20, 0.40416087318539173, 'greater', 0], + [4, 20, 3, 100, 0.008053640402974236, 'greater', 0], + # the published paper does not include a `less` alternative, + # so it was calculated with switched argument order and + # alternative="greater" + [4, 20, 3, 10, 0.3083216325432898, 'less', 0], + [1, 1, 50, 15, 0.09322998607245102, 'less', 0] + )) + def test_fortran_authors(self, c1, n1, c2, n2, p_expect, alt, d): + res = stats.poisson_means_test(c1, n1, c2, n2, alternative=alt, diff=d) + assert_allclose(res.pvalue, p_expect, atol=2e-6, rtol=1e-16) + + def test_different_results(self): + # The implementation in Fortran is known to break down at higher + # counts and observations, so we expect different results. By + # inspection we can infer the p-value to be near one. + count1, count2 = 10000, 10000 + nobs1, nobs2 = 10000, 10000 + res = stats.poisson_means_test(count1, nobs1, count2, nobs2) + assert_allclose(res.pvalue, 1) + + def test_less_than_zero_lambda_hat2(self): + # demonstrates behavior that fixes a known fault from the original + # Fortran. p-value should clearly be near one. + count1, count2 = 0, 0 + nobs1, nobs2 = 1, 1 + res = stats.poisson_means_test(count1, nobs1, count2, nobs2) + assert_allclose(res.pvalue, 1) + + def test_input_validation(self): + count1, count2 = 0, 0 + nobs1, nobs2 = 1, 1 + + # test non-integral events + message = '`k1` and `k2` must be integers.' + with assert_raises(TypeError, match=message): + stats.poisson_means_test(.7, nobs1, count2, nobs2) + with assert_raises(TypeError, match=message): + stats.poisson_means_test(count1, nobs1, .7, nobs2) + + # test negative events + message = '`k1` and `k2` must be greater than or equal to 0.' + with assert_raises(ValueError, match=message): + stats.poisson_means_test(-1, nobs1, count2, nobs2) + with assert_raises(ValueError, match=message): + stats.poisson_means_test(count1, nobs1, -1, nobs2) + + # test negative sample size + message = '`n1` and `n2` must be greater than 0.' + with assert_raises(ValueError, match=message): + stats.poisson_means_test(count1, -1, count2, nobs2) + with assert_raises(ValueError, match=message): + stats.poisson_means_test(count1, nobs1, count2, -1) + + # test negative difference + message = 'diff must be greater than or equal to 0.' + with assert_raises(ValueError, match=message): + stats.poisson_means_test(count1, nobs1, count2, nobs2, diff=-1) + + # test invalid alternative + message = 'Alternative must be one of ...'
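+ # (`match` is treated as a regular expression, so the literal '...' in + # the message above matches any three characters rather than denoting + # an elided suffix)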
+ with assert_raises(ValueError, match=message): + stats.poisson_means_test(1, 2, 1, 2, alternative='error') + + +class TestBWSTest: + + def test_bws_input_validation(self): + rng = np.random.default_rng(4571775098104213308) + + x, y = rng.random(size=(2, 7)) + + message = '`x` and `y` must be exactly one-dimensional.' + with pytest.raises(ValueError, match=message): + stats.bws_test([x, x], [y, y]) + + message = '`x` and `y` must not contain NaNs.' + with pytest.raises(ValueError, match=message): + stats.bws_test([np.nan], y) + + message = '`x` and `y` must be of nonzero size.' + with pytest.raises(ValueError, match=message): + stats.bws_test(x, []) + + message = '`alternative` must be one of...' + with pytest.raises(ValueError, match=message): + stats.bws_test(x, y, alternative='ekki-ekki') + + message = '`method` must be an instance of...' + with pytest.raises(ValueError, match=message): + stats.bws_test(x, y, method=42) + + + def test_against_published_reference(self): + # Test against Example 2 in bws_test Reference [1], pg 9 + # https://link.springer.com/content/pdf/10.1007/BF02762032.pdf + x = [1, 2, 3, 4, 6, 7, 8] + y = [5, 9, 10, 11, 12, 13, 14] + res = stats.bws_test(x, y, alternative='two-sided') + assert_allclose(res.statistic, 5.132, atol=1e-3) + assert_equal(res.pvalue, 10/3432) + + + @pytest.mark.parametrize(('alternative', 'statistic', 'pvalue'), + [('two-sided', 1.7510204081633, 0.1264422777777), + ('less', -1.7510204081633, 0.05754662004662), + ('greater', -1.7510204081633, 0.9424533799534)]) + def test_against_R(self, alternative, statistic, pvalue): + # Test against R library BWStest function bws_test + # library(BWStest) + # options(digits=16) + # x = c(...) + # y = c(...) + # bws_test(x, y, alternative='two.sided') + rng = np.random.default_rng(4571775098104213308) + x, y = rng.random(size=(2, 7)) + res = stats.bws_test(x, y, alternative=alternative) + assert_allclose(res.statistic, statistic, rtol=1e-13) + assert_allclose(res.pvalue, pvalue, atol=1e-2, rtol=1e-1) + + @pytest.mark.parametrize(('alternative', 'statistic', 'pvalue'), + [('two-sided', 1.142629265891, 0.2903950180801), + ('less', 0.99629665877411, 0.8545660222131), + ('greater', 0.99629665877411, 0.1454339777869)]) + def test_against_R_imbalanced(self, alternative, statistic, pvalue): + # Test against R library BWStest function bws_test + # library(BWStest) + # options(digits=16) + # x = c(...) + # y = c(...)
+ # bws_test(x, y, alternative='two.sided') + rng = np.random.default_rng(5429015622386364034) + x = rng.random(size=9) + y = rng.random(size=8) + res = stats.bws_test(x, y, alternative=alternative) + assert_allclose(res.statistic, statistic, rtol=1e-13) + assert_allclose(res.pvalue, pvalue, atol=1e-2, rtol=1e-1) + + def test_method(self): + # Test that `method` parameter has the desired effect + rng = np.random.default_rng(1520514347193347862) + x, y = rng.random(size=(2, 10)) + + rng = np.random.default_rng(1520514347193347862) + method = stats.PermutationMethod(n_resamples=10, random_state=rng) + res1 = stats.bws_test(x, y, method=method) + + assert len(res1.null_distribution) == 10 + + rng = np.random.default_rng(1520514347193347862) + method = stats.PermutationMethod(n_resamples=10, random_state=rng) + res2 = stats.bws_test(x, y, method=method) + + assert_allclose(res1.null_distribution, res2.null_distribution) + + rng = np.random.default_rng(5205143471933478621) + method = stats.PermutationMethod(n_resamples=10, random_state=rng) + res3 = stats.bws_test(x, y, method=method) + + assert not np.allclose(res3.null_distribution, res1.null_distribution) + + def test_directions(self): + # Sanity check of the sign of the one-sided statistic + rng = np.random.default_rng(1520514347193347862) + x = rng.random(size=5) + y = x - 1 + + res = stats.bws_test(x, y, alternative='greater') + assert res.statistic > 0 + assert_equal(res.pvalue, 1 / len(res.null_distribution)) + + res = stats.bws_test(x, y, alternative='less') + assert res.statistic > 0 + assert_equal(res.pvalue, 1) + + res = stats.bws_test(y, x, alternative='less') + assert res.statistic < 0 + assert_equal(res.pvalue, 1 / len(res.null_distribution)) + + res = stats.bws_test(y, x, alternative='greater') + assert res.statistic < 0 + assert_equal(res.pvalue, 1) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_kdeoth.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_kdeoth.py new file mode 100644 index 0000000000000000000000000000000000000000..dec6fd65a19d0b8ac17456a172d6dba324445353 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_kdeoth.py @@ -0,0 +1,608 @@ +from scipy import stats, linalg, integrate +import numpy as np +from numpy.testing import (assert_almost_equal, assert_, assert_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, assert_allclose) +import pytest +from pytest import raises as assert_raises + + +def test_kde_1d(): + #some basic tests comparing to normal distribution + np.random.seed(8765678) + n_basesample = 500 + xn = np.random.randn(n_basesample) + xnmean = xn.mean() + xnstd = xn.std(ddof=1) + + # get kde for original sample + gkde = stats.gaussian_kde(xn) + + # evaluate the density function for the kde for some points + xs = np.linspace(-7,7,501) + kdepdf = gkde.evaluate(xs) + normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd) + intervall = xs[1] - xs[0] + + assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01) + prob1 = gkde.integrate_box_1d(xnmean, np.inf) + prob2 = gkde.integrate_box_1d(-np.inf, xnmean) + assert_almost_equal(prob1, 0.5, decimal=1) + assert_almost_equal(prob2, 0.5, decimal=1) + assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13) + assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13) + + assert_almost_equal(gkde.integrate_kde(gkde), + (kdepdf**2).sum()*intervall, decimal=2) + assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2), + 
(kdepdf*normpdf).sum()*intervall, decimal=2) + + +def test_kde_1d_weighted(): + #some basic tests comparing to normal distribution + np.random.seed(8765678) + n_basesample = 500 + xn = np.random.randn(n_basesample) + wn = np.random.rand(n_basesample) + xnmean = np.average(xn, weights=wn) + xnstd = np.sqrt(np.average((xn-xnmean)**2, weights=wn)) + + # get kde for original sample + gkde = stats.gaussian_kde(xn, weights=wn) + + # evaluate the density function for the kde for some points + xs = np.linspace(-7,7,501) + kdepdf = gkde.evaluate(xs) + normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd) + intervall = xs[1] - xs[0] + + assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01) + prob1 = gkde.integrate_box_1d(xnmean, np.inf) + prob2 = gkde.integrate_box_1d(-np.inf, xnmean) + assert_almost_equal(prob1, 0.5, decimal=1) + assert_almost_equal(prob2, 0.5, decimal=1) + assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13) + assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13) + + assert_almost_equal(gkde.integrate_kde(gkde), + (kdepdf**2).sum()*intervall, decimal=2) + assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2), + (kdepdf*normpdf).sum()*intervall, decimal=2) + + +@pytest.mark.slow +def test_kde_2d(): + #some basic tests comparing to normal distribution + np.random.seed(8765678) + n_basesample = 500 + + mean = np.array([1.0, 3.0]) + covariance = np.array([[1.0, 2.0], [2.0, 6.0]]) + + # Need transpose (shape (2, 500)) for kde + xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T + + # get kde for original sample + gkde = stats.gaussian_kde(xn) + + # evaluate the density function for the kde for some points + x, y = np.mgrid[-7:7:500j, -7:7:500j] + grid_coords = np.vstack([x.ravel(), y.ravel()]) + kdepdf = gkde.evaluate(grid_coords) + kdepdf = kdepdf.reshape(500, 500) + + normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), + mean=mean, cov=covariance) + intervall = y.ravel()[1] - y.ravel()[0] + + assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01) + + small = -1e100 + large = 1e100 + prob1 = gkde.integrate_box([small, mean[1]], [large, large]) + prob2 = gkde.integrate_box([small, small], [large, mean[1]]) + + assert_almost_equal(prob1, 0.5, decimal=1) + assert_almost_equal(prob2, 0.5, decimal=1) + assert_almost_equal(gkde.integrate_kde(gkde), + (kdepdf**2).sum()*(intervall**2), decimal=2) + assert_almost_equal(gkde.integrate_gaussian(mean, covariance), + (kdepdf*normpdf).sum()*(intervall**2), decimal=2) + + +@pytest.mark.slow +def test_kde_2d_weighted(): + #some basic tests comparing to normal distribution + np.random.seed(8765678) + n_basesample = 500 + + mean = np.array([1.0, 3.0]) + covariance = np.array([[1.0, 2.0], [2.0, 6.0]]) + + # Need transpose (shape (2, 500)) for kde + xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T + wn = np.random.rand(n_basesample) + + # get kde for original sample + gkde = stats.gaussian_kde(xn, weights=wn) + + # evaluate the density function for the kde for some points + x, y = np.mgrid[-7:7:500j, -7:7:500j] + grid_coords = np.vstack([x.ravel(), y.ravel()]) + kdepdf = gkde.evaluate(grid_coords) + kdepdf = kdepdf.reshape(500, 500) + + normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), + mean=mean, cov=covariance) + intervall = y.ravel()[1] - y.ravel()[0] + + assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01) + + small = -1e100 + large = 1e100 + prob1 = gkde.integrate_box([small, mean[1]], [large, large]) + prob2 = 
gkde.integrate_box([small, small], [large, mean[1]]) + + assert_almost_equal(prob1, 0.5, decimal=1) + assert_almost_equal(prob2, 0.5, decimal=1) + assert_almost_equal(gkde.integrate_kde(gkde), + (kdepdf**2).sum()*(intervall**2), decimal=2) + assert_almost_equal(gkde.integrate_gaussian(mean, covariance), + (kdepdf*normpdf).sum()*(intervall**2), decimal=2) + + +def test_kde_bandwidth_method(): + def scotts_factor(kde_obj): + """Same as default, just check that it works.""" + return np.power(kde_obj.n, -1./(kde_obj.d+4)) + + np.random.seed(8765678) + n_basesample = 50 + xn = np.random.randn(n_basesample) + + # Default + gkde = stats.gaussian_kde(xn) + # Supply a callable + gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor) + # Supply a scalar + gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor) + + xs = np.linspace(-7,7,51) + kdepdf = gkde.evaluate(xs) + kdepdf2 = gkde2.evaluate(xs) + assert_almost_equal(kdepdf, kdepdf2) + kdepdf3 = gkde3.evaluate(xs) + assert_almost_equal(kdepdf, kdepdf3) + + assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring') + + +def test_kde_bandwidth_method_weighted(): + def scotts_factor(kde_obj): + """Same as default, just check that it works.""" + return np.power(kde_obj.neff, -1./(kde_obj.d+4)) + + np.random.seed(8765678) + n_basesample = 50 + xn = np.random.randn(n_basesample) + + # Default + gkde = stats.gaussian_kde(xn) + # Supply a callable + gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor) + # Supply a scalar + gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor) + + xs = np.linspace(-7,7,51) + kdepdf = gkde.evaluate(xs) + kdepdf2 = gkde2.evaluate(xs) + assert_almost_equal(kdepdf, kdepdf2) + kdepdf3 = gkde3.evaluate(xs) + assert_almost_equal(kdepdf, kdepdf3) + + assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring') + + +# Subclasses that should stay working (extracted from various sources). +# Unfortunately the earlier design of gaussian_kde made it necessary for users +# to create these kinds of subclasses, or call _compute_covariance() directly. 
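+ +# First, a minimal sketch, not part of the original suite, of the +# bandwidth equivalence exercised by the two `bw_method` tests above: +# Scott's rule (the default), an explicit scalar factor, and a callable +# should all yield the same density estimate. The helper name +# `_demo_bw_method_equivalence` is illustrative only; the legacy +# subclasses referred to above follow after it. +def _demo_bw_method_equivalence(): + rng = np.random.default_rng(0) + xn = rng.standard_normal(50) + kde_default = stats.gaussian_kde(xn)  # Scott's rule by default + kde_scalar = stats.gaussian_kde(xn, bw_method=kde_default.factor) + kde_callable = stats.gaussian_kde( + xn, bw_method=lambda kde: np.power(kde.n, -1. / (kde.d + 4))) + xs = np.linspace(-3, 3, 11) + assert_allclose(kde_default(xs), kde_scalar(xs)) + assert_allclose(kde_default(xs), kde_callable(xs))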
+ +class _kde_subclass1(stats.gaussian_kde): + def __init__(self, dataset): + self.dataset = np.atleast_2d(dataset) + self.d, self.n = self.dataset.shape + self.covariance_factor = self.scotts_factor + self._compute_covariance() + + +class _kde_subclass2(stats.gaussian_kde): + def __init__(self, dataset): + self.covariance_factor = self.scotts_factor + super().__init__(dataset) + + +class _kde_subclass4(stats.gaussian_kde): + def covariance_factor(self): + return 0.5 * self.silverman_factor() + + +def test_gaussian_kde_subclassing(): + x1 = np.array([-7, -5, 1, 4, 5], dtype=float) + xs = np.linspace(-10, 10, num=50) + + # gaussian_kde itself + kde = stats.gaussian_kde(x1) + ys = kde(xs) + + # subclass 1 + kde1 = _kde_subclass1(x1) + y1 = kde1(xs) + assert_array_almost_equal_nulp(ys, y1, nulp=10) + + # subclass 2 + kde2 = _kde_subclass2(x1) + y2 = kde2(xs) + assert_array_almost_equal_nulp(ys, y2, nulp=10) + + # subclass 3 was removed because we have no obligation to maintain support + # for user invocation of private methods + + # subclass 4 + kde4 = _kde_subclass4(x1) + y4 = kde4(x1) + y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017] + + assert_array_almost_equal(y_expected, y4, decimal=6) + + # Not a subclass, but check for use of _compute_covariance() + kde5 = kde + kde5.covariance_factor = lambda: kde.factor + kde5._compute_covariance() + y5 = kde5(xs) + assert_array_almost_equal_nulp(ys, y5, nulp=10) + + +def test_gaussian_kde_covariance_caching(): + x1 = np.array([-7, -5, 1, 4, 5], dtype=float) + xs = np.linspace(-10, 10, num=5) + # These expected values are from scipy 0.10, before some changes to + # gaussian_kde. They were not compared with any external reference. + y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475] + + # Set the bandwidth, then reset it to the default. + kde = stats.gaussian_kde(x1) + kde.set_bandwidth(bw_method=0.5) + kde.set_bandwidth(bw_method='scott') + y2 = kde(xs) + + assert_array_almost_equal(y_expected, y2, decimal=7) + + +def test_gaussian_kde_monkeypatch(): + """Ugly, but people may rely on this. See scipy pull request 123, + specifically the linked ML thread "Width of the Gaussian in stats.kde". + If it is necessary to break this later on, that is to be discussed on ML. + """ + x1 = np.array([-7, -5, 1, 4, 5], dtype=float) + xs = np.linspace(-10, 10, num=50) + + # The old monkeypatched version to get at Silverman's Rule. + kde = stats.gaussian_kde(x1) + kde.covariance_factor = kde.silverman_factor + kde._compute_covariance() + y1 = kde(xs) + + # The new saner version. 
+ kde2 = stats.gaussian_kde(x1, bw_method='silverman') + y2 = kde2(xs) + + assert_array_almost_equal_nulp(y1, y2, nulp=10) + + +def test_kde_integer_input(): + """Regression test for #1181.""" + x1 = np.arange(5) + kde = stats.gaussian_kde(x1) + y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721] + assert_array_almost_equal(kde(x1), y_expected, decimal=6) + + +_ftypes = ['float32', 'float64', 'float96', 'float128', 'int32', 'int64'] + + +@pytest.mark.parametrize("bw_type", _ftypes + ["scott", "silverman"]) +@pytest.mark.parametrize("dtype", _ftypes) +def test_kde_output_dtype(dtype, bw_type): + # Check whether the datatypes are available + dtype = getattr(np, dtype, None) + + if bw_type in ["scott", "silverman"]: + bw = bw_type + else: + bw_type = getattr(np, bw_type, None) + bw = bw_type(3) if bw_type else None + + if any(dt is None for dt in [dtype, bw]): + pytest.skip() + + weights = np.arange(5, dtype=dtype) + dataset = np.arange(5, dtype=dtype) + k = stats.gaussian_kde(dataset, bw_method=bw, weights=weights) + points = np.arange(5, dtype=dtype) + result = k(points) + # weights are always cast to float64 + assert result.dtype == np.result_type(dataset, points, np.float64(weights), + k.factor) + + +def test_pdf_logpdf_validation(): + rng = np.random.default_rng(64202298293133848336925499069837723291) + xn = rng.standard_normal((2, 10)) + gkde = stats.gaussian_kde(xn) + xs = rng.standard_normal((3, 10)) + + msg = "points have dimension 3, dataset has dimension 2" + with pytest.raises(ValueError, match=msg): + gkde.logpdf(xs) + + +def test_pdf_logpdf(): + np.random.seed(1) + n_basesample = 50 + xn = np.random.randn(n_basesample) + + # Default + gkde = stats.gaussian_kde(xn) + + xs = np.linspace(-15, 12, 25) + pdf = gkde.evaluate(xs) + pdf2 = gkde.pdf(xs) + assert_almost_equal(pdf, pdf2, decimal=12) + + logpdf = np.log(pdf) + logpdf2 = gkde.logpdf(xs) + assert_almost_equal(logpdf, logpdf2, decimal=12) + + # There are more points than data + gkde = stats.gaussian_kde(xs) + pdf = np.log(gkde.evaluate(xn)) + pdf2 = gkde.logpdf(xn) + assert_almost_equal(pdf, pdf2, decimal=12) + + +def test_pdf_logpdf_weighted(): + np.random.seed(1) + n_basesample = 50 + xn = np.random.randn(n_basesample) + wn = np.random.rand(n_basesample) + + # Default + gkde = stats.gaussian_kde(xn, weights=wn) + + xs = np.linspace(-15, 12, 25) + pdf = gkde.evaluate(xs) + pdf2 = gkde.pdf(xs) + assert_almost_equal(pdf, pdf2, decimal=12) + + logpdf = np.log(pdf) + logpdf2 = gkde.logpdf(xs) + assert_almost_equal(logpdf, logpdf2, decimal=12) + + # There are more points than data + gkde = stats.gaussian_kde(xs, weights=np.random.rand(len(xs))) + pdf = np.log(gkde.evaluate(xn)) + pdf2 = gkde.logpdf(xn) + assert_almost_equal(pdf, pdf2, decimal=12) + + +def test_marginal_1_axis(): + rng = np.random.default_rng(6111799263660870475) + n_data = 50 + n_dim = 10 + dataset = rng.normal(size=(n_dim, n_data)) + points = rng.normal(size=(n_dim, 3)) + + dimensions = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]) # dimensions to keep + + kde = stats.gaussian_kde(dataset) + marginal = kde.marginal(dimensions) + pdf = marginal.pdf(points[dimensions]) + + def marginal_pdf_single(point): + def f(x): + x = np.concatenate(([x], point[dimensions])) + return kde.pdf(x)[0] + return integrate.quad(f, -np.inf, np.inf)[0] + + def marginal_pdf(points): + return np.apply_along_axis(marginal_pdf_single, axis=0, arr=points) + + ref = marginal_pdf(points) + + assert_allclose(pdf, ref, rtol=1e-6) + + +@pytest.mark.xslow +def test_marginal_2_axis(): 
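+ # Same approach as test_marginal_1_axis above, except that two of the + # four dimensions are kept and the other two are integrated out with + # dblquad to form the reference values.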
+ rng = np.random.default_rng(6111799263660870475) + n_data = 30 + n_dim = 4 + dataset = rng.normal(size=(n_dim, n_data)) + points = rng.normal(size=(n_dim, 3)) + + dimensions = np.array([1, 3]) # dimensions to keep + + kde = stats.gaussian_kde(dataset) + marginal = kde.marginal(dimensions) + pdf = marginal.pdf(points[dimensions]) + + def marginal_pdf(points): + def marginal_pdf_single(point): + def f(y, x): + w, z = point[dimensions] + x = np.array([x, w, y, z]) + return kde.pdf(x)[0] + return integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)[0] + + return np.apply_along_axis(marginal_pdf_single, axis=0, arr=points) + + ref = marginal_pdf(points) + + assert_allclose(pdf, ref, rtol=1e-6) + + +def test_marginal_iv(): + # test input validation + rng = np.random.default_rng(6111799263660870475) + n_data = 30 + n_dim = 4 + dataset = rng.normal(size=(n_dim, n_data)) + points = rng.normal(size=(n_dim, 3)) + + kde = stats.gaussian_kde(dataset) + + # check that positive and negative indices are equivalent + dimensions1 = [-1, 1] + marginal1 = kde.marginal(dimensions1) + pdf1 = marginal1.pdf(points[dimensions1]) + + dimensions2 = [3, -3] + marginal2 = kde.marginal(dimensions2) + pdf2 = marginal2.pdf(points[dimensions2]) + + assert_equal(pdf1, pdf2) + + # IV for non-integer dimensions + message = "Elements of `dimensions` must be integers..." + with pytest.raises(ValueError, match=message): + kde.marginal([1, 2.5]) + + # IV for uniqueness + message = "All elements of `dimensions` must be unique." + with pytest.raises(ValueError, match=message): + kde.marginal([1, 2, 2]) + + # IV for out-of-range dimensions + message = (r"Dimensions \[-5 6\] are invalid for a distribution in 4...") + with pytest.raises(ValueError, match=message): + kde.marginal([1, -5, 6]) + + +@pytest.mark.xslow +def test_logpdf_overflow(): + # regression test for gh-12988; testing against linalg instability for + # very high dimensionality kde + np.random.seed(1) + n_dimensions = 2500 + n_samples = 5000 + xn = np.array([np.random.randn(n_samples) + (n) for n in range( + 0, n_dimensions)]) + + # Default + gkde = stats.gaussian_kde(xn) + + logpdf = gkde.logpdf(np.arange(0, n_dimensions)) + np.testing.assert_equal(np.isneginf(logpdf[0]), False) + np.testing.assert_equal(np.isnan(logpdf[0]), False) + + +def test_weights_intact(): + # regression test for gh-9709: weights are not modified + np.random.seed(12345) + vals = np.random.lognormal(size=100) + weights = np.random.choice([1.0, 10.0, 100], size=vals.size) + orig_weights = weights.copy() + + stats.gaussian_kde(np.log10(vals), weights=weights) + assert_allclose(weights, orig_weights, atol=1e-14, rtol=1e-14) + + +def test_weights_integer(): + # integer weights are OK, cf gh-9709 (comment) + np.random.seed(12345) + values = [0.2, 13.5, 21.0, 75.0, 99.0] + weights = [1, 2, 4, 8, 16] # a list of integers + pdf_i = stats.gaussian_kde(values, weights=weights) + pdf_f = stats.gaussian_kde(values, weights=np.float64(weights)) + + xn = [0.3, 11, 88] + assert_allclose(pdf_i.evaluate(xn), + pdf_f.evaluate(xn), atol=1e-14, rtol=1e-14) + + +def test_seed(): + # Test the seed option of the resample method + def test_seed_sub(gkde_trail): + n_sample = 200 + # The results should be different without using a seed + samp1 = gkde_trail.resample(n_sample) + samp2 = gkde_trail.resample(n_sample) + assert_raises( + AssertionError, assert_allclose, samp1, samp2, atol=1e-13 + ) + # Use integer seed + seed = 831 + samp1 = gkde_trail.resample(n_sample, seed=seed) + samp2 = gkde_trail.resample(n_sample,
seed=seed) + assert_allclose(samp1, samp2, atol=1e-13) + # Use RandomState + rstate1 = np.random.RandomState(seed=138) + samp1 = gkde_trail.resample(n_sample, seed=rstate1) + rstate2 = np.random.RandomState(seed=138) + samp2 = gkde_trail.resample(n_sample, seed=rstate2) + assert_allclose(samp1, samp2, atol=1e-13) + + # check that np.random.Generator can be used (numpy >= 1.17) + if hasattr(np.random, 'default_rng'): + # obtain a np.random.Generator object + rng = np.random.default_rng(1234) + gkde_trail.resample(n_sample, seed=rng) + + np.random.seed(8765678) + n_basesample = 500 + wn = np.random.rand(n_basesample) + # Test 1D case + xn_1d = np.random.randn(n_basesample) + + gkde_1d = stats.gaussian_kde(xn_1d) + test_seed_sub(gkde_1d) + gkde_1d_weighted = stats.gaussian_kde(xn_1d, weights=wn) + test_seed_sub(gkde_1d_weighted) + + # Test 2D case + mean = np.array([1.0, 3.0]) + covariance = np.array([[1.0, 2.0], [2.0, 6.0]]) + xn_2d = np.random.multivariate_normal(mean, covariance, size=n_basesample).T + + gkde_2d = stats.gaussian_kde(xn_2d) + test_seed_sub(gkde_2d) + gkde_2d_weighted = stats.gaussian_kde(xn_2d, weights=wn) + test_seed_sub(gkde_2d_weighted) + + +def test_singular_data_covariance_gh10205(): + # When the data lie in a lower-dimensional subspace and this causes + # an exception, check that the error message is informative. + rng = np.random.default_rng(2321583144339784787) + mu = np.array([1, 10, 20]) + sigma = np.array([[4, 10, 0], [10, 25, 0], [0, 0, 100]]) + data = rng.multivariate_normal(mu, sigma, 1000) + try: # doesn't raise any error on some platforms, and that's OK + stats.gaussian_kde(data.T) + except linalg.LinAlgError: + msg = "The data appears to lie in a lower-dimensional subspace..." + with assert_raises(linalg.LinAlgError, match=msg): + stats.gaussian_kde(data.T) + + +def test_fewer_points_than_dimensions_gh17436(): + # When the number of points is fewer than the number of dimensions, + # the covariance matrix would be singular, and the exception tested in + # test_singular_data_covariance_gh10205 would occur. However, sometimes + # this occurs when the user passes in the transpose of what `gaussian_kde` + # expects. This can result in a huge covariance matrix, so bail early. + rng = np.random.default_rng(2046127537594925772) + rvs = rng.multivariate_normal(np.zeros(3), np.eye(3), size=5) + message = "Number of dimensions is greater than number of samples..." + with pytest.raises(ValueError, match=message): + stats.gaussian_kde(rvs) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_morestats.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_morestats.py new file mode 100644 index 0000000000000000000000000000000000000000..5fa02fe4200000131ace132c4b685c325d3703f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_morestats.py @@ -0,0 +1,2970 @@ +# Author: Travis Oliphant, 2002 +# +# Further enhancements and tests added by numerous SciPy developers.
+# +import warnings +import sys +from functools import partial + +import numpy as np +from numpy.random import RandomState +from numpy.testing import (assert_array_equal, assert_almost_equal, + assert_array_less, assert_array_almost_equal, + assert_, assert_allclose, assert_equal, + suppress_warnings) +import pytest +from pytest import raises as assert_raises +import re +from scipy import optimize, stats, special +from scipy.stats._morestats import _abw_state, _get_As_weibull, _Avals_weibull +from .common_tests import check_named_results +from .._hypotests import _get_wilcoxon_distr, _get_wilcoxon_distr2 +from scipy.stats._binomtest import _binary_search_for_binom_tst +from scipy.stats._distr_params import distcont + +distcont = dict(distcont) # type: ignore + +# Matplotlib is not a scipy dependency but is optionally used in probplot, so +# check if it's available +try: + import matplotlib + matplotlib.rcParams['backend'] = 'Agg' + import matplotlib.pyplot as plt + have_matplotlib = True +except Exception: + have_matplotlib = False + + +# test data gear.dat from NIST for Levene and Bartlett test +# https://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm +g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000] +g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988] +g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996] +g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996] +g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996] +g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996] +g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002] +g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006] +g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991] +g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997] + + +# The loggamma RVS stream is changing due to gh-13349; this version +# preserves the old stream so that tests don't change. +def _old_loggamma_rvs(*args, **kwargs): + return np.log(stats.gamma.rvs(*args, **kwargs)) + + +class TestBayes_mvs: + def test_basic(self): + # Expected values in this test simply taken from the function. 
For + # some checks regarding correctness of implementation, see review in + # gh-674 + data = [6, 9, 12, 7, 8, 8, 13] + mean, var, std = stats.bayes_mvs(data) + assert_almost_equal(mean.statistic, 9.0) + assert_allclose(mean.minmax, (7.103650222492964, 10.896349777507034), + rtol=1e-6) + + assert_almost_equal(var.statistic, 10.0) + assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018), + rtol=1e-09) + + assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14) + assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312), + rtol=1e-14) + + def test_empty_input(self): + assert_raises(ValueError, stats.bayes_mvs, []) + + def test_result_attributes(self): + x = np.arange(15) + attributes = ('statistic', 'minmax') + res = stats.bayes_mvs(x) + + for i in res: + check_named_results(i, attributes) + + +class TestMvsdist: + def test_basic(self): + data = [6, 9, 12, 7, 8, 8, 13] + mean, var, std = stats.mvsdist(data) + assert_almost_equal(mean.mean(), 9.0) + assert_allclose(mean.interval(0.9), (7.103650222492964, + 10.896349777507034), rtol=1e-14) + + assert_almost_equal(var.mean(), 10.0) + assert_allclose(var.interval(0.9), (3.1767242068607087, + 24.45910381334018), rtol=1e-09) + + assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14) + assert_allclose(std.interval(0.9), (1.7823367265645145, + 4.9456146050146312), rtol=1e-14) + + def test_empty_input(self): + assert_raises(ValueError, stats.mvsdist, []) + + def test_bad_arg(self): + # Raise ValueError if fewer than two data points are given. + data = [1] + assert_raises(ValueError, stats.mvsdist, data) + + def test_warns(self): + # regression test for gh-5270 + # make sure there are no spurious divide-by-zero warnings + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) + [x.mean() for x in stats.mvsdist([1, 2, 3])] + [x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])] + + +class TestShapiro: + def test_basic(self): + x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46, + 4.43, 0.21, 4.75, 0.71, 1.52, 3.24, + 0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66] + w, pw = stats.shapiro(x1) + shapiro_test = stats.shapiro(x1) + assert_almost_equal(w, 0.90047299861907959, decimal=6) + assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6) + assert_almost_equal(pw, 0.042089745402336121, decimal=6) + assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6) + + x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11, + 3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69, + 0.08, 3.67, 2.81, 3.49] + w, pw = stats.shapiro(x2) + shapiro_test = stats.shapiro(x2) + assert_almost_equal(w, 0.9590270, decimal=6) + assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6) + assert_almost_equal(pw, 0.52460, decimal=3) + assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3) + + # Verified against R + x3 = stats.norm.rvs(loc=5, scale=3, size=100, random_state=12345678) + w, pw = stats.shapiro(x3) + shapiro_test = stats.shapiro(x3) + assert_almost_equal(w, 0.9772805571556091, decimal=6) + assert_almost_equal(shapiro_test.statistic, 0.9772805571556091, decimal=6) + assert_almost_equal(pw, 0.08144091814756393, decimal=3) + assert_almost_equal(shapiro_test.pvalue, 0.08144091814756393, decimal=3) + + # Extracted from original paper + x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614, + 0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206, + 3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351] + W_expected = 0.83467 + p_expected = 0.000914 + w, pw 
= stats.shapiro(x4) + shapiro_test = stats.shapiro(x4) + assert_almost_equal(w, W_expected, decimal=4) + assert_almost_equal(shapiro_test.statistic, W_expected, decimal=4) + assert_almost_equal(pw, p_expected, decimal=5) + assert_almost_equal(shapiro_test.pvalue, p_expected, decimal=5) + + def test_2d(self): + x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46, + 4.43, 0.21, 4.75], [0.71, 1.52, 3.24, + 0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]] + w, pw = stats.shapiro(x1) + shapiro_test = stats.shapiro(x1) + assert_almost_equal(w, 0.90047299861907959, decimal=6) + assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6) + assert_almost_equal(pw, 0.042089745402336121, decimal=6) + assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6) + + x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11, + 3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69, + 0.08, 3.67, 2.81, 3.49]] + w, pw = stats.shapiro(x2) + shapiro_test = stats.shapiro(x2) + assert_almost_equal(w, 0.9590270, decimal=6) + assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6) + assert_almost_equal(pw, 0.52460, decimal=3) + assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3) + + def test_empty_input(self): + assert_raises(ValueError, stats.shapiro, []) + assert_raises(ValueError, stats.shapiro, [[], [], []]) + + def test_not_enough_values(self): + assert_raises(ValueError, stats.shapiro, [1, 2]) + assert_raises(ValueError, stats.shapiro, np.array([[], [2]], dtype=object)) + + def test_bad_arg(self): + # Length of x is less than 3. + x = [1] + assert_raises(ValueError, stats.shapiro, x) + + def test_nan_input(self): + x = np.arange(10.) + x[9] = np.nan + + w, pw = stats.shapiro(x) + shapiro_test = stats.shapiro(x) + assert_equal(w, np.nan) + assert_equal(shapiro_test.statistic, np.nan) + # Originally, shapiro returned a p-value of 1 in this case, + # but there is no way to produce a numerical p-value if the + # statistic is not a number. NaN is more appropriate. + assert_almost_equal(pw, np.nan) + assert_almost_equal(shapiro_test.pvalue, np.nan) + + def test_gh14462(self): + # shapiro is theoretically location-invariant, but when the magnitude + # of the values is much greater than the variance, there can be + # numerical issues. Fixed by subtracting median from the data. + # See gh-14462. + + trans_val, maxlog = stats.boxcox([122500, 474400, 110400]) + res = stats.shapiro(trans_val) + + # Reference from R: + # options(digits=16) + # x = c(0.00000000e+00, 3.39996924e-08, -6.35166875e-09) + # shapiro.test(x) + ref = (0.86468431705371, 0.2805581751566) + + assert_allclose(res, ref, rtol=1e-5) + + def test_length_3_gh18322(self): + # gh-18322 reported that the p-value could be negative for input of + # length 3. Check that this is resolved. + res = stats.shapiro([0.6931471805599453, 0.0, 0.0]) + assert res.pvalue >= 0 + + # R `shapiro.test` doesn't produce an accurate p-value in the case + # above. Check that the formula used in `stats.shapiro` is not wrong. 
+ # options(digits=16) + # x = c(-0.7746653110021126, -0.4344432067942129, 1.8157053280290931) + # shapiro.test(x) + x = [-0.7746653110021126, -0.4344432067942129, 1.8157053280290931] + res = stats.shapiro(x) + assert_allclose(res.statistic, 0.84658770645509) + assert_allclose(res.pvalue, 0.2313666489882, rtol=1e-6) + + +class TestAnderson: + def test_normal(self): + rs = RandomState(1234567890) + x1 = rs.standard_exponential(size=50) + x2 = rs.standard_normal(size=50) + A, crit, sig = stats.anderson(x1) + assert_array_less(crit[:-1], A) + A, crit, sig = stats.anderson(x2) + assert_array_less(A, crit[-2:]) + + v = np.ones(10) + v[0] = 0 + A, crit, sig = stats.anderson(v) + # The expected statistic 3.208057 was computed independently of scipy. + # For example, in R: + # > library(nortest) + # > v <- rep(1, 10) + # > v[1] <- 0 + # > result <- ad.test(v) + # > result$statistic + # A + # 3.208057 + assert_allclose(A, 3.208057) + + def test_expon(self): + rs = RandomState(1234567890) + x1 = rs.standard_exponential(size=50) + x2 = rs.standard_normal(size=50) + A, crit, sig = stats.anderson(x1, 'expon') + assert_array_less(A, crit[-2:]) + with np.errstate(all='ignore'): + A, crit, sig = stats.anderson(x2, 'expon') + assert_(A > crit[-1]) + + def test_gumbel(self): + # Regression test for gh-6306. Before that issue was fixed, + # this case would return a2=inf. + v = np.ones(100) + v[0] = 0.0 + a2, crit, sig = stats.anderson(v, 'gumbel') + # A brief reimplementation of the calculation of the statistic. + n = len(v) + xbar, s = stats.gumbel_l.fit(v) + logcdf = stats.gumbel_l.logcdf(v, xbar, s) + logsf = stats.gumbel_l.logsf(v, xbar, s) + i = np.arange(1, n+1) + expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1])) + + assert_allclose(a2, expected_a2) + + def test_bad_arg(self): + assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp') + + def test_result_attributes(self): + rs = RandomState(1234567890) + x = rs.standard_exponential(size=50) + res = stats.anderson(x) + attributes = ('statistic', 'critical_values', 'significance_level') + check_named_results(res, attributes) + + def test_gumbel_l(self): + # gh-2592, gh-6337 + # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist. + rs = RandomState(1234567890) + x = rs.gumbel(size=100) + A1, crit1, sig1 = stats.anderson(x, 'gumbel') + A2, crit2, sig2 = stats.anderson(x, 'gumbel_l') + + assert_allclose(A2, A1) + + def test_gumbel_r(self): + # gh-2592, gh-6337 + # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist. + rs = RandomState(1234567890) + x1 = rs.gumbel(size=100) + x2 = np.ones(100) + # A constant array is a degenerate case and breaks gumbel_r.fit, so + # change one value in x2. 
+ x2[0] = 0.996 + A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r') + A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r') + + assert_array_less(A1, crit1[-2:]) + assert_(A2 > crit2[-1]) + + def test_weibull_min_case_A(self): + # data and reference values from `anderson` reference [7] + x = np.array([225, 171, 198, 189, 189, 135, 162, 135, 117, 162]) + res = stats.anderson(x, 'weibull_min') + m, loc, scale = res.fit_result.params + assert_allclose((m, loc, scale), (2.38, 99.02, 78.23), rtol=2e-3) + assert_allclose(res.statistic, 0.260, rtol=1e-3) + assert res.statistic < res.critical_values[0] + + c = 1 / m # ~0.42 + assert_allclose(c, 1/2.38, rtol=2e-3) + # interpolate between rows for c=0.4 and c=0.45, indices -3 and -2 + As40 = _Avals_weibull[-3] + As45 = _Avals_weibull[-2] + As_ref = As40 + (c - 0.4)/(0.45 - 0.4) * (As45 - As40) + # atol=1e-3 because results are rounded up to the next third decimal + assert np.all(res.critical_values > As_ref) + assert_allclose(res.critical_values, As_ref, atol=1e-3) + + def test_weibull_min_case_B(self): + # From `anderson` reference [7] + x = np.array([74, 57, 48, 29, 502, 12, 70, 21, + 29, 386, 59, 27, 153, 26, 326]) + message = "Maximum likelihood estimation has converged to " + with pytest.raises(ValueError, match=message): + stats.anderson(x, 'weibull_min') + + def test_weibull_warning_error(self): + # Check for warning message when there are too few observations + # This is also an example in which an error occurs during fitting + x = -np.array([225, 75, 57, 168, 107, 12, 61, 43, 29]) + wmessage = "Critical values of the test statistic are given for the..." + emessage = "An error occurred while fitting the Weibull distribution..." + wcontext = pytest.warns(UserWarning, match=wmessage) + econtext = pytest.raises(ValueError, match=emessage) + with wcontext, econtext: + stats.anderson(x, 'weibull_min') + + @pytest.mark.parametrize('distname', + ['norm', 'expon', 'gumbel_l', 'extreme1', + 'gumbel', 'gumbel_r', 'logistic', 'weibull_min']) + def test_anderson_fit_params(self, distname): + # check that anderson now returns a FitResult + rng = np.random.default_rng(330691555377792039) + real_distname = ('gumbel_l' if distname in {'extreme1', 'gumbel'} + else distname) + dist = getattr(stats, real_distname) + params = distcont[real_distname] + x = dist.rvs(*params, size=1000, random_state=rng) + res = stats.anderson(x, distname) + assert res.fit_result.success + + def test_anderson_weibull_As(self): + m = 1 # "when mi < 2, so that c > 0.5, the last line...should be used" + assert_equal(_get_As_weibull(1/m), _Avals_weibull[-1]) + m = np.inf + assert_equal(_get_As_weibull(1/m), _Avals_weibull[0]) + + +class TestAndersonKSamp: + def test_example1a(self): + # Example data from Scholz & Stephens (1987), originally + # published in Lehmann (1995, Nonparametrics, Statistical + # Methods Based on Ranks, p. 
309) + # Pass a mixture of lists and arrays + t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0] + t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) + t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0]) + t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8]) + + Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False) + + assert_almost_equal(Tk, 4.449, 3) + assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459], + tm[0:5], 4) + assert_allclose(p, 0.0021, atol=0.00025) + + def test_example1b(self): + # Example data from Scholz & Stephens (1987), originally + # published in Lehmann (1995, Nonparametrics, Statistical + # Methods Based on Ranks, p. 309) + # Pass arrays + t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]) + t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) + t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0]) + t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8]) + Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True) + + assert_almost_equal(Tk, 4.480, 3) + assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459], + tm[0:5], 4) + assert_allclose(p, 0.0020, atol=0.00025) + + @pytest.mark.slow + def test_example2a(self): + # Example data taken from an earlier technical report of + # Scholz and Stephens + # Pass lists instead of arrays + t1 = [194, 15, 41, 29, 33, 181] + t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118] + t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34] + t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29, + 118, 25, 156, 310, 76, 26, 44, 23, 62] + t5 = [130, 208, 70, 101, 208] + t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27] + t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33] + t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5, + 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95] + t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82, + 54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24] + t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36, + 22, 139, 210, 97, 30, 23, 13, 14] + t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438] + t12 = [50, 254, 5, 283, 35, 12] + t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130] + t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66, + 61, 34] + + samples = (t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14) + Tk, tm, p = stats.anderson_ksamp(samples, midrank=False) + assert_almost_equal(Tk, 3.288, 3) + assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009], + tm[0:5], 4) + assert_allclose(p, 0.0041, atol=0.00025) + + rng = np.random.default_rng(6989860141921615054) + method = stats.PermutationMethod(n_resamples=9999, random_state=rng) + res = stats.anderson_ksamp(samples, midrank=False, method=method) + assert_array_equal(res.statistic, Tk) + assert_array_equal(res.critical_values, tm) + assert_allclose(res.pvalue, p, atol=6e-4) + + def test_example2b(self): + # Example data taken from an earlier technical report of + # Scholz and Stephens + t1 = [194, 15, 41, 29, 33, 181] + t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118] + t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34] + t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29, + 118, 25, 156, 310, 76, 26, 44, 23, 62] + t5 = [130, 208, 70, 101, 208] + t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27] + t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33] + t8 = [23, 261, 87, 7, 120, 
14, 62, 47, 225, 71, 246, 21, 42, 20, 5, + 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95] + t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82, + 54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24] + t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36, + 22, 139, 210, 97, 30, 23, 13, 14] + t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438] + t12 = [50, 254, 5, 283, 35, 12] + t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130] + t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66, + 61, 34] + + Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8, + t9, t10, t11, t12, t13, t14), + midrank=True) + + assert_almost_equal(Tk, 3.294, 3) + assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009], + tm[0:5], 4) + assert_allclose(p, 0.0041, atol=0.00025) + + def test_R_kSamples(self): + # test values generated with R package kSamples + # package version 1.2-6 (2017-06-14) + # r1 = 1:100 + # continuous case (no ties) --> version 1 + # res <- kSamples::ad.test(r1, r1 + 40.5) + # res$ad[1, "T.AD"] # 41.105 + # res$ad[1, " asympt. P-value"] # 5.8399e-18 + # + # discrete case (ties allowed) --> version 2 (here: midrank=True) + # res$ad[2, "T.AD"] # 41.235 + # + # res <- kSamples::ad.test(r1, r1 + .5) + # res$ad[1, "T.AD"] # -1.2824 + # res$ad[1, " asympt. P-value"] # 1 + # res$ad[2, "T.AD"] # -1.2944 + # + # res <- kSamples::ad.test(r1, r1 + 7.5) + # res$ad[1, "T.AD"] # 1.4923 + # res$ad[1, " asympt. P-value"] # 0.077501 + # + # res <- kSamples::ad.test(r1, r1 + 6) + # res$ad[2, "T.AD"] # 0.63892 + # res$ad[2, " asympt. P-value"] # 0.17981 + # + # res <- kSamples::ad.test(r1, r1 + 11.5) + # res$ad[1, "T.AD"] # 4.5042 + # res$ad[1, " asympt. P-value"] # 0.00545 + # + # res <- kSamples::ad.test(r1, r1 + 13.5) + # res$ad[1, "T.AD"] # 6.2982 + # res$ad[1, " asympt.
P-value"] # 0.00118 + + x1 = np.linspace(1, 100, 100) + # test case: different distributions;p-value floored at 0.001 + # test case for issue #5493 / #8536 + with suppress_warnings() as sup: + sup.filter(UserWarning, message='p-value floored') + s, _, p = stats.anderson_ksamp([x1, x1 + 40.5], midrank=False) + assert_almost_equal(s, 41.105, 3) + assert_equal(p, 0.001) + + with suppress_warnings() as sup: + sup.filter(UserWarning, message='p-value floored') + s, _, p = stats.anderson_ksamp([x1, x1 + 40.5]) + assert_almost_equal(s, 41.235, 3) + assert_equal(p, 0.001) + + # test case: similar distributions --> p-value capped at 0.25 + with suppress_warnings() as sup: + sup.filter(UserWarning, message='p-value capped') + s, _, p = stats.anderson_ksamp([x1, x1 + .5], midrank=False) + assert_almost_equal(s, -1.2824, 4) + assert_equal(p, 0.25) + + with suppress_warnings() as sup: + sup.filter(UserWarning, message='p-value capped') + s, _, p = stats.anderson_ksamp([x1, x1 + .5]) + assert_almost_equal(s, -1.2944, 4) + assert_equal(p, 0.25) + + # test case: check interpolated p-value in [0.01, 0.25] (no ties) + s, _, p = stats.anderson_ksamp([x1, x1 + 7.5], midrank=False) + assert_almost_equal(s, 1.4923, 4) + assert_allclose(p, 0.0775, atol=0.005, rtol=0) + + # test case: check interpolated p-value in [0.01, 0.25] (w/ ties) + s, _, p = stats.anderson_ksamp([x1, x1 + 6]) + assert_almost_equal(s, 0.6389, 4) + assert_allclose(p, 0.1798, atol=0.005, rtol=0) + + # test extended critical values for p=0.001 and p=0.005 + s, _, p = stats.anderson_ksamp([x1, x1 + 11.5], midrank=False) + assert_almost_equal(s, 4.5042, 4) + assert_allclose(p, 0.00545, atol=0.0005, rtol=0) + + s, _, p = stats.anderson_ksamp([x1, x1 + 13.5], midrank=False) + assert_almost_equal(s, 6.2982, 4) + assert_allclose(p, 0.00118, atol=0.0001, rtol=0) + + def test_not_enough_samples(self): + assert_raises(ValueError, stats.anderson_ksamp, np.ones(5)) + + def test_no_distinct_observations(self): + assert_raises(ValueError, stats.anderson_ksamp, + (np.ones(5), np.ones(5))) + + def test_empty_sample(self): + assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), [])) + + def test_result_attributes(self): + # Pass a mixture of lists and arrays + t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0] + t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) + res = stats.anderson_ksamp((t1, t2), midrank=False) + + attributes = ('statistic', 'critical_values', 'significance_level') + check_named_results(res, attributes) + + assert_equal(res.significance_level, res.pvalue) + + +class TestAnsari: + + def test_small(self): + x = [1, 2, 3, 3, 4] + y = [3, 2, 6, 1, 6, 1, 4, 1] + with suppress_warnings() as sup: + sup.filter(UserWarning, "Ties preclude use of exact statistic.") + W, pval = stats.ansari(x, y) + assert_almost_equal(W, 23.5, 11) + assert_almost_equal(pval, 0.13499256881897437, 11) + + def test_approx(self): + ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99, + 101, 96, 97, 102, 107, 113, 116, 113, 110, 98)) + parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104, + 100, 96, 108, 103, 104, 114, 114, 113, 108, + 106, 99)) + + with suppress_warnings() as sup: + sup.filter(UserWarning, "Ties preclude use of exact statistic.") + W, pval = stats.ansari(ramsay, parekh) + + assert_almost_equal(W, 185.5, 11) + assert_almost_equal(pval, 0.18145819972867083, 11) + + def test_exact(self): + W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12]) + assert_almost_equal(W, 10.0, 11) + assert_almost_equal(pval, 
0.533333333333333333, 7) + + def test_bad_arg(self): + assert_raises(ValueError, stats.ansari, [], [1]) + assert_raises(ValueError, stats.ansari, [1], []) + + def test_result_attributes(self): + x = [1, 2, 3, 3, 4] + y = [3, 2, 6, 1, 6, 1, 4, 1] + with suppress_warnings() as sup: + sup.filter(UserWarning, "Ties preclude use of exact statistic.") + res = stats.ansari(x, y) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + def test_bad_alternative(self): + # invalid value for alternative must raise a ValueError + x1 = [1, 2, 3, 4] + x2 = [5, 6, 7, 8] + match = "'alternative' must be 'two-sided'" + with assert_raises(ValueError, match=match): + stats.ansari(x1, x2, alternative='foo') + + def test_alternative_exact(self): + x1 = [-5, 1, 5, 10, 15, 20, 25] # high scale, loc=10 + x2 = [7.5, 8.5, 9.5, 10.5, 11.5, 12.5] # low scale, loc=10 + # ratio of scales is greater than 1. So, the + # p-value must be high when `alternative='less'` + # and low when `alternative='greater'`. + statistic, pval = stats.ansari(x1, x2) + pval_l = stats.ansari(x1, x2, alternative='less').pvalue + pval_g = stats.ansari(x1, x2, alternative='greater').pvalue + assert pval_l > 0.95 + assert pval_g < 0.05 # level of significance. + # also check if the p-values sum up to 1 plus the probability + # mass under the calculated statistic. + prob = _abw_state.pmf(statistic, len(x1), len(x2)) + assert_allclose(pval_g + pval_l, 1 + prob, atol=1e-12) + # also check that one of the one-sided p-values equals half the + # two-sided p-value and the other one-sided p-value is its + # complement. + assert_allclose(pval_g, pval/2, atol=1e-12) + assert_allclose(pval_l, 1+prob-pval/2, atol=1e-12) + # sanity check. The result should flip if + # we exchange x and y. + pval_l_reverse = stats.ansari(x2, x1, alternative='less').pvalue + pval_g_reverse = stats.ansari(x2, x1, alternative='greater').pvalue + assert pval_l_reverse < 0.05 + assert pval_g_reverse > 0.95 + + @pytest.mark.parametrize( + 'x, y, alternative, expected', + # the tests are designed in such a way that the + # if-else branches in the exact mode of the ansari + # test are covered. + [([1, 2, 3, 4], [5, 6, 7, 8], 'less', 0.6285714285714), + ([1, 2, 3, 4], [5, 6, 7, 8], 'greater', 0.6285714285714), + ([1, 2, 3], [4, 5, 6, 7, 8], 'less', 0.8928571428571), + ([1, 2, 3], [4, 5, 6, 7, 8], 'greater', 0.2857142857143), + ([1, 2, 3, 4, 5], [6, 7, 8], 'less', 0.2857142857143), + ([1, 2, 3, 4, 5], [6, 7, 8], 'greater', 0.8928571428571)] + ) + def test_alternative_exact_with_R(self, x, y, alternative, expected): + # testing with R on arbitrary data + # Sample R code used for the third test case above: + # ```R + # > options(digits=16) + # > x <- c(1,2,3) + # > y <- c(4,5,6,7,8) + # > ansari.test(x, y, alternative='less', exact=TRUE) + # + # Ansari-Bradley test + # + # data: x and y + # AB = 6, p-value = 0.8928571428571 + # alternative hypothesis: true ratio of scales is less than 1 + # + # ``` + pval = stats.ansari(x, y, alternative=alternative).pvalue + assert_allclose(pval, expected, atol=1e-12) + + def test_alternative_approx(self): + # intuitive tests for approximation + x1 = stats.norm.rvs(0, 5, size=100, random_state=123) + x2 = stats.norm.rvs(0, 2, size=100, random_state=123) + # for m > 55 or n > 55, the test should automatically + # switch to approximation.
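+ # (per the `ansari` documentation, the p-value is computed exactly + # only when both sample sizes are less than 55 and there are no + # ties; otherwise a normal approximation is used)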
+ pval_l = stats.ansari(x1, x2, alternative='less').pvalue + pval_g = stats.ansari(x1, x2, alternative='greater').pvalue + assert_allclose(pval_l, 1.0, atol=1e-12) + assert_allclose(pval_g, 0.0, atol=1e-12) + # also check that one of the one-sided p-values equals half the + # two-sided p-value and the other one-sided p-value is its + # complement. + x1 = stats.norm.rvs(0, 2, size=60, random_state=123) + x2 = stats.norm.rvs(0, 1.5, size=60, random_state=123) + pval = stats.ansari(x1, x2).pvalue + pval_l = stats.ansari(x1, x2, alternative='less').pvalue + pval_g = stats.ansari(x1, x2, alternative='greater').pvalue + assert_allclose(pval_g, pval/2, atol=1e-12) + assert_allclose(pval_l, 1-pval/2, atol=1e-12) + + +class TestBartlett: + + def test_data(self): + # https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm + args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] + T, pval = stats.bartlett(*args) + assert_almost_equal(T, 20.78587342806484, 7) + assert_almost_equal(pval, 0.0136358632781, 7) + + def test_bad_arg(self): + # Too few args raises ValueError. + assert_raises(ValueError, stats.bartlett, [1]) + + def test_result_attributes(self): + args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] + res = stats.bartlett(*args) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + def test_empty_arg(self): + args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, []) + assert_equal((np.nan, np.nan), stats.bartlett(*args)) + + # temporary fix for issue #9252: only accept 1d input + def test_1d_input(self): + x = np.array([[1, 2], [3, 4]]) + assert_raises(ValueError, stats.bartlett, g1, x) + + +class TestLevene: + + def test_data(self): + # https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm + args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] + W, pval = stats.levene(*args) + assert_almost_equal(W, 1.7059176930008939, 7) + assert_almost_equal(pval, 0.0990829755522, 7) + + def test_trimmed1(self): + # Test that center='trimmed' gives the same result as center='mean' + # when proportiontocut=0. + W1, pval1 = stats.levene(g1, g2, g3, center='mean') + W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', + proportiontocut=0.0) + assert_almost_equal(W1, W2) + assert_almost_equal(pval1, pval2) + + def test_trimmed2(self): + x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] + y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] + np.random.seed(1234) + x2 = np.random.permutation(x) + + # Use center='trimmed' + W0, pval0 = stats.levene(x, y, center='trimmed', + proportiontocut=0.125) + W1, pval1 = stats.levene(x2, y, center='trimmed', + proportiontocut=0.125) + # Trim the data here, and use center='mean' + W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean') + # Result should be the same.
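+ # (with 8 observations per sample, proportiontocut=0.125 trims exactly + # one point from each tail, which is what x[1:-1] and y[1:-1] remove + # by hand)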
+ assert_almost_equal(W0, W2) + assert_almost_equal(W1, W2) + assert_almost_equal(pval1, pval2) + + def test_equal_mean_median(self): + x = np.linspace(-1, 1, 21) + np.random.seed(1234) + x2 = np.random.permutation(x) + y = x**3 + W1, pval1 = stats.levene(x, y, center='mean') + W2, pval2 = stats.levene(x2, y, center='median') + assert_almost_equal(W1, W2) + assert_almost_equal(pval1, pval2) + + def test_bad_keyword(self): + x = np.linspace(-1, 1, 21) + assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1) + + def test_bad_center_value(self): + x = np.linspace(-1, 1, 21) + assert_raises(ValueError, stats.levene, x, x, center='trim') + + def test_too_few_args(self): + assert_raises(ValueError, stats.levene, [1]) + + def test_result_attributes(self): + args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] + res = stats.levene(*args) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + # temporary fix for issue #9252: only accept 1d input + def test_1d_input(self): + x = np.array([[1, 2], [3, 4]]) + assert_raises(ValueError, stats.levene, g1, x) + + +class TestBinomTest: + """Tests for stats.binomtest.""" + + # Expected results here are from R binom.test, e.g. + # options(digits=16) + # binom.test(484, 967, p=0.48) + # + def test_two_sided_pvalues1(self): + # `tol` could be stricter on most architectures, but the value + # here is limited by accuracy of `binom.cdf` for large inputs on + # Linux_Python_37_32bit_full and aarch64 + rtol = 1e-10 # aarch64 observed rtol: 1.5e-11 + res = stats.binomtest(10079999, 21000000, 0.48) + assert_allclose(res.pvalue, 1.0, rtol=rtol) + res = stats.binomtest(10079990, 21000000, 0.48) + assert_allclose(res.pvalue, 0.9966892187965, rtol=rtol) + res = stats.binomtest(10080009, 21000000, 0.48) + assert_allclose(res.pvalue, 0.9970377203856, rtol=rtol) + res = stats.binomtest(10080017, 21000000, 0.48) + assert_allclose(res.pvalue, 0.9940754817328, rtol=1e-9) + + def test_two_sided_pvalues2(self): + rtol = 1e-10 # no aarch64 failure with 1e-15, preemptive bump + res = stats.binomtest(9, n=21, p=0.48) + assert_allclose(res.pvalue, 0.6689672431939, rtol=rtol) + res = stats.binomtest(4, 21, 0.48) + assert_allclose(res.pvalue, 0.008139563452106, rtol=rtol) + res = stats.binomtest(11, 21, 0.48) + assert_allclose(res.pvalue, 0.8278629664608, rtol=rtol) + res = stats.binomtest(7, 21, 0.48) + assert_allclose(res.pvalue, 0.1966772901718, rtol=rtol) + res = stats.binomtest(3, 10, .5) + assert_allclose(res.pvalue, 0.34375, rtol=rtol) + res = stats.binomtest(2, 2, .4) + assert_allclose(res.pvalue, 0.16, rtol=rtol) + res = stats.binomtest(2, 4, .3) + assert_allclose(res.pvalue, 0.5884, rtol=rtol) + + def test_edge_cases(self): + rtol = 1e-10 # aarch64 observed rtol: 1.33e-15 + res = stats.binomtest(484, 967, 0.5) + assert_allclose(res.pvalue, 1, rtol=rtol) + res = stats.binomtest(3, 47, 3/47) + assert_allclose(res.pvalue, 1, rtol=rtol) + res = stats.binomtest(13, 46, 13/46) + assert_allclose(res.pvalue, 1, rtol=rtol) + res = stats.binomtest(15, 44, 15/44) + assert_allclose(res.pvalue, 1, rtol=rtol) + res = stats.binomtest(7, 13, 0.5) + assert_allclose(res.pvalue, 1, rtol=rtol) + res = stats.binomtest(6, 11, 0.5) + assert_allclose(res.pvalue, 1, rtol=rtol) + + def test_binary_srch_for_binom_tst(self): + # Test that old behavior of binomtest is maintained + # by the new binary search method in cases where d + # exactly equals the input on one side. 
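+ # The two-sided p-value sums pmf(i) over every i with + # pmf(i) <= pmf(k); the binary search only has to locate the boundary + # of that set on the far side of the mode instead of scanning all i.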
+ n = 10 + p = 0.5 + k = 3 + # First test for the case where k > mode of PMF + i = np.arange(np.ceil(p * n), n+1) + d = stats.binom.pmf(k, n, p) + # Old way of calculating y, probably consistent with R. + y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0) + # New way with binary search. + ix = _binary_search_for_binom_tst(lambda x1: + -stats.binom.pmf(x1, n, p), + -d, np.ceil(p * n), n) + y2 = n - ix + int(d == stats.binom.pmf(ix, n, p)) + assert_allclose(y1, y2, rtol=1e-9) + # Now test for the other side. + k = 7 + i = np.arange(np.floor(p * n) + 1) + d = stats.binom.pmf(k, n, p) + # Old way of calculating y. + y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0) + # New way with binary search. + ix = _binary_search_for_binom_tst(lambda x1: + stats.binom.pmf(x1, n, p), + d, 0, np.floor(p * n)) + y2 = ix + 1 + assert_allclose(y1, y2, rtol=1e-9) + + # Expected results here are from R 3.6.2 binom.test + @pytest.mark.parametrize('alternative, pval, ci_low, ci_high', + [('less', 0.148831050443, + 0.0, 0.2772002496709138), + ('greater', 0.9004695898947, + 0.1366613252458672, 1.0), + ('two-sided', 0.2983720970096, + 0.1266555521019559, 0.2918426890886281)]) + def test_confidence_intervals1(self, alternative, pval, ci_low, ci_high): + res = stats.binomtest(20, n=100, p=0.25, alternative=alternative) + assert_allclose(res.pvalue, pval, rtol=1e-12) + assert_equal(res.statistic, 0.2) + ci = res.proportion_ci(confidence_level=0.95) + assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-12) + + # Expected results here are from R 3.6.2 binom.test. + @pytest.mark.parametrize('alternative, pval, ci_low, ci_high', + [('less', + 0.005656361, 0.0, 0.1872093), + ('greater', + 0.9987146, 0.008860761, 1.0), + ('two-sided', + 0.01191714, 0.006872485, 0.202706269)]) + def test_confidence_intervals2(self, alternative, pval, ci_low, ci_high): + res = stats.binomtest(3, n=50, p=0.2, alternative=alternative) + assert_allclose(res.pvalue, pval, rtol=1e-6) + assert_equal(res.statistic, 0.06) + ci = res.proportion_ci(confidence_level=0.99) + assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6) + + # Expected results here are from R 3.6.2 binom.test. + @pytest.mark.parametrize('alternative, pval, ci_high', + [('less', 0.05631351, 0.2588656), + ('greater', 1.0, 1.0), + ('two-sided', 0.07604122, 0.3084971)]) + def test_confidence_interval_exact_k0(self, alternative, pval, ci_high): + # Test with k=0, n = 10. + res = stats.binomtest(0, 10, p=0.25, alternative=alternative) + assert_allclose(res.pvalue, pval, rtol=1e-6) + ci = res.proportion_ci(confidence_level=0.95) + assert_equal(ci.low, 0.0) + assert_allclose(ci.high, ci_high, rtol=1e-6) + + # Expected results here are from R 3.6.2 binom.test. + @pytest.mark.parametrize('alternative, pval, ci_low', + [('less', 1.0, 0.0), + ('greater', 9.536743e-07, 0.7411344), + ('two-sided', 9.536743e-07, 0.6915029)]) + def test_confidence_interval_exact_k_is_n(self, alternative, pval, ci_low): + # Test with k = n = 10. + res = stats.binomtest(10, 10, p=0.25, alternative=alternative) + assert_allclose(res.pvalue, pval, rtol=1e-6) + ci = res.proportion_ci(confidence_level=0.95) + assert_equal(ci.high, 1.0) + assert_allclose(ci.low, ci_low, rtol=1e-6) + + # Expected results are from the prop.test function in R 3.6.2. 
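+ # For reference, the Wilson score interval (method='wilson') is + # (phat + z**2/(2*n) +/- z*sqrt(phat*(1 - phat)/n + z**2/(4*n**2))) + # / (1 + z**2/n); 'wilsoncc' applies a continuity correction on top.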
+ @pytest.mark.parametrize( + 'k, alternative, corr, conf, ci_low, ci_high', + [[3, 'two-sided', True, 0.95, 0.08094782, 0.64632928], + [3, 'two-sided', True, 0.99, 0.0586329, 0.7169416], + [3, 'two-sided', False, 0.95, 0.1077913, 0.6032219], + [3, 'two-sided', False, 0.99, 0.07956632, 0.6799753], + [3, 'less', True, 0.95, 0.0, 0.6043476], + [3, 'less', True, 0.99, 0.0, 0.6901811], + [3, 'less', False, 0.95, 0.0, 0.5583002], + [3, 'less', False, 0.99, 0.0, 0.6507187], + [3, 'greater', True, 0.95, 0.09644904, 1.0], + [3, 'greater', True, 0.99, 0.06659141, 1.0], + [3, 'greater', False, 0.95, 0.1268766, 1.0], + [3, 'greater', False, 0.99, 0.08974147, 1.0], + + [0, 'two-sided', True, 0.95, 0.0, 0.3445372], + [0, 'two-sided', False, 0.95, 0.0, 0.2775328], + [0, 'less', True, 0.95, 0.0, 0.2847374], + [0, 'less', False, 0.95, 0.0, 0.212942], + [0, 'greater', True, 0.95, 0.0, 1.0], + [0, 'greater', False, 0.95, 0.0, 1.0], + + [10, 'two-sided', True, 0.95, 0.6554628, 1.0], + [10, 'two-sided', False, 0.95, 0.7224672, 1.0], + [10, 'less', True, 0.95, 0.0, 1.0], + [10, 'less', False, 0.95, 0.0, 1.0], + [10, 'greater', True, 0.95, 0.7152626, 1.0], + [10, 'greater', False, 0.95, 0.787058, 1.0]] + ) + def test_ci_wilson_method(self, k, alternative, corr, conf, + ci_low, ci_high): + res = stats.binomtest(k, n=10, p=0.1, alternative=alternative) + if corr: + method = 'wilsoncc' + else: + method = 'wilson' + ci = res.proportion_ci(confidence_level=conf, method=method) + assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6) + + def test_estimate_equals_hypothesized_prop(self): + # Test the special case where the estimated proportion equals + # the hypothesized proportion. When alternative is 'two-sided', + # the p-value is 1. + res = stats.binomtest(4, 16, 0.25) + assert_equal(res.statistic, 0.25) + assert_equal(res.pvalue, 1.0) + + @pytest.mark.parametrize('k, n', [(0, 0), (-1, 2)]) + def test_invalid_k_n(self, k, n): + with pytest.raises(ValueError, + match="must be an integer not less than"): + stats.binomtest(k, n) + + def test_invalid_k_too_big(self): + with pytest.raises(ValueError, + match=r"k \(11\) must not be greater than n \(10\)."): + stats.binomtest(11, 10, 0.25) + + def test_invalid_k_wrong_type(self): + with pytest.raises(TypeError, + match="k must be an integer."): + stats.binomtest([10, 11], 21, 0.25) + + def test_invalid_p_range(self): + message = r'p \(-0.5\) must be in range...' + with pytest.raises(ValueError, match=message): + stats.binomtest(50, 150, p=-0.5) + message = r'p \(1.5\) must be in range...' 
+ with pytest.raises(ValueError, match=message): + stats.binomtest(50, 150, p=1.5) + + def test_invalid_confidence_level(self): + res = stats.binomtest(3, n=10, p=0.1) + message = r"confidence_level \(-1\) must be in the interval" + with pytest.raises(ValueError, match=message): + res.proportion_ci(confidence_level=-1) + + def test_invalid_ci_method(self): + res = stats.binomtest(3, n=10, p=0.1) + with pytest.raises(ValueError, match=r"method \('plate of shrimp'\) must be"): + res.proportion_ci(method="plate of shrimp") + + def test_invalid_alternative(self): + with pytest.raises(ValueError, match=r"alternative \('ekki'\) not..."): + stats.binomtest(3, n=10, p=0.1, alternative='ekki') + + def test_alias(self): + res = stats.binomtest(3, n=10, p=0.1) + assert_equal(res.proportion_estimate, res.statistic) + + @pytest.mark.skipif(sys.maxsize <= 2**32, reason="32-bit does not overflow") + def test_boost_overflow_raises(self): + # Boost.Math error policy should raise exceptions in Python + with pytest.raises(OverflowError, match='Error in function...'): + stats.binomtest(5, 6, p=sys.float_info.min) + + +class TestFligner: + + def test_data(self): + # numbers from R: fligner.test in package stats + x1 = np.arange(5) + assert_array_almost_equal(stats.fligner(x1, x1**2), + (3.2282229927203536, 0.072379187848207877), + 11) + + def test_trimmed1(self): + # Perturb input to break ties in the transformed data + # See https://github.com/scipy/scipy/pull/8042 for more details + rs = np.random.RandomState(123) + + def _perturb(g): + return (np.asarray(g) + 1e-10 * rs.randn(len(g))).tolist() + + g1_ = _perturb(g1) + g2_ = _perturb(g2) + g3_ = _perturb(g3) + # Test that center='trimmed' gives the same result as center='mean' + # when proportiontocut=0. + Xsq1, pval1 = stats.fligner(g1_, g2_, g3_, center='mean') + Xsq2, pval2 = stats.fligner(g1_, g2_, g3_, center='trimmed', + proportiontocut=0.0) + assert_almost_equal(Xsq1, Xsq2) + assert_almost_equal(pval1, pval2) + + def test_trimmed2(self): + x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] + y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] + # Use center='trimmed' + Xsq1, pval1 = stats.fligner(x, y, center='trimmed', + proportiontocut=0.125) + # Trim the data here, and use center='mean' + Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean') + # Result should be the same. + assert_almost_equal(Xsq1, Xsq2) + assert_almost_equal(pval1, pval2) + + # The following test looks reasonable at first, but fligner() uses the + # function stats.rankdata(), and in one of the cases in this test, + # there are ties, while in the other (because of normal rounding + # errors) there are not. This difference leads to differences in the + # third significant digit of W. + # + #def test_equal_mean_median(self): + # x = np.linspace(-1,1,21) + # y = x**3 + # W1, pval1 = stats.fligner(x, y, center='mean') + # W2, pval2 = stats.fligner(x, y, center='median') + # assert_almost_equal(W1, W2) + # assert_almost_equal(pval1, pval2) + + def test_bad_keyword(self): + x = np.linspace(-1, 1, 21) + assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1) + + def test_bad_center_value(self): + x = np.linspace(-1, 1, 21) + assert_raises(ValueError, stats.fligner, x, x, center='trim') + + def test_bad_num_args(self): + # Too few args raises ValueError. 
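+ # (like bartlett and levene above, fligner needs at least two samples)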
+ assert_raises(ValueError, stats.fligner, [1]) + + def test_empty_arg(self): + x = np.arange(5) + assert_equal((np.nan, np.nan), stats.fligner(x, x**2, [])) + + +def mood_cases_with_ties(): + # Generate random `x` and `y` arrays with ties both between and within the + # samples. Expected results are (statistic, pvalue) from SAS. + expected_results = [(-1.76658511464992, .0386488678399305), + (-.694031428192304, .2438312498647250), + (-1.15093525352151, .1248794365836150)] + seeds = [23453254, 1298352315, 987234597] + for si, seed in enumerate(seeds): + rng = np.random.default_rng(seed) + xy = rng.random(100) + # Generate random indices to make ties + tie_ind = rng.integers(low=0, high=99, size=5) + # Generate a random number of ties for each index. + num_ties_per_ind = rng.integers(low=1, high=5, size=5) + # At each `tie_ind`, mark the next `n` indices equal to that value. + for i, n in zip(tie_ind, num_ties_per_ind): + for j in range(i + 1, i + n): + xy[j] = xy[i] + # scramble order of xy before splitting into `x, y` + rng.shuffle(xy) + x, y = np.split(xy, 2) + yield x, y, 'less', *expected_results[si] + + +class TestMood: + @pytest.mark.parametrize("x,y,alternative,stat_expect,p_expect", + mood_cases_with_ties()) + def test_against_SAS(self, x, y, alternative, stat_expect, p_expect): + """ + Example code used to generate SAS output: + DATA myData; + INPUT X Y; + CARDS; + 1 0 + 1 1 + 1 2 + 1 3 + 1 4 + 2 0 + 2 1 + 2 4 + 2 9 + 2 16 + ods graphics on; + proc npar1way mood data=myData ; + class X; + ods output MoodTest=mt; + proc contents data=mt; + proc print data=mt; + format Prob1 17.16 Prob2 17.16 Statistic 17.16 Z 17.16 ; + title "Mood Two-Sample Test"; + proc print data=myData; + title "Data for above results"; + run; + """ + statistic, pvalue = stats.mood(x, y, alternative=alternative) + assert_allclose(stat_expect, statistic, atol=1e-16) + assert_allclose(p_expect, pvalue, atol=1e-16) + + @pytest.mark.parametrize("alternative, expected", + [('two-sided', (1.019938533549930, + .3077576129778760)), + ('less', (1.019938533549930, + 1 - .1538788064889380)), + ('greater', (1.019938533549930, + .1538788064889380))]) + def test_against_SAS_2(self, alternative, expected): + # Code to run in SAS in above function + x = [111, 107, 100, 99, 102, 106, 109, 108, 104, 99, + 101, 96, 97, 102, 107, 113, 116, 113, 110, 98] + y = [107, 108, 106, 98, 105, 103, 110, 105, 104, 100, + 96, 108, 103, 104, 114, 114, 113, 108, 106, 99] + res = stats.mood(x, y, alternative=alternative) + assert_allclose(res, expected) + + def test_mood_order_of_args(self): + # z should change sign when the order of arguments changes, pvalue + # should not change + np.random.seed(1234) + x1 = np.random.randn(10, 1) + x2 = np.random.randn(15, 1) + z1, p1 = stats.mood(x1, x2) + z2, p2 = stats.mood(x2, x1) + assert_array_almost_equal([z1, p1], [-z2, p2]) + + def test_mood_with_axis_none(self): + # Test with axis = None, compare with results from R + x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047, + 1.59528080213779, 0.329507771815361, -0.820468384118015, + 0.487429052428485, 0.738324705129217, 0.575781351653492, + -0.305388387156356, 1.51178116845085, 0.389843236411431, + -0.621240580541804, -2.2146998871775, 1.12493091814311, + -0.0449336090152309, -0.0161902630989461, 0.943836210685299, + 0.821221195098089, 0.593901321217509] + + x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882, + -1.13037567424629, -0.0802517565509893, 0.132420284381094, + 0.707954729271733, -0.23969802417184, 1.98447393665293, + 
-0.138787012119665, 0.417650750792556, 0.981752777463662, + -0.392695355503813, -1.03966897694891, 1.78222896030858, + -2.31106908460517, 0.878604580921265, 0.035806718015226, + 1.01282869212708, 0.432265154539617, 2.09081920524915, + -1.19992581964387, 1.58963820029007, 1.95465164222325, + 0.00493777682814261, -2.45170638784613, 0.477237302613617, + -0.596558168631403, 0.792203270299649, 0.289636710177348] + + x1 = np.array(x1) + x2 = np.array(x2) + x1.shape = (10, 2) + x2.shape = (15, 2) + assert_array_almost_equal(stats.mood(x1, x2, axis=None), + [-1.31716607555, 0.18778296257]) + + def test_mood_2d(self): + # Test if the results of mood test in 2-D case are consistent with the + # R result for the same inputs. Numbers from R mood.test(). + ny = 5 + np.random.seed(1234) + x1 = np.random.randn(10, ny) + x2 = np.random.randn(15, ny) + z_vectest, pval_vectest = stats.mood(x1, x2) + + for j in range(ny): + assert_array_almost_equal([z_vectest[j], pval_vectest[j]], + stats.mood(x1[:, j], x2[:, j])) + + # inverse order of dimensions + x1 = x1.transpose() + x2 = x2.transpose() + z_vectest, pval_vectest = stats.mood(x1, x2, axis=1) + + for i in range(ny): + # check axis handling is self consistent + assert_array_almost_equal([z_vectest[i], pval_vectest[i]], + stats.mood(x1[i, :], x2[i, :])) + + def test_mood_3d(self): + shape = (10, 5, 6) + np.random.seed(1234) + x1 = np.random.randn(*shape) + x2 = np.random.randn(*shape) + + for axis in range(3): + z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis) + # Tests that result for 3-D arrays is equal to that for the + # same calculation on a set of 1-D arrays taken from the + # 3-D array + axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis + for i in range(shape[axes_idx[axis][0]]): + for j in range(shape[axes_idx[axis][1]]): + if axis == 0: + slice1 = x1[:, i, j] + slice2 = x2[:, i, j] + elif axis == 1: + slice1 = x1[i, :, j] + slice2 = x2[i, :, j] + else: + slice1 = x1[i, j, :] + slice2 = x2[i, j, :] + + assert_array_almost_equal([z_vectest[i, j], + pval_vectest[i, j]], + stats.mood(slice1, slice2)) + + def test_mood_bad_arg(self): + # Raise ValueError when the sum of the lengths of the args is + # less than 3 + assert_raises(ValueError, stats.mood, [1], []) + + def test_mood_alternative(self): + + np.random.seed(0) + x = stats.norm.rvs(scale=0.75, size=100) + y = stats.norm.rvs(scale=1.25, size=100) + + stat1, p1 = stats.mood(x, y, alternative='two-sided') + stat2, p2 = stats.mood(x, y, alternative='less') + stat3, p3 = stats.mood(x, y, alternative='greater') + + assert stat1 == stat2 == stat3 + assert_allclose(p1, 0, atol=1e-7) + assert_allclose(p2, p1/2) + assert_allclose(p3, 1 - p1/2) + + with pytest.raises(ValueError, match="`alternative` must be..."): + stats.mood(x, y, alternative='ekki-ekki') + + @pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater']) + def test_result(self, alternative): + rng = np.random.default_rng(265827767938813079281100964083953437622) + x1 = rng.standard_normal((10, 1)) + x2 = rng.standard_normal((15, 1)) + + res = stats.mood(x1, x2, alternative=alternative) + assert_equal((res.statistic, res.pvalue), res) + + +class TestProbplot: + + def test_basic(self): + x = stats.norm.rvs(size=20, random_state=12345) + osm, osr = stats.probplot(x, fit=False) + osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575, + -0.73908135, -0.5857176, -0.44506467, -0.31273668, + -0.18568928, -0.06158146, 0.06158146, 0.18568928, + 0.31273668, 0.44506467, 0.5857176, 0.73908135, + 0.91222575, 
1.11829229, 1.38768012, 1.8241636] + assert_allclose(osr, np.sort(x)) + assert_allclose(osm, osm_expected) + + res, res_fit = stats.probplot(x, fit=True) + res_fit_expected = [1.05361841, 0.31297795, 0.98741609] + assert_allclose(res_fit, res_fit_expected) + + def test_sparams_keyword(self): + x = stats.norm.rvs(size=100, random_state=123456) + # Check that None, () and 0 (loc=0, for normal distribution) all work + # and give the same results + osm1, osr1 = stats.probplot(x, sparams=None, fit=False) + osm2, osr2 = stats.probplot(x, sparams=0, fit=False) + osm3, osr3 = stats.probplot(x, sparams=(), fit=False) + assert_allclose(osm1, osm2) + assert_allclose(osm1, osm3) + assert_allclose(osr1, osr2) + assert_allclose(osr1, osr3) + # Check giving (loc, scale) params for normal distribution + osm, osr = stats.probplot(x, sparams=(), fit=False) + + def test_dist_keyword(self): + x = stats.norm.rvs(size=20, random_state=12345) + osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,)) + osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,)) + assert_allclose(osm1, osm2) + assert_allclose(osr1, osr2) + + assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name') + assert_raises(AttributeError, stats.probplot, x, dist=[]) + + class custom_dist: + """Some class that looks just enough like a distribution.""" + def ppf(self, q): + return stats.norm.ppf(q, loc=2) + + osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False) + osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False) + assert_allclose(osm1, osm2) + assert_allclose(osr1, osr2) + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_plot_kwarg(self): + fig = plt.figure() + fig.add_subplot(111) + x = stats.t.rvs(3, size=100, random_state=7654321) + res1, fitres1 = stats.probplot(x, plot=plt) + plt.close() + res2, fitres2 = stats.probplot(x, plot=None) + res3 = stats.probplot(x, fit=False, plot=plt) + plt.close() + res4 = stats.probplot(x, fit=False, plot=None) + # Check that results are consistent between combinations of `fit` and + # `plot` keywords. + assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2) + assert_allclose(res1, res2) + assert_allclose(res1, res3) + assert_allclose(res1, res4) + assert_allclose(fitres1, fitres2) + + # Check that a Matplotlib Axes object is accepted + fig = plt.figure() + ax = fig.add_subplot(111) + stats.probplot(x, fit=False, plot=ax) + plt.close() + + def test_probplot_bad_args(self): + # Raise ValueError when given an invalid distribution. + assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp") + + def test_empty(self): + assert_equal(stats.probplot([], fit=False), + (np.array([]), np.array([]))) + assert_equal(stats.probplot([], fit=True), + ((np.array([]), np.array([])), + (np.nan, np.nan, 0.0))) + + def test_array_of_size_one(self): + with np.errstate(invalid='ignore'): + assert_equal(stats.probplot([1], fit=True), + ((np.array([0.]), np.array([1])), + (np.nan, np.nan, 0.0))) + + +class TestWilcoxon: + def test_wilcoxon_bad_arg(self): + # Raise ValueError when two args of different lengths are given or + # zero_method is unknown. 
+ assert_raises(ValueError, stats.wilcoxon, [1], [1, 2]) + assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy") + assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], + alternative="dummy") + assert_raises(ValueError, stats.wilcoxon, [1]*10, mode="xyz") + + def test_zero_diff(self): + x = np.arange(20) + # pratt and wilcox do not work if x - y == 0 + assert_raises(ValueError, stats.wilcoxon, x, x, "wilcox", + mode="approx") + assert_raises(ValueError, stats.wilcoxon, x, x, "pratt", + mode="approx") + # ranksum is n*(n+1)/2, split in half if zero_method == "zsplit" + assert_equal(stats.wilcoxon(x, x, "zsplit", mode="approx"), + (20*21/4, 1.0)) + + def test_pratt(self): + # regression test for gh-6805: p-value matches value from R package + # coin (wilcoxsign_test) reported in the issue + x = [1, 2, 3, 4] + y = [1, 2, 3, 5] + with suppress_warnings() as sup: + sup.filter(UserWarning, message="Sample size too small") + res = stats.wilcoxon(x, y, zero_method="pratt", mode="approx") + assert_allclose(res, (0.0, 0.31731050786291415)) + + def test_wilcoxon_arg_type(self): + # Should be able to accept list as arguments. + # Address issue 6070. + arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2] + + _ = stats.wilcoxon(arr, zero_method="pratt", mode="approx") + _ = stats.wilcoxon(arr, zero_method="zsplit", mode="approx") + _ = stats.wilcoxon(arr, zero_method="wilcox", mode="approx") + + def test_accuracy_wilcoxon(self): + freq = [1, 4, 16, 15, 8, 4, 5, 1, 2] + nums = range(-4, 5) + x = np.concatenate([[u] * v for u, v in zip(nums, freq)]) + y = np.zeros(x.size) + + T, p = stats.wilcoxon(x, y, "pratt", mode="approx") + assert_allclose(T, 423) + assert_allclose(p, 0.0031724568006762576) + + T, p = stats.wilcoxon(x, y, "zsplit", mode="approx") + assert_allclose(T, 441) + assert_allclose(p, 0.0032145343172473055) + + T, p = stats.wilcoxon(x, y, "wilcox", mode="approx") + assert_allclose(T, 327) + assert_allclose(p, 0.00641346115861) + + # Test the 'correction' option, using values computed in R with: + # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE}) + x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112]) + y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187]) + T, p = stats.wilcoxon(x, y, correction=False, mode="approx") + assert_equal(T, 34) + assert_allclose(p, 0.6948866, rtol=1e-6) + T, p = stats.wilcoxon(x, y, correction=True, mode="approx") + assert_equal(T, 34) + assert_allclose(p, 0.7240817, rtol=1e-6) + + def test_wilcoxon_result_attributes(self): + x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112]) + y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187]) + res = stats.wilcoxon(x, y, correction=False, mode="approx") + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + def test_wilcoxon_has_zstatistic(self): + rng = np.random.default_rng(89426135444) + x, y = rng.random(15), rng.random(15) + + res = stats.wilcoxon(x, y, mode="approx") + ref = stats.norm.ppf(res.pvalue/2) + assert_allclose(res.zstatistic, ref) + + res = stats.wilcoxon(x, y, mode="exact") + assert not hasattr(res, 'zstatistic') + + res = stats.wilcoxon(x, y) + assert not hasattr(res, 'zstatistic') + + def test_wilcoxon_tie(self): + # Regression test for gh-2391. 
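+ # All differences are identical (0.1) here, the most extreme case for + # the tie correction in the normal approximation.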
+ # Corresponding R code is: + # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE) + # > result$p.value + # [1] 0.001565402 + # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE) + # > result$p.value + # [1] 0.001904195 + stat, p = stats.wilcoxon([0.1] * 10, mode="approx") + expected_p = 0.001565402 + assert_equal(stat, 0) + assert_allclose(p, expected_p, rtol=1e-6) + + stat, p = stats.wilcoxon([0.1] * 10, correction=True, mode="approx") + expected_p = 0.001904195 + assert_equal(stat, 0) + assert_allclose(p, expected_p, rtol=1e-6) + + def test_onesided(self): + # tested against "R version 3.4.1 (2017-06-30)" + # x <- c(125, 115, 130, 140, 140, 115, 140, 125, 140, 135) + # y <- c(110, 122, 125, 120, 140, 124, 123, 137, 135, 145) + # cfg <- list(x = x, y = y, paired = TRUE, exact = FALSE) + # do.call(wilcox.test, c(cfg, list(alternative = "less", correct = FALSE))) + # do.call(wilcox.test, c(cfg, list(alternative = "less", correct = TRUE))) + # do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = FALSE))) + # do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = TRUE))) + x = [125, 115, 130, 140, 140, 115, 140, 125, 140, 135] + y = [110, 122, 125, 120, 140, 124, 123, 137, 135, 145] + + with suppress_warnings() as sup: + sup.filter(UserWarning, message="Sample size too small") + w, p = stats.wilcoxon(x, y, alternative="less", mode="approx") + assert_equal(w, 27) + assert_almost_equal(p, 0.7031847, decimal=6) + + with suppress_warnings() as sup: + sup.filter(UserWarning, message="Sample size too small") + w, p = stats.wilcoxon(x, y, alternative="less", correction=True, + mode="approx") + assert_equal(w, 27) + assert_almost_equal(p, 0.7233656, decimal=6) + + with suppress_warnings() as sup: + sup.filter(UserWarning, message="Sample size too small") + w, p = stats.wilcoxon(x, y, alternative="greater", mode="approx") + assert_equal(w, 27) + assert_almost_equal(p, 0.2968153, decimal=6) + + with suppress_warnings() as sup: + sup.filter(UserWarning, message="Sample size too small") + w, p = stats.wilcoxon(x, y, alternative="greater", correction=True, + mode="approx") + assert_equal(w, 27) + assert_almost_equal(p, 0.3176447, decimal=6) + + def test_exact_basic(self): + for n in range(1, 51): + pmf1 = _get_wilcoxon_distr(n) + pmf2 = _get_wilcoxon_distr2(n) + assert_equal(n*(n+1)/2 + 1, len(pmf1)) + assert_equal(sum(pmf1), 1) + assert_array_almost_equal(pmf1, pmf2) + + def test_exact_pval(self): + # expected values computed with "R version 3.4.1 (2017-06-30)" + x = np.array([1.81, 0.82, 1.56, -0.48, 0.81, 1.28, -1.04, 0.23, + -0.75, 0.14]) + y = np.array([0.71, 0.65, -0.2, 0.85, -1.1, -0.45, -0.84, -0.24, + -0.68, -0.76]) + _, p = stats.wilcoxon(x, y, alternative="two-sided", mode="exact") + assert_almost_equal(p, 0.1054688, decimal=6) + _, p = stats.wilcoxon(x, y, alternative="less", mode="exact") + assert_almost_equal(p, 0.9580078, decimal=6) + _, p = stats.wilcoxon(x, y, alternative="greater", mode="exact") + assert_almost_equal(p, 0.05273438, decimal=6) + + x = np.arange(0, 20) + 0.5 + y = np.arange(20, 0, -1) + _, p = stats.wilcoxon(x, y, alternative="two-sided", mode="exact") + assert_almost_equal(p, 0.8694878, decimal=6) + _, p = stats.wilcoxon(x, y, alternative="less", mode="exact") + assert_almost_equal(p, 0.4347439, decimal=6) + _, p = stats.wilcoxon(x, y, alternative="greater", mode="exact") + assert_almost_equal(p, 0.5795889, decimal=6) + + # These inputs were chosen to give a W statistic that is either the + # center of the 
distribution (when the length of the support is odd), or + # the value to the left of the center (when the length of the support is + # even). Also, the numbers are chosen so that the W statistic is the + # sum of the positive values. + + @pytest.mark.parametrize('x', [[-1, -2, 3], + [-1, 2, -3, -4, 5], + [-1, -2, 3, -4, -5, -6, 7, 8]]) + def test_exact_p_1(self, x): + w, p = stats.wilcoxon(x) + x = np.array(x) + wtrue = x[x > 0].sum() + assert_equal(w, wtrue) + assert_equal(p, 1) + + def test_auto(self): + # 'auto' defaults to exact if there are no ties and n <= 25 + x = np.arange(0, 25) + 0.5 + y = np.arange(25, 0, -1) + assert_equal(stats.wilcoxon(x, y), + stats.wilcoxon(x, y, mode="exact")) + + # if there are zeros in d = x - y (here d contains one zero), + # then switch to approx + d = np.arange(0, 13) + with suppress_warnings() as sup: + sup.filter(UserWarning, message="Exact p-value calculation") + w, p = stats.wilcoxon(d) + assert_equal(stats.wilcoxon(d, mode="approx"), (w, p)) + + # use approximation for samples > 25 + d = np.arange(1, 52) + assert_equal(stats.wilcoxon(d), stats.wilcoxon(d, mode="approx")) + + @pytest.mark.parametrize('size', [3, 5, 10]) + def test_permutation_method(self, size): + rng = np.random.default_rng(92348034828501345) + x = rng.random(size=size) + res = stats.wilcoxon(x, method=stats.PermutationMethod()) + ref = stats.wilcoxon(x, method='exact') + assert_equal(res.statistic, ref.statistic) + assert_equal(res.pvalue, ref.pvalue) + + x = rng.random(size=size*10) + rng = np.random.default_rng(59234803482850134) + pm = stats.PermutationMethod(n_resamples=99, random_state=rng) + ref = stats.wilcoxon(x, method=pm) + rng = np.random.default_rng(59234803482850134) + pm = stats.PermutationMethod(n_resamples=99, random_state=rng) + res = stats.wilcoxon(x, method=pm) + + assert_equal(np.round(res.pvalue, 2), res.pvalue) # n_resamples used + assert_equal(res.pvalue, ref.pvalue) # random_state used + + + class TestKstat: + def test_moments_normal_distribution(self): + np.random.seed(32149) + data = np.random.randn(12345) + moments = [stats.kstat(data, n) for n in [1, 2, 3, 4]] + + expected = [0.011315, 1.017931, 0.05811052, 0.0754134] + assert_allclose(moments, expected, rtol=1e-4) + + # test equivalence with `stats.moment` + m1 = stats.moment(data, order=1) + m2 = stats.moment(data, order=2) + m3 = stats.moment(data, order=3) + assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2) + + def test_empty_input(self): + assert_raises(ValueError, stats.kstat, []) + + def test_nan_input(self): + data = np.arange(10.) + data[6] = np.nan + + assert_equal(stats.kstat(data), np.nan) + + def test_kstat_bad_arg(self): + # Raise ValueError if n > 4 or n < 1. + data = np.arange(10) + for n in [0, 4.001]: + assert_raises(ValueError, stats.kstat, data, n=n) + + + class TestKstatVar: + def test_empty_input(self): + assert_raises(ValueError, stats.kstatvar, []) + + def test_nan_input(self): + data = np.arange(10.) + data[6] = np.nan + + assert_equal(stats.kstatvar(data), np.nan) + + def test_bad_arg(self): + # Raise ValueError if n is not 1 or 2. 
+ data = [1] + n = 10 + assert_raises(ValueError, stats.kstatvar, data, n=n) + + +class TestPpccPlot: + def setup_method(self): + self.x = _old_loggamma_rvs(5, size=500, random_state=7654321) + 5 + + def test_basic(self): + N = 5 + svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N) + ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182, + 0.93519298] + assert_allclose(svals, np.linspace(-10, 10, num=N)) + assert_allclose(ppcc, ppcc_expected) + + def test_dist(self): + # Test that we can specify distributions both by name and as objects. + svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda') + svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10, + dist=stats.tukeylambda) + assert_allclose(svals1, svals2, rtol=1e-20) + assert_allclose(ppcc1, ppcc2, rtol=1e-20) + # Test that 'tukeylambda' is the default dist + svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10) + assert_allclose(svals1, svals3, rtol=1e-20) + assert_allclose(ppcc1, ppcc3, rtol=1e-20) + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_plot_kwarg(self): + # Check with the matplotlib.pyplot module + fig = plt.figure() + ax = fig.add_subplot(111) + stats.ppcc_plot(self.x, -20, 20, plot=plt) + fig.delaxes(ax) + + # Check that a Matplotlib Axes object is accepted + ax = fig.add_subplot(111) + stats.ppcc_plot(self.x, -20, 20, plot=ax) + plt.close() + + def test_invalid_inputs(self): + # `b` has to be larger than `a` + assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0) + + # Raise ValueError when given an invalid distribution. + assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1, + dist="plate_of_shrimp") + + def test_empty(self): + # For consistency with probplot return for one empty array, + # ppcc contains all zeros and svals is the same as for normal array + # input. + svals, ppcc = stats.ppcc_plot([], 0, 1) + assert_allclose(svals, np.linspace(0, 1, num=80)) + assert_allclose(ppcc, np.zeros(80, dtype=float)) + + +class TestPpccMax: + def test_ppcc_max_bad_arg(self): + # Raise ValueError when given an invalid distribution. + data = [1] + assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp") + + def test_ppcc_max_basic(self): + x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000, + random_state=1234567) + 1e4 + assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=7) + + def test_dist(self): + x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000, + random_state=1234567) + 1e4 + + # Test that we can specify distributions both by name and as objects. + max1 = stats.ppcc_max(x, dist='tukeylambda') + max2 = stats.ppcc_max(x, dist=stats.tukeylambda) + assert_almost_equal(max1, -0.71215366521264145, decimal=5) + assert_almost_equal(max2, -0.71215366521264145, decimal=5) + + # Test that 'tukeylambda' is the default dist + max3 = stats.ppcc_max(x) + assert_almost_equal(max3, -0.71215366521264145, decimal=5) + + def test_brack(self): + x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000, + random_state=1234567) + 1e4 + assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5)) + + assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)), + -0.71215366521264145, decimal=7) + + assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)), + -0.71215366521264145, decimal=7) + + +class TestBoxcox_llf: + + def test_basic(self): + x = stats.norm.rvs(size=10000, loc=10, random_state=54321) + lmbda = 1 + llf = stats.boxcox_llf(lmbda, x) + llf_expected = -x.size / 2. 
* np.log(np.sum(x.std()**2)) + assert_allclose(llf, llf_expected) + + def test_array_like(self): + x = stats.norm.rvs(size=100, loc=10, random_state=54321) + lmbda = 1 + llf = stats.boxcox_llf(lmbda, x) + llf2 = stats.boxcox_llf(lmbda, list(x)) + assert_allclose(llf, llf2, rtol=1e-12) + + def test_2d_input(self): + # Note: boxcox_llf() was already working with 2-D input (sort of), so + # keep it like that. boxcox() doesn't work with 2-D input though, due + # to brent() returning a scalar. + x = stats.norm.rvs(size=100, loc=10, random_state=54321) + lmbda = 1 + llf = stats.boxcox_llf(lmbda, x) + llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T) + assert_allclose([llf, llf], llf2, rtol=1e-12) + + def test_empty(self): + assert_(np.isnan(stats.boxcox_llf(1, []))) + + def test_gh_6873(self): + # Regression test for gh-6873. + # This example was taken from gh-7534, a duplicate of gh-6873. + data = [198.0, 233.0, 233.0, 392.0] + llf = stats.boxcox_llf(-8, data) + # The expected value was computed with mpmath. + assert_allclose(llf, -17.93934208579061) + + def test_instability_gh20021(self): + data = [2003, 1950, 1997, 2000, 2009] + llf = stats.boxcox_llf(1e-8, data) + # The expected value was computed with mpsci, set mpmath.mp.dps=100 + assert_allclose(llf, -15.32401272869016598) + + +# This is the data from github user Qukaiyi, given as an example +# of a data set that caused boxcox to fail. +_boxcox_data = [ + 15957, 112079, 1039553, 711775, 173111, 307382, 183155, 53366, 760875, + 207500, 160045, 473714, 40194, 440319, 133261, 265444, 155590, 36660, + 904939, 55108, 138391, 339146, 458053, 63324, 1377727, 1342632, 41575, + 68685, 172755, 63323, 368161, 199695, 538214, 167760, 388610, 398855, + 1001873, 364591, 1320518, 194060, 194324, 2318551, 196114, 64225, 272000, + 198668, 123585, 86420, 1925556, 695798, 88664, 46199, 759135, 28051, + 345094, 1977752, 51778, 82746, 638126, 2560910, 45830, 140576, 1603787, + 57371, 548730, 5343629, 2298913, 998813, 2156812, 423966, 68350, 145237, + 131935, 1600305, 342359, 111398, 1409144, 281007, 60314, 242004, 113418, + 246211, 61940, 95858, 957805, 40909, 307955, 174159, 124278, 241193, + 872614, 304180, 146719, 64361, 87478, 509360, 167169, 933479, 620561, + 483333, 97416, 143518, 286905, 597837, 2556043, 89065, 69944, 196858, + 88883, 49379, 916265, 1527392, 626954, 54415, 89013, 2883386, 106096, + 402697, 45578, 349852, 140379, 34648, 757343, 1305442, 2054757, 121232, + 606048, 101492, 51426, 1820833, 83412, 136349, 1379924, 505977, 1303486, + 95853, 146451, 285422, 2205423, 259020, 45864, 684547, 182014, 784334, + 174793, 563068, 170745, 1195531, 63337, 71833, 199978, 2330904, 227335, + 898280, 75294, 2011361, 116771, 157489, 807147, 1321443, 1148635, 2456524, + 81839, 1228251, 97488, 1051892, 75397, 3009923, 2732230, 90923, 39735, + 132433, 225033, 337555, 1204092, 686588, 1062402, 40362, 1361829, 1497217, + 150074, 551459, 2019128, 39581, 45349, 1117187, 87845, 1877288, 164448, + 10338362, 24942, 64737, 769946, 2469124, 2366997, 259124, 2667585, 29175, + 56250, 74450, 96697, 5920978, 838375, 225914, 119494, 206004, 430907, + 244083, 219495, 322239, 407426, 618748, 2087536, 2242124, 4736149, 124624, + 406305, 240921, 2675273, 4425340, 821457, 578467, 28040, 348943, 48795, + 145531, 52110, 1645730, 1768364, 348363, 85042, 2673847, 81935, 169075, + 367733, 135474, 383327, 1207018, 93481, 5934183, 352190, 636533, 145870, + 55659, 146215, 73191, 248681, 376907, 1606620, 169381, 81164, 246390, + 236093, 885778, 335969, 49266, 381430, 307437, 350077, 
34346, 49340, + 84715, 527120, 40163, 46898, 4609439, 617038, 2239574, 159905, 118337, + 120357, 430778, 3799158, 3516745, 54198, 2970796, 729239, 97848, 6317375, + 887345, 58198, 88111, 867595, 210136, 1572103, 1420760, 574046, 845988, + 509743, 397927, 1119016, 189955, 3883644, 291051, 126467, 1239907, 2556229, + 411058, 657444, 2025234, 1211368, 93151, 577594, 4842264, 1531713, 305084, + 479251, 20591, 1466166, 137417, 897756, 594767, 3606337, 32844, 82426, + 1294831, 57174, 290167, 322066, 813146, 5671804, 4425684, 895607, 450598, + 1048958, 232844, 56871, 46113, 70366, 701618, 97739, 157113, 865047, + 194810, 1501615, 1765727, 38125, 2733376, 40642, 437590, 127337, 106310, + 4167579, 665303, 809250, 1210317, 45750, 1853687, 348954, 156786, 90793, + 1885504, 281501, 3902273, 359546, 797540, 623508, 3672775, 55330, 648221, + 266831, 90030, 7118372, 735521, 1009925, 283901, 806005, 2434897, 94321, + 309571, 4213597, 2213280, 120339, 64403, 8155209, 1686948, 4327743, + 1868312, 135670, 3189615, 1569446, 706058, 58056, 2438625, 520619, 105201, + 141961, 179990, 1351440, 3148662, 2804457, 2760144, 70775, 33807, 1926518, + 2362142, 186761, 240941, 97860, 1040429, 1431035, 78892, 484039, 57845, + 724126, 3166209, 175913, 159211, 1182095, 86734, 1921472, 513546, 326016, + 1891609 +] + + +class TestBoxcox: + + def test_fixed_lmbda(self): + x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5 + xt = stats.boxcox(x, lmbda=1) + assert_allclose(xt, x - 1) + xt = stats.boxcox(x, lmbda=-1) + assert_allclose(xt, 1 - 1/x) + + xt = stats.boxcox(x, lmbda=0) + assert_allclose(xt, np.log(x)) + + # Also test that array_like input works + xt = stats.boxcox(list(x), lmbda=0) + assert_allclose(xt, np.log(x)) + + # test that constant input is accepted; see gh-12225 + xt = stats.boxcox(np.ones(10), 2) + assert_equal(xt, np.zeros(10)) + + def test_lmbda_None(self): + # Start from normal rv's, do inverse transform to check that + # optimization function gets close to the right answer. + lmbda = 2.5 + x = stats.norm.rvs(loc=10, size=50000, random_state=1245) + x_inv = (x * lmbda + 1)**(-lmbda) + xt, maxlog = stats.boxcox(x_inv) + + assert_almost_equal(maxlog, -1 / lmbda, decimal=2) + + def test_alpha(self): + rng = np.random.RandomState(1234) + x = _old_loggamma_rvs(5, size=50, random_state=rng) + 5 + + # Some regular values for alpha, on a small sample size + _, _, interval = stats.boxcox(x, alpha=0.75) + assert_allclose(interval, [4.004485780226041, 5.138756355035744]) + _, _, interval = stats.boxcox(x, alpha=0.05) + assert_allclose(interval, [1.2138178554857557, 8.209033272375663]) + + # Try some extreme values, see we don't hit the N=500 limit + x = _old_loggamma_rvs(7, size=500, random_state=rng) + 15 + _, _, interval = stats.boxcox(x, alpha=0.001) + assert_allclose(interval, [0.3988867, 11.40553131]) + _, _, interval = stats.boxcox(x, alpha=0.999) + assert_allclose(interval, [5.83316246, 5.83735292]) + + def test_boxcox_bad_arg(self): + # Raise ValueError if any data value is negative. + x = np.array([-1, 2]) + assert_raises(ValueError, stats.boxcox, x) + # Raise ValueError if data is constant. + assert_raises(ValueError, stats.boxcox, np.array([1])) + # Raise ValueError if data is not 1-dimensional. + assert_raises(ValueError, stats.boxcox, np.array([[1], [2]])) + + def test_empty(self): + assert_(stats.boxcox([]).shape == (0,)) + + def test_gh_6873(self): + # Regression test for gh-6873. 
+ y, lam = stats.boxcox(_boxcox_data) + # The expected value of lam was computed with the function + # powerTransform in the R library 'car'. I trust that value + # to only about five significant digits. + assert_allclose(lam, -0.051654, rtol=1e-5) + + @pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)]) + def test_bounded_optimizer_within_bounds(self, bounds): + # Define custom optimizer with bounds. + def optimizer(fun): + return optimize.minimize_scalar(fun, bounds=bounds, + method="bounded") + + _, lmbda = stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer) + assert bounds[0] < lmbda < bounds[1] + + def test_bounded_optimizer_against_unbounded_optimizer(self): + # Test whether setting bounds on optimizer excludes solution from + # unbounded optimizer. + + # Get unbounded solution. + _, lmbda = stats.boxcox(_boxcox_data, lmbda=None) + + # Set tolerance and bounds around solution. + bounds = (lmbda + 0.1, lmbda + 1) + options = {'xatol': 1e-12} + + def optimizer(fun): + return optimize.minimize_scalar(fun, bounds=bounds, + method="bounded", options=options) + + # Check bounded solution. Lower bound should be active. + _, lmbda_bounded = stats.boxcox(_boxcox_data, lmbda=None, + optimizer=optimizer) + assert lmbda_bounded != lmbda + assert_allclose(lmbda_bounded, bounds[0]) + + @pytest.mark.parametrize("optimizer", ["str", (1, 2), 0.1]) + def test_bad_optimizer_type_raises_error(self, optimizer): + # Check if error is raised if string, tuple or float is passed + with pytest.raises(ValueError, match="`optimizer` must be a callable"): + stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer) + + def test_bad_optimizer_value_raises_error(self): + # Check if error is raised if `optimizer` function does not return + # `OptimizeResult` object + + # Define test function that always returns 1 + def optimizer(fun): + return 1 + + message = "return an object containing the optimal `lmbda`" + with pytest.raises(ValueError, match=message): + stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer) + + @pytest.mark.parametrize( + "bad_x", [np.array([1, -42, 12345.6]), np.array([np.nan, 42, 1])] + ) + def test_negative_x_value_raises_error(self, bad_x): + """Test boxcox_normmax raises ValueError if x contains non-positive values.""" + message = "only positive, finite, real numbers" + with pytest.raises(ValueError, match=message): + stats.boxcox_normmax(bad_x) + + @pytest.mark.parametrize('x', [ + # Attempt to trigger overflow in power expressions. + np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0, + 2009.0, 1980.0, 1999.0, 2007.0, 1991.0]), + # Attempt to trigger overflow with a large optimal lambda. + np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0]), + # Attempt to trigger overflow with large data. 
+ np.array([2003.0e200, 1950.0e200, 1997.0e200, 2000.0e200, 2009.0e200]) + ]) + def test_overflow(self, x): + with pytest.warns(UserWarning, match="The optimal lambda is"): + xt_bc, lam_bc = stats.boxcox(x) + assert np.all(np.isfinite(xt_bc)) + + +class TestBoxcoxNormmax: + def setup_method(self): + self.x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5 + + def test_pearsonr(self): + maxlog = stats.boxcox_normmax(self.x) + assert_allclose(maxlog, 1.804465, rtol=1e-6) + + def test_mle(self): + maxlog = stats.boxcox_normmax(self.x, method='mle') + assert_allclose(maxlog, 1.758101, rtol=1e-6) + + # Check that boxcox() uses 'mle' + _, maxlog_boxcox = stats.boxcox(self.x) + assert_allclose(maxlog_boxcox, maxlog) + + def test_all(self): + maxlog_all = stats.boxcox_normmax(self.x, method='all') + assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6) + + @pytest.mark.parametrize("method", ["mle", "pearsonr", "all"]) + @pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)]) + def test_bounded_optimizer_within_bounds(self, method, bounds): + + def optimizer(fun): + return optimize.minimize_scalar(fun, bounds=bounds, + method="bounded") + + maxlog = stats.boxcox_normmax(self.x, method=method, + optimizer=optimizer) + assert np.all(bounds[0] < maxlog) + assert np.all(maxlog < bounds[1]) + + def test_user_defined_optimizer(self): + # tests an optimizer that is not based on scipy.optimize.minimize + lmbda = stats.boxcox_normmax(self.x) + lmbda_rounded = np.round(lmbda, 5) + lmbda_range = np.linspace(lmbda_rounded-0.01, lmbda_rounded+0.01, 1001) + + class MyResult: + pass + + def optimizer(fun): + # brute force minimum over the range + objs = [] + for lmbda in lmbda_range: + objs.append(fun(lmbda)) + res = MyResult() + res.x = lmbda_range[np.argmin(objs)] + return res + + lmbda2 = stats.boxcox_normmax(self.x, optimizer=optimizer) + assert lmbda2 != lmbda # not identical + assert_allclose(lmbda2, lmbda, 1e-5) # but as close as it should be + + def test_user_defined_optimizer_and_brack_raises_error(self): + optimizer = optimize.minimize_scalar + + # Using default `brack=None` with user-defined `optimizer` works as + # expected. + stats.boxcox_normmax(self.x, brack=None, optimizer=optimizer) + + # Using user-defined `brack` with user-defined `optimizer` is expected + # to throw an error. Instead, users should specify + # optimizer-specific parameters in the optimizer function itself. + with pytest.raises(ValueError, match="`brack` must be None if " + "`optimizer` is given"): + + stats.boxcox_normmax(self.x, brack=(-2.0, 2.0), + optimizer=optimizer) + + @pytest.mark.parametrize( + 'x', ([2003.0, 1950.0, 1997.0, 2000.0, 2009.0], + [0.50000471, 0.50004979, 0.50005902, 0.50009312, 0.50001632])) + def test_overflow(self, x): + message = "The optimal lambda is..." 
+ with pytest.warns(UserWarning, match=message): + lmbda = stats.boxcox_normmax(x, method='mle') + assert np.isfinite(special.boxcox(x, lmbda)).all() + # 10000 is safety factor used in boxcox_normmax + ymax = np.finfo(np.float64).max / 10000 + x_treme = np.max(x) if lmbda > 0 else np.min(x) + y_extreme = special.boxcox(x_treme, lmbda) + assert_allclose(y_extreme, ymax * np.sign(lmbda)) + + def test_negative_ymax(self): + with pytest.raises(ValueError, match="`ymax` must be strictly positive"): + stats.boxcox_normmax(self.x, ymax=-1) + + @pytest.mark.parametrize("x", [ + # positive overflow in float64 + np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0], + dtype=np.float64), + # negative overflow in float64 + np.array([0.50000471, 0.50004979, 0.50005902, 0.50009312, 0.50001632], + dtype=np.float64), + # positive overflow in float32 + np.array([200.3, 195.0, 199.7, 200.0, 200.9], + dtype=np.float32), + # negative overflow in float32 + np.array([2e-30, 1e-30, 1e-30, 1e-30, 1e-30, 1e-30], + dtype=np.float32), + ]) + @pytest.mark.parametrize("ymax", [1e10, 1e30, None]) + # TODO: add method "pearsonr" after fix overflow issue + @pytest.mark.parametrize("method", ["mle"]) + def test_user_defined_ymax_input_float64_32(self, x, ymax, method): + # Test the maximum of the transformed data close to ymax + with pytest.warns(UserWarning, match="The optimal lambda is"): + kwarg = {'ymax': ymax} if ymax is not None else {} + lmb = stats.boxcox_normmax(x, method=method, **kwarg) + x_treme = [np.min(x), np.max(x)] + ymax_res = max(abs(stats.boxcox(x_treme, lmb))) + if ymax is None: + # 10000 is safety factor used in boxcox_normmax + ymax = np.finfo(x.dtype).max / 10000 + assert_allclose(ymax, ymax_res, rtol=1e-5) + + @pytest.mark.parametrize("x", [ + # positive overflow in float32 but not float64 + [200.3, 195.0, 199.7, 200.0, 200.9], + # negative overflow in float32 but not float64 + [2e-30, 1e-30, 1e-30, 1e-30, 1e-30, 1e-30], + ]) + # TODO: add method "pearsonr" after fix overflow issue + @pytest.mark.parametrize("method", ["mle"]) + def test_user_defined_ymax_inf(self, x, method): + x_32 = np.asarray(x, dtype=np.float32) + x_64 = np.asarray(x, dtype=np.float64) + + # assert overflow with float32 but not float64 + with pytest.warns(UserWarning, match="The optimal lambda is"): + stats.boxcox_normmax(x_32, method=method) + stats.boxcox_normmax(x_64, method=method) + + # compute the true optimal lambda then compare them + lmb_32 = stats.boxcox_normmax(x_32, ymax=np.inf, method=method) + lmb_64 = stats.boxcox_normmax(x_64, ymax=np.inf, method=method) + assert_allclose(lmb_32, lmb_64, rtol=1e-2) + + +class TestBoxcoxNormplot: + def setup_method(self): + self.x = _old_loggamma_rvs(5, size=500, random_state=7654321) + 5 + + def test_basic(self): + N = 5 + lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N) + ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057, + 0.95843297] + assert_allclose(lmbdas, np.linspace(-10, 10, num=N)) + assert_allclose(ppcc, ppcc_expected) + + @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") + def test_plot_kwarg(self): + # Check with the matplotlib.pyplot module + fig = plt.figure() + ax = fig.add_subplot(111) + stats.boxcox_normplot(self.x, -20, 20, plot=plt) + fig.delaxes(ax) + + # Check that a Matplotlib Axes object is accepted + ax = fig.add_subplot(111) + stats.boxcox_normplot(self.x, -20, 20, plot=ax) + plt.close() + + def test_invalid_inputs(self): + # `lb` has to be larger than `la` + assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 
0) + # `x` can not contain negative values + assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1) + + def test_empty(self): + assert_(stats.boxcox_normplot([], 0, 1).size == 0) + + +class TestYeojohnson_llf: + + def test_array_like(self): + x = stats.norm.rvs(size=100, loc=0, random_state=54321) + lmbda = 1 + llf = stats.yeojohnson_llf(lmbda, x) + llf2 = stats.yeojohnson_llf(lmbda, list(x)) + assert_allclose(llf, llf2, rtol=1e-12) + + def test_2d_input(self): + x = stats.norm.rvs(size=100, loc=10, random_state=54321) + lmbda = 1 + llf = stats.yeojohnson_llf(lmbda, x) + llf2 = stats.yeojohnson_llf(lmbda, np.vstack([x, x]).T) + assert_allclose([llf, llf], llf2, rtol=1e-12) + + def test_empty(self): + assert_(np.isnan(stats.yeojohnson_llf(1, []))) + + +class TestYeojohnson: + + def test_fixed_lmbda(self): + rng = np.random.RandomState(12345) + + # Test positive input + x = _old_loggamma_rvs(5, size=50, random_state=rng) + 5 + assert np.all(x > 0) + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt, x) + xt = stats.yeojohnson(x, lmbda=-1) + assert_allclose(xt, 1 - 1 / (x + 1)) + xt = stats.yeojohnson(x, lmbda=0) + assert_allclose(xt, np.log(x + 1)) + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt, x) + + # Test negative input + x = _old_loggamma_rvs(5, size=50, random_state=rng) - 5 + assert np.all(x < 0) + xt = stats.yeojohnson(x, lmbda=2) + assert_allclose(xt, -np.log(-x + 1)) + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt, x) + xt = stats.yeojohnson(x, lmbda=3) + assert_allclose(xt, 1 / (-x + 1) - 1) + + # test both positive and negative input + x = _old_loggamma_rvs(5, size=50, random_state=rng) - 2 + assert not np.all(x < 0) + assert not np.all(x >= 0) + pos = x >= 0 + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt[pos], x[pos]) + xt = stats.yeojohnson(x, lmbda=-1) + assert_allclose(xt[pos], 1 - 1 / (x[pos] + 1)) + xt = stats.yeojohnson(x, lmbda=0) + assert_allclose(xt[pos], np.log(x[pos] + 1)) + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt[pos], x[pos]) + + neg = ~pos + xt = stats.yeojohnson(x, lmbda=2) + assert_allclose(xt[neg], -np.log(-x[neg] + 1)) + xt = stats.yeojohnson(x, lmbda=1) + assert_allclose(xt[neg], x[neg]) + xt = stats.yeojohnson(x, lmbda=3) + assert_allclose(xt[neg], 1 / (-x[neg] + 1) - 1) + + @pytest.mark.parametrize('lmbda', [0, .1, .5, 2]) + def test_lmbda_None(self, lmbda): + # Start from normal rv's, do inverse transform to check that + # optimization function gets close to the right answer. 
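+ # For reference, the Yeo-Johnson forward transform is + # ((x + 1)**lmbda - 1) / lmbda for x >= 0 (log(x + 1) when lmbda == 0) + # and -(((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda)) for x < 0 + # (-log(-x + 1) when lmbda == 2); _inverse_transform below inverts + # each branch.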
+ + def _inverse_transform(x, lmbda): + x_inv = np.zeros(x.shape, dtype=x.dtype) + pos = x >= 0 + + # when x >= 0 + if abs(lmbda) < np.spacing(1.): + x_inv[pos] = np.exp(x[pos]) - 1 + else: # lmbda != 0 + x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1 + + # when x < 0 + if abs(lmbda - 2) > np.spacing(1.): + x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1, + 1 / (2 - lmbda)) + else: # lmbda == 2 + x_inv[~pos] = 1 - np.exp(-x[~pos]) + + return x_inv + + n_samples = 20000 + np.random.seed(1234567) + x = np.random.normal(loc=0, scale=1, size=(n_samples)) + + x_inv = _inverse_transform(x, lmbda) + xt, maxlog = stats.yeojohnson(x_inv) + + assert_allclose(maxlog, lmbda, atol=1e-2) + + assert_almost_equal(0, np.linalg.norm(x - xt) / n_samples, decimal=2) + assert_almost_equal(0, xt.mean(), decimal=1) + assert_almost_equal(1, xt.std(), decimal=1) + + def test_empty(self): + assert_(stats.yeojohnson([]).shape == (0,)) + + def test_array_like(self): + x = stats.norm.rvs(size=100, loc=0, random_state=54321) + xt1, _ = stats.yeojohnson(x) + xt2, _ = stats.yeojohnson(list(x)) + assert_allclose(xt1, xt2, rtol=1e-12) + + @pytest.mark.parametrize('dtype', [np.complex64, np.complex128]) + def test_input_dtype_complex(self, dtype): + x = np.arange(6, dtype=dtype) + err_msg = ('Yeo-Johnson transformation is not defined for complex ' + 'numbers.') + with pytest.raises(ValueError, match=err_msg): + stats.yeojohnson(x) + + @pytest.mark.parametrize('dtype', [np.int8, np.uint8, np.int16, np.int32]) + def test_input_dtype_integer(self, dtype): + x_int = np.arange(8, dtype=dtype) + x_float = np.arange(8, dtype=np.float64) + xt_int, lmbda_int = stats.yeojohnson(x_int) + xt_float, lmbda_float = stats.yeojohnson(x_float) + assert_allclose(xt_int, xt_float, rtol=1e-7) + assert_allclose(lmbda_int, lmbda_float, rtol=1e-7) + + def test_input_high_variance(self): + # non-regression test for gh-10821 + x = np.array([3251637.22, 620695.44, 11642969.00, 2223468.22, + 85307500.00, 16494389.89, 917215.88, 11642969.00, + 2145773.87, 4962000.00, 620695.44, 651234.50, + 1907876.71, 4053297.88, 3251637.22, 3259103.08, + 9547969.00, 20631286.23, 12807072.08, 2383819.84, + 90114500.00, 17209575.46, 12852969.00, 2414609.99, + 2170368.23]) + xt_yeo, lam_yeo = stats.yeojohnson(x) + xt_box, lam_box = stats.boxcox(x + 1) + assert_allclose(xt_yeo, xt_box, rtol=1e-6) + assert_allclose(lam_yeo, lam_box, rtol=1e-6) + + @pytest.mark.parametrize('x', [ + np.array([1.0, float("nan"), 2.0]), + np.array([1.0, float("inf"), 2.0]), + np.array([1.0, -float("inf"), 2.0]), + np.array([-1.0, float("nan"), float("inf"), -float("inf"), 1.0]) + ]) + def test_nonfinite_input(self, x): + with pytest.raises(ValueError, match='Yeo-Johnson input must be finite'): + xt_yeo, lam_yeo = stats.yeojohnson(x) + + @pytest.mark.parametrize('x', [ + # Attempt to trigger overflow in power expressions. + np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0, + 2009.0, 1980.0, 1999.0, 2007.0, 1991.0]), + # Attempt to trigger overflow with a large optimal lambda. + np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0]), + # Attempt to trigger overflow with large data. 
+ np.array([2003.0e200, 1950.0e200, 1997.0e200, 2000.0e200, 2009.0e200]) + ]) + def test_overflow(self, x): + # non-regression test for gh-18389 + + def optimizer(fun, lam_yeo): + out = optimize.fminbound(fun, -lam_yeo, lam_yeo, xtol=1.48e-08) + result = optimize.OptimizeResult() + result.x = out + return result + + with np.errstate(all="raise"): + xt_yeo, lam_yeo = stats.yeojohnson(x) + xt_box, lam_box = stats.boxcox( + x + 1, optimizer=partial(optimizer, lam_yeo=lam_yeo)) + assert np.isfinite(np.var(xt_yeo)) + assert np.isfinite(np.var(xt_box)) + assert_allclose(lam_yeo, lam_box, rtol=1e-6) + assert_allclose(xt_yeo, xt_box, rtol=1e-4) + + @pytest.mark.parametrize('x', [ + np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0, + 2009.0, 1980.0, 1999.0, 2007.0, 1991.0]), + np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0]) + ]) + @pytest.mark.parametrize('scale', [1, 1e-12, 1e-32, 1e-150, 1e32, 1e200]) + @pytest.mark.parametrize('sign', [1, -1]) + def test_overflow_underflow_signed_data(self, x, scale, sign): + # non-regression test for gh-18389 + with np.errstate(all="raise"): + xt_yeo, lam_yeo = stats.yeojohnson(sign * x * scale) + assert np.all(np.sign(sign * x) == np.sign(xt_yeo)) + assert np.isfinite(lam_yeo) + assert np.isfinite(np.var(xt_yeo)) + + @pytest.mark.parametrize('x', [ + np.array([0, 1, 2, 3]), + np.array([0, -1, 2, -3]), + np.array([0, 0, 0]) + ]) + @pytest.mark.parametrize('sign', [1, -1]) + @pytest.mark.parametrize('brack', [None, (-2, 2)]) + def test_integer_signed_data(self, x, sign, brack): + with np.errstate(all="raise"): + x_int = sign * x + x_float = x_int.astype(np.float64) + lam_yeo_int = stats.yeojohnson_normmax(x_int, brack=brack) + xt_yeo_int = stats.yeojohnson(x_int, lmbda=lam_yeo_int) + lam_yeo_float = stats.yeojohnson_normmax(x_float, brack=brack) + xt_yeo_float = stats.yeojohnson(x_float, lmbda=lam_yeo_float) + assert np.all(np.sign(x_int) == np.sign(xt_yeo_int)) + assert np.isfinite(lam_yeo_int) + assert np.isfinite(np.var(xt_yeo_int)) + assert lam_yeo_int == lam_yeo_float + assert np.all(xt_yeo_int == xt_yeo_float) + + +class TestYeojohnsonNormmax: + def setup_method(self): + self.x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5 + + def test_mle(self): + maxlog = stats.yeojohnson_normmax(self.x) + assert_allclose(maxlog, 1.876393, rtol=1e-6) + + def test_darwin_example(self): + # test from original paper "A new family of power transformations to + # improve normality or symmetry" by Yeo and Johnson. + x = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3, + 7.5, -6.0] + lmbda = stats.yeojohnson_normmax(x) + assert np.allclose(lmbda, 1.305, atol=1e-3) + + +class TestCircFuncs: + # In gh-5747, the R package `circular` was used to calculate reference + # values for the circular variance, e.g.: + # library(circular) + # options(digits=16) + # x = c(0, 2*pi/3, 5*pi/3) + # var.circular(x) + @pytest.mark.parametrize("test_func,expected", + [(stats.circmean, 0.167690146), + (stats.circvar, 0.006455174270186603), + (stats.circstd, 6.520702116)]) + def test_circfuncs(self, test_func, expected): + x = np.array([355, 5, 2, 359, 10, 350]) + assert_allclose(test_func(x, high=360), expected, rtol=1e-7) + + def test_circfuncs_small(self): + x = np.array([20, 21, 22, 18, 19, 20.5, 19.2]) + M1 = x.mean() + M2 = stats.circmean(x, high=360) + assert_allclose(M2, M1, rtol=1e-5) + + V1 = (x*np.pi/180).var() + # for small variations, circvar is approximately half the + # linear variance + V1 = V1 / 2. 
+ V2 = stats.circvar(x, high=360) + assert_allclose(V2, V1, rtol=1e-4) + + S1 = x.std() + S2 = stats.circstd(x, high=360) + assert_allclose(S2, S1, rtol=1e-4) + + @pytest.mark.parametrize("test_func, numpy_func", + [(stats.circmean, np.mean), + (stats.circvar, np.var), + (stats.circstd, np.std)]) + def test_circfuncs_close(self, test_func, numpy_func): + # circfuncs should handle very similar inputs (gh-12740) + x = np.array([0.12675364631578953] * 10 + [0.12675365920187928] * 100) + circstat = test_func(x) + normal = numpy_func(x) + assert_allclose(circstat, normal, atol=2e-8) + + def test_circmean_axis(self): + x = np.array([[355, 5, 2, 359, 10, 350], + [351, 7, 4, 352, 9, 349], + [357, 9, 8, 358, 4, 356]]) + M1 = stats.circmean(x, high=360) + M2 = stats.circmean(x.ravel(), high=360) + assert_allclose(M1, M2, rtol=1e-14) + + M1 = stats.circmean(x, high=360, axis=1) + M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])] + assert_allclose(M1, M2, rtol=1e-14) + + M1 = stats.circmean(x, high=360, axis=0) + M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])] + assert_allclose(M1, M2, rtol=1e-14) + + def test_circvar_axis(self): + x = np.array([[355, 5, 2, 359, 10, 350], + [351, 7, 4, 352, 9, 349], + [357, 9, 8, 358, 4, 356]]) + + V1 = stats.circvar(x, high=360) + V2 = stats.circvar(x.ravel(), high=360) + assert_allclose(V1, V2, rtol=1e-11) + + V1 = stats.circvar(x, high=360, axis=1) + V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])] + assert_allclose(V1, V2, rtol=1e-11) + + V1 = stats.circvar(x, high=360, axis=0) + V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])] + assert_allclose(V1, V2, rtol=1e-11) + + def test_circstd_axis(self): + x = np.array([[355, 5, 2, 359, 10, 350], + [351, 7, 4, 352, 9, 349], + [357, 9, 8, 358, 4, 356]]) + + S1 = stats.circstd(x, high=360) + S2 = stats.circstd(x.ravel(), high=360) + assert_allclose(S1, S2, rtol=1e-11) + + S1 = stats.circstd(x, high=360, axis=1) + S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])] + assert_allclose(S1, S2, rtol=1e-11) + + S1 = stats.circstd(x, high=360, axis=0) + S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])] + assert_allclose(S1, S2, rtol=1e-11) + + @pytest.mark.parametrize("test_func,expected", + [(stats.circmean, 0.167690146), + (stats.circvar, 0.006455174270186603), + (stats.circstd, 6.520702116)]) + def test_circfuncs_array_like(self, test_func, expected): + x = [355, 5, 2, 359, 10, 350] + assert_allclose(test_func(x, high=360), expected, rtol=1e-7) + + @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar, + stats.circstd]) + def test_empty(self, test_func): + assert_(np.isnan(test_func([]))) + + @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar, + stats.circstd]) + def test_nan_propagate(self, test_func): + x = [355, 5, 2, 359, 10, 350, np.nan] + assert_(np.isnan(test_func(x, high=360))) + + @pytest.mark.parametrize("test_func,expected", + [(stats.circmean, + {None: np.nan, 0: 355.66582264, 1: 0.28725053}), + (stats.circvar, + {None: np.nan, + 0: 0.002570671054089924, + 1: 0.005545914017677123}), + (stats.circstd, + {None: np.nan, 0: 4.11093193, 1: 6.04265394})]) + def test_nan_propagate_array(self, test_func, expected): + x = np.array([[355, 5, 2, 359, 10, 350, 1], + [351, 7, 4, 352, 9, 349, np.nan], + [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]]) + for axis in expected.keys(): + out = test_func(x, high=360, axis=axis) + if axis is None: + assert_(np.isnan(out)) + else: + 
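+ # Sketch of the approximation: with mean resultant length + # R = |mean(exp(1j*x))| and circvar = 1 - R, a second-order expansion + # of the cosine terms gives R ~= 1 - var(x)/2 for tightly clustered + # angles, hence circvar ~= var(x)/2.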
assert_allclose(out[0], expected[axis], rtol=1e-7) + assert_(np.isnan(out[1:]).all()) + + @pytest.mark.parametrize("test_func,expected", + [(stats.circmean, + {None: 359.4178026893944, + 0: np.array([353.0, 6.0, 3.0, 355.5, 9.5, + 349.5]), + 1: np.array([0.16769015, 358.66510252])}), + (stats.circvar, + {None: 0.008396678483192477, + 0: np.array([1.9997969, 0.4999873, 0.4999873, + 6.1230956, 0.1249992, 0.1249992] + )*(np.pi/180)**2, + 1: np.array([0.006455174270186603, + 0.01016767581393285])}), + (stats.circstd, + {None: 7.440570778057074, + 0: np.array([2.00020313, 1.00002539, 1.00002539, + 3.50108929, 0.50000317, + 0.50000317]), + 1: np.array([6.52070212, 8.19138093])})]) + def test_nan_omit_array(self, test_func, expected): + x = np.array([[355, 5, 2, 359, 10, 350, np.nan], + [351, 7, 4, 352, 9, 349, np.nan], + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]]) + for axis in expected.keys(): + out = test_func(x, high=360, nan_policy='omit', axis=axis) + if axis is None: + assert_allclose(out, expected[axis], rtol=1e-7) + else: + assert_allclose(out[:-1], expected[axis], rtol=1e-7) + assert_(np.isnan(out[-1])) + + @pytest.mark.parametrize("test_func,expected", + [(stats.circmean, 0.167690146), + (stats.circvar, 0.006455174270186603), + (stats.circstd, 6.520702116)]) + def test_nan_omit(self, test_func, expected): + x = [355, 5, 2, 359, 10, 350, np.nan] + assert_allclose(test_func(x, high=360, nan_policy='omit'), + expected, rtol=1e-7) + + @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar, + stats.circstd]) + def test_nan_omit_all(self, test_func): + x = [np.nan, np.nan, np.nan, np.nan, np.nan] + assert_(np.isnan(test_func(x, nan_policy='omit'))) + + @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar, + stats.circstd]) + def test_nan_omit_all_axis(self, test_func): + x = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, np.nan, np.nan]]) + out = test_func(x, nan_policy='omit', axis=1) + assert_(np.isnan(out).all()) + assert_(len(out) == 2) + + @pytest.mark.parametrize("x", + [[355, 5, 2, 359, 10, 350, np.nan], + np.array([[355, 5, 2, 359, 10, 350, np.nan], + [351, 7, 4, 352, np.nan, 9, 349]])]) + @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar, + stats.circstd]) + def test_nan_raise(self, test_func, x): + assert_raises(ValueError, test_func, x, high=360, nan_policy='raise') + + @pytest.mark.parametrize("x", + [[355, 5, 2, 359, 10, 350, np.nan], + np.array([[355, 5, 2, 359, 10, 350, np.nan], + [351, 7, 4, 352, np.nan, 9, 349]])]) + @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar, + stats.circstd]) + def test_bad_nan_policy(self, test_func, x): + assert_raises(ValueError, test_func, x, high=360, nan_policy='foobar') + + def test_circmean_scalar(self): + x = 1. 
+ M1 = x + M2 = stats.circmean(x) + assert_allclose(M2, M1, rtol=1e-5) + + def test_circmean_range(self): + # regression test for gh-6420: circmean(..., high, low) must be + # between `high` and `low` + m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi) + assert_(m < np.pi) + assert_(m > -np.pi) + + def test_circfuncs_uint8(self): + # regression test for gh-7255: overflow when working with + # numpy uint8 data type + x = np.array([150, 10], dtype='uint8') + assert_equal(stats.circmean(x, high=180), 170.0) + assert_allclose(stats.circvar(x, high=180), 0.2339555554617, rtol=1e-7) + assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7) + + +class TestMedianTest: + + def test_bad_n_samples(self): + # median_test requires at least two samples. + assert_raises(ValueError, stats.median_test, [1, 2, 3]) + + def test_empty_sample(self): + # Each sample must contain at least one value. + assert_raises(ValueError, stats.median_test, [], [1, 2, 3]) + + def test_empty_when_ties_ignored(self): + # The grand median is 1, and all values in the first argument are + # equal to the grand median. With ties="ignore", those values are + # ignored, which results in the first sample being (in effect) empty. + # This should raise a ValueError. + assert_raises(ValueError, stats.median_test, + [1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore") + + def test_empty_contingency_row(self): + # The grand median is 1, and with the default ties="below", all the + # values in the samples are counted as being below the grand median. + # This would result in a row of zeros in the contingency table, which + # is an error. + assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1]) + + # With ties="above", all the values are counted as above the + # grand median. + assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1], + ties="above") + + def test_bad_ties(self): + assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], + ties="foo") + + def test_bad_nan_policy(self): + assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], + nan_policy='foobar') + + def test_bad_keyword(self): + assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5], + foo="foo") + + def test_simple(self): + x = [1, 2, 3] + y = [1, 2, 3] + stat, p, med, tbl = stats.median_test(x, y) + + # The median is floating point, but this equality test should be safe. + assert_equal(med, 2.0) + + assert_array_equal(tbl, [[1, 1], [2, 2]]) + + # The expected values of the contingency table equal the contingency + # table, so the statistic should be 0 and the p-value should be 1. + assert_equal(stat, 0) + assert_equal(p, 1) + + def test_ties_options(self): + # Test the contingency table calculation. + x = [1, 2, 3, 4] + y = [5, 6] + z = [7, 8, 9] + # grand median is 5. + + # Default 'ties' option is "below".
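+ # Table layout: row 0 counts values strictly above the grand median, + # row 1 the rest; `ties` decides where values equal to the median + # (here the 5 in `y`) are counted: "below" -> row 1, "above" -> row 0, + # "ignore" -> dropped.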
+ stat, p, m, tbl = stats.median_test(x, y, z) + assert_equal(m, 5) + assert_equal(tbl, [[0, 1, 3], [4, 1, 0]]) + + stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore") + assert_equal(m, 5) + assert_equal(tbl, [[0, 1, 3], [4, 0, 0]]) + + stat, p, m, tbl = stats.median_test(x, y, z, ties="above") + assert_equal(m, 5) + assert_equal(tbl, [[0, 2, 3], [4, 0, 0]]) + + def test_nan_policy_options(self): + x = [1, 2, np.nan] + y = [4, 5, 6] + mt1 = stats.median_test(x, y, nan_policy='propagate') + s, p, m, t = stats.median_test(x, y, nan_policy='omit') + + assert_equal(mt1, (np.nan, np.nan, np.nan, None)) + assert_allclose(s, 0.31250000000000006) + assert_allclose(p, 0.57615012203057869) + assert_equal(m, 4.0) + assert_equal(t, np.array([[0, 2], [2, 1]])) + assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise') + + def test_basic(self): + # median_test calls chi2_contingency to compute the test statistic + # and p-value. Make sure it hasn't screwed up the call... + + x = [1, 2, 3, 4, 5] + y = [2, 4, 6, 8] + + stat, p, m, tbl = stats.median_test(x, y) + assert_equal(m, 4) + assert_equal(tbl, [[1, 2], [4, 2]]) + + exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl) + assert_allclose(stat, exp_stat) + assert_allclose(p, exp_p) + + stat, p, m, tbl = stats.median_test(x, y, lambda_=0) + assert_equal(m, 4) + assert_equal(tbl, [[1, 2], [4, 2]]) + + exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0) + assert_allclose(stat, exp_stat) + assert_allclose(p, exp_p) + + stat, p, m, tbl = stats.median_test(x, y, correction=False) + assert_equal(m, 4) + assert_equal(tbl, [[1, 2], [4, 2]]) + + exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False) + assert_allclose(stat, exp_stat) + assert_allclose(p, exp_p) + + @pytest.mark.parametrize("correction", [False, True]) + def test_result(self, correction): + x = [1, 2, 3] + y = [1, 2, 3] + + res = stats.median_test(x, y, correction=correction) + assert_equal((res.statistic, res.pvalue, res.median, res.table), res) + + +class TestDirectionalStats: + # Reference implementations are not available + def test_directional_stats_correctness(self): + # Data from Fisher: Dispersion on a sphere, 1953 and + # Mardia and Jupp, Directional Statistics. + + decl = -np.deg2rad(np.array([343.2, 62., 36.9, 27., 359., + 5.7, 50.4, 357.6, 44.])) + incl = -np.deg2rad(np.array([66.1, 68.7, 70.1, 82.1, 79.5, + 73., 69.3, 58.8, 51.4])) + data = np.stack((np.cos(incl) * np.cos(decl), + np.cos(incl) * np.sin(decl), + np.sin(incl)), + axis=1) + + dirstats = stats.directional_stats(data) + directional_mean = dirstats.mean_direction + mean_rounded = np.round(directional_mean, 4) + + reference_mean = np.array([0.2984, -0.1346, -0.9449]) + assert_allclose(mean_rounded, reference_mean) + + @pytest.mark.parametrize('angles, ref', [ + ([-np.pi/2, np.pi/2], 1.), + ([0, 2*np.pi], 0.) 
+ ]) + def test_directional_stats_2d_special_cases(self, angles, ref): + if callable(ref): + ref = ref(angles) + data = np.stack([np.cos(angles), np.sin(angles)], axis=1) + res = 1 - stats.directional_stats(data).mean_resultant_length + assert_allclose(res, ref) + + def test_directional_stats_2d(self): + # Test that for circular data directional_stats + # yields the same result as circmean/circvar + rng = np.random.default_rng(0xec9a6899d5a2830e0d1af479dbe1fd0c) + testdata = 2 * np.pi * rng.random((1000, )) + testdata_vector = np.stack((np.cos(testdata), + np.sin(testdata)), + axis=1) + dirstats = stats.directional_stats(testdata_vector) + directional_mean = dirstats.mean_direction + directional_mean_angle = np.arctan2(directional_mean[1], + directional_mean[0]) + directional_mean_angle = directional_mean_angle % (2*np.pi) + circmean = stats.circmean(testdata) + assert_allclose(circmean, directional_mean_angle) + + directional_var = 1 - dirstats.mean_resultant_length + circular_var = stats.circvar(testdata) + assert_allclose(directional_var, circular_var) + + def test_directional_mean_higher_dim(self): + # test that directional_stats works for higher dimensions + # here a 4D array is reduced over axis = 2 + data = np.array([[0.8660254, 0.5, 0.], + [0.8660254, -0.5, 0.]]) + full_array = np.tile(data, (2, 2, 2, 1)) + expected = np.array([[[1., 0., 0.], + [1., 0., 0.]], + [[1., 0., 0.], + [1., 0., 0.]]]) + dirstats = stats.directional_stats(full_array, axis=2) + assert_allclose(expected, dirstats.mean_direction) + + def test_directional_stats_list_ndarray_input(self): + # test that list and numpy array inputs yield same results + data = [[0.8660254, 0.5, 0.], [0.8660254, -0.5, 0]] + data_array = np.asarray(data) + res = stats.directional_stats(data) + ref = stats.directional_stats(data_array) + assert_allclose(res.mean_direction, ref.mean_direction) + assert_allclose(res.mean_resultant_length, + ref.mean_resultant_length) + + def test_directional_stats_1d_error(self): + # test that one-dimensional data raises ValueError + data = np.ones((5, )) + message = (r"samples must at least be two-dimensional. 
" + r"Instead samples has shape: (5,)") + with pytest.raises(ValueError, match=re.escape(message)): + stats.directional_stats(data) + + def test_directional_stats_normalize(self): + # test that directional stats calculations yield same results + # for unnormalized input with normalize=True and normalized + # input with normalize=False + data = np.array([[0.8660254, 0.5, 0.], + [1.7320508, -1., 0.]]) + res = stats.directional_stats(data, normalize=True) + normalized_data = data / np.linalg.norm(data, axis=-1, + keepdims=True) + ref = stats.directional_stats(normalized_data, + normalize=False) + assert_allclose(res.mean_direction, ref.mean_direction) + assert_allclose(res.mean_resultant_length, + ref.mean_resultant_length) + + +class TestFDRControl: + def test_input_validation(self): + message = "`ps` must include only numbers between 0 and 1" + with pytest.raises(ValueError, match=message): + stats.false_discovery_control([-1, 0.5, 0.7]) + with pytest.raises(ValueError, match=message): + stats.false_discovery_control([0.5, 0.7, 2]) + with pytest.raises(ValueError, match=message): + stats.false_discovery_control([0.5, 0.7, np.nan]) + + message = "Unrecognized `method` 'YAK'" + with pytest.raises(ValueError, match=message): + stats.false_discovery_control([0.5, 0.7, 0.9], method='YAK') + + message = "`axis` must be an integer or `None`" + with pytest.raises(ValueError, match=message): + stats.false_discovery_control([0.5, 0.7, 0.9], axis=1.5) + with pytest.raises(ValueError, match=message): + stats.false_discovery_control([0.5, 0.7, 0.9], axis=(1, 2)) + + def test_against_TileStats(self): + # See reference [3] of false_discovery_control + ps = [0.005, 0.009, 0.019, 0.022, 0.051, 0.101, 0.361, 0.387] + res = stats.false_discovery_control(ps) + ref = [0.036, 0.036, 0.044, 0.044, 0.082, 0.135, 0.387, 0.387] + assert_allclose(res, ref, atol=1e-3) + + @pytest.mark.parametrize("case", + [([0.24617028, 0.01140030, 0.05652047, 0.06841983, + 0.07989886, 0.01841490, 0.17540784, 0.06841983, + 0.06841983, 0.25464082], 'bh'), + ([0.72102493, 0.03339112, 0.16554665, 0.20039952, + 0.23402122, 0.05393666, 0.51376399, 0.20039952, + 0.20039952, 0.74583488], 'by')]) + def test_against_R(self, case): + # Test against p.adjust, e.g. 
+ # p = c(0.22155325, 0.00114003,..., 0.0364813 , 0.25464082) + # p.adjust(p, "BY") + ref, method = case + rng = np.random.default_rng(6134137338861652935) + ps = stats.loguniform.rvs(1e-3, 0.5, size=10, random_state=rng) + ps[3] = ps[7] # force a tie + res = stats.false_discovery_control(ps, method=method) + assert_allclose(res, ref, atol=1e-6) + + def test_axis_None(self): + rng = np.random.default_rng(6134137338861652935) + ps = stats.loguniform.rvs(1e-3, 0.5, size=(3, 4, 5), random_state=rng) + res = stats.false_discovery_control(ps, axis=None) + ref = stats.false_discovery_control(ps.ravel()) + assert_equal(res, ref) + + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_axis(self, axis): + rng = np.random.default_rng(6134137338861652935) + ps = stats.loguniform.rvs(1e-3, 0.5, size=(3, 4, 5), random_state=rng) + res = stats.false_discovery_control(ps, axis=axis) + ref = np.apply_along_axis(stats.false_discovery_control, axis, ps) + assert_equal(res, ref) + + def test_edge_cases(self): + assert_array_equal(stats.false_discovery_control([0.25]), [0.25]) + assert_array_equal(stats.false_discovery_control(0.25), 0.25) + assert_array_equal(stats.false_discovery_control([]), []) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_mstats_basic.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_mstats_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..5250e1e3ca2497bcf376a709aa965c9a6a6628b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_mstats_basic.py @@ -0,0 +1,2053 @@ +""" +Tests for the stats.mstats module (support for masked arrays) +""" +import warnings +import platform + +import numpy as np +from numpy import nan +import numpy.ma as ma +from numpy.ma import masked, nomask + +import scipy.stats.mstats as mstats +from scipy import stats +from .common_tests import check_named_results +import pytest +from pytest import raises as assert_raises +from numpy.ma.testutils import (assert_equal, assert_almost_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, assert_, + assert_allclose, assert_array_equal) +from numpy.testing import suppress_warnings +from scipy.stats import _mstats_basic + + +class TestMquantiles: + def test_mquantiles_limit_keyword(self): + # Regression test for Trac ticket #867 + data = np.array([[6., 7., 1.], + [47., 15., 2.], + [49., 36., 3.], + [15., 39., 4.], + [42., 40., -999.], + [41., 41., -999.], + [7., -999., -999.], + [39., -999., -999.], + [43., -999., -999.], + [40., -999., -999.], + [36., -999., -999.]]) + desired = [[19.2, 14.6, 1.45], + [40.0, 37.5, 2.5], + [42.8, 40.05, 3.55]] + quants = mstats.mquantiles(data, axis=0, limit=(0, 50)) + assert_almost_equal(quants, desired) + + +def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7): + # Note this doesn't test when axis is not specified + x = mstats.gmean(array_like, axis=axis, dtype=dtype) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) + + +def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7): + x = stats.hmean(array_like, axis=axis, dtype=dtype) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) + + +class TestGeoMean: + def test_1d(self): + a = [1, 2, 3, 4] + desired = np.power(1*2*3*4, 1./4.) 
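+ # gmean is the n-th root of the product, equivalently + # exp(mean(log(a))); for masked arrays only the unmasked entries + # enter the product, which the masked cases below rely on.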
+ check_equal_gmean(a, desired, rtol=1e-14) + + def test_1d_ma(self): + # Test a 1d masked array + a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) + desired = 45.2872868812 + check_equal_gmean(a, desired) + + a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) + desired = np.power(1*2*3, 1./3.) + check_equal_gmean(a, desired, rtol=1e-14) + + def test_1d_ma_value(self): + # Test a 1d masked array with a masked value + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], + mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) + desired = 41.4716627439 + check_equal_gmean(a, desired) + + def test_1d_ma0(self): + # Test a 1d masked array with zero element + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) + desired = 0 + check_equal_gmean(a, desired) + + def test_1d_ma_inf(self): + # Test a 1d masked array with negative element + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1]) + desired = np.nan + with np.errstate(invalid='ignore'): + check_equal_gmean(a, desired) + + @pytest.mark.skipif(not hasattr(np, 'float96'), + reason='cannot find float96 so skipping') + def test_1d_float96(self): + a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) + desired_dt = np.power(1*2*3, 1./3.).astype(np.float96) + check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14) + + def test_2d_ma(self): + a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], + mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]]) + desired = np.array([1, 2, 3, 4]) + check_equal_gmean(a, desired, axis=0, rtol=1e-14) + + desired = ma.array([np.power(1*2*3*4, 1./4.), + np.power(2*3, 1./2.), + np.power(1*4, 1./2.)]) + check_equal_gmean(a, desired, axis=-1, rtol=1e-14) + + # Test a 2d masked array + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = 52.8885199 + check_equal_gmean(np.ma.array(a), desired) + + +class TestHarMean: + def test_1d(self): + a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) + desired = 3. / (1./1 + 1./2 + 1./3) + check_equal_hmean(a, desired, rtol=1e-14) + + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) + desired = 34.1417152147 + check_equal_hmean(a, desired) + + a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], + mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) + desired = 31.8137186141 + check_equal_hmean(a, desired) + + @pytest.mark.skipif(not hasattr(np, 'float96'), + reason='cannot find float96 so skipping') + def test_1d_float96(self): + a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1]) + desired_dt = np.asarray(3. 
/ (1./1 + 1./2 + 1./3), dtype=np.float96) + check_equal_hmean(a, desired_dt, dtype=np.float96) + + def test_2d(self): + a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], + mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]]) + desired = ma.array([1, 2, 3, 4]) + check_equal_hmean(a, desired, axis=0, rtol=1e-14) + + desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)] + check_equal_hmean(a, desired, axis=-1, rtol=1e-14) + + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = 38.6696271841 + check_equal_hmean(np.ma.array(a), desired) + + +class TestRanking: + def test_ranking(self): + x = ma.array([0,1,1,1,2,3,4,5,5,6,]) + assert_almost_equal(mstats.rankdata(x), + [1,3,3,3,5,6,7,8.5,8.5,10]) + x[[3,4]] = masked + assert_almost_equal(mstats.rankdata(x), + [1,2.5,2.5,0,0,4,5,6.5,6.5,8]) + assert_almost_equal(mstats.rankdata(x, use_missing=True), + [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8]) + x = ma.array([0,1,5,1,2,4,3,5,1,6,]) + assert_almost_equal(mstats.rankdata(x), + [1,3,8.5,3,5,7,6,8.5,3,10]) + x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]]) + assert_almost_equal(mstats.rankdata(x), + [[1,3,3,3,5], [6,7,8.5,8.5,10]]) + assert_almost_equal(mstats.rankdata(x, axis=1), + [[1,3,3,3,5], [1,2,3.5,3.5,5]]) + assert_almost_equal(mstats.rankdata(x,axis=0), + [[1,1,1,1,1], [2,2,2,2,2,]]) + + +class TestCorr: + def test_pearsonr(self): + # Tests some computations of Pearson's r + x = ma.arange(10) + with warnings.catch_warnings(): + # The tests in this context are edge cases, with perfect + # correlation or anticorrelation, or totally masked data. + # None of these should trigger a RuntimeWarning. + warnings.simplefilter("error", RuntimeWarning) + + assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0) + assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0) + + x = ma.array(x, mask=True) + pr = mstats.pearsonr(x, x) + assert_(pr[0] is masked) + assert_(pr[1] is masked) + + x1 = ma.array([-1.0, 0.0, 1.0]) + y1 = ma.array([0, 0, 3]) + r, p = mstats.pearsonr(x1, y1) + assert_almost_equal(r, np.sqrt(3)/2) + assert_almost_equal(p, 1.0/3) + + # (x2, y2) have the same unmasked data as (x1, y1). 
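+ # Hand check of the expected values above: for x1 = [-1, 0, 1] and + # y1 = [0, 0, 3] the centered cross products sum to 3 while the + # centered sums of squares are 2 and 6, so r = 3/sqrt(2*6) = sqrt(3)/2. + # Masking the extra pair in (x2, y2) must reproduce exactly that.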
+ mask = [False, False, False, True] + x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask) + y2 = ma.array([0, 0, 3, -1], mask=mask) + r, p = mstats.pearsonr(x2, y2) + assert_almost_equal(r, np.sqrt(3)/2) + assert_almost_equal(p, 1.0/3) + + def test_pearsonr_misaligned_mask(self): + mx = np.ma.masked_array([1, 2, 3, 4, 5, 6], mask=[0, 1, 0, 0, 0, 0]) + my = np.ma.masked_array([9, 8, 7, 6, 5, 9], mask=[0, 0, 1, 0, 0, 0]) + x = np.array([1, 4, 5, 6]) + y = np.array([9, 6, 5, 9]) + mr, mp = mstats.pearsonr(mx, my) + r, p = stats.pearsonr(x, y) + assert_equal(mr, r) + assert_equal(mp, p) + + def test_spearmanr(self): + # Tests some computations of Spearman's rho + (x, y) = ([5.05,6.75,3.21,2.66], [1.65,2.64,2.64,6.95]) + assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555) + (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan]) + (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y)) + assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555) + + x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1, + 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7] + y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6, + 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4] + assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299) + x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1, + 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan] + y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6, + 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan] + (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y)) + assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299) + # Next test is to make sure calculation uses sufficient precision. + # The denominator's value is ~n^3 and used to be represented as an + # int. 2000**3 > 2**32 so these arrays would cause overflow on + # some machines. + x = list(range(2000)) + y = list(range(2000)) + y[0], y[9] = y[9], y[0] + y[10], y[434] = y[434], y[10] + y[435], y[1509] = y[1509], y[435] + # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1)) + # = 1 - (1 / 500) + # = 0.998 + assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998) + + # test for namedtuple attributes + res = mstats.spearmanr(x, y) + attributes = ('correlation', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_spearmanr_alternative(self): + # check against R + # options(digits=16) + # cor.test(c(2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1, + # 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7), + # c(22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6, + # 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4), + # alternative='two.sided', method='spearman') + x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1, + 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7] + y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6, + 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4] + + r_exp = 0.6887298747763864 # from cor.test + + r, p = mstats.spearmanr(x, y) + assert_allclose(r, r_exp) + assert_allclose(p, 0.004519192910756) + + r, p = mstats.spearmanr(x, y, alternative='greater') + assert_allclose(r, r_exp) + assert_allclose(p, 0.002259596455378) + + r, p = mstats.spearmanr(x, y, alternative='less') + assert_allclose(r, r_exp) + assert_allclose(p, 0.9977404035446) + + # intuitive test (with obvious positive correlation) + n = 100 + x = np.linspace(0, 5, n) + y = 0.1*x + np.random.rand(n) # y is positively correlated w/ x + + stat1, p1 = mstats.spearmanr(x, y) + + stat2, p2 = mstats.spearmanr(x, y, alternative="greater") + assert_allclose(p2, p1 / 2) # positive correlation -> small p + + stat3, p3 = mstats.spearmanr(x, y, alternative="less") + assert_allclose(p3, 1 - p1 / 2) # positive correlation -> large p + + assert stat1 == 
stat2 == stat3 + + with pytest.raises(ValueError, match="alternative must be 'less'..."): + mstats.spearmanr(x, y, alternative="ekki-ekki") + + @pytest.mark.skipif(platform.machine() == 'ppc64le', + reason="fails/crashes on ppc64le") + def test_kendalltau(self): + # check case with maximum disorder and p=1 + x = ma.array(np.array([9, 2, 5, 6])) + y = ma.array(np.array([4, 7, 9, 11])) + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [0.0, 1.0] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # simple case without ties + x = ma.array(np.arange(10)) + y = ma.array(np.arange(10)) + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [1.0, 5.511463844797e-07] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # check exception in case of invalid method keyword + assert_raises(ValueError, mstats.kendalltau, x, y, method='banana') + + # swap a couple of values + b = y[1] + y[1] = y[2] + y[2] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [0.9555555555555556, 5.511463844797e-06] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # swap a couple more + b = y[5] + y[5] = y[6] + y[6] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [0.9111111111111111, 2.976190476190e-05] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # same in opposite direction + x = ma.array(np.arange(10)) + y = ma.array(np.arange(10)[::-1]) + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [-1.0, 5.511463844797e-07] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # swap a couple of values + b = y[1] + y[1] = y[2] + y[2] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [-0.9555555555555556, 5.511463844797e-06] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # swap a couple more + b = y[5] + y[5] = y[6] + y[6] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = [-0.9111111111111111, 2.976190476190e-05] + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected) + + # Tests some computations of Kendall's tau + x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66, np.nan]) + y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan]) + z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan]) + assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), + [+0.3333333, 0.75]) + assert_almost_equal(np.asarray(mstats.kendalltau(x, y, method='asymptotic')), + [+0.3333333, 0.4969059]) + assert_almost_equal(np.asarray(mstats.kendalltau(x, z)), + [-0.5477226, 0.2785987]) + # + x = ma.fix_invalid([0, 0, 0, 0, 20, 20, 0, 60, 0, 20, + 10, 10, 0, 40, 0, 20, 0, 0, 0, 0, 0, np.nan]) + y = ma.fix_invalid([0, 80, 80, 80, 10, 33, 60, 0, 67, 27, + 25, 80, 80, 80, 80, 80, 80, 0, 10, 45, np.nan, 0]) + result = mstats.kendalltau(x, y) + assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009]) + + # test for namedtuple attributes + attributes = ('correlation', 'pvalue') + check_named_results(result, attributes, ma=True) + + @pytest.mark.skipif(platform.machine() == 'ppc64le', + reason="fails/crashes on ppc64le") + @pytest.mark.slow + def test_kendalltau_large(self): + # make sure internal variable use correct precision with + # larger arrays + x = np.arange(2000, 
dtype=float) + x = ma.masked_greater(x, 1995) + y = np.arange(2000, dtype=float) + y = np.concatenate((y[1000:], y[:1000])) + assert_(np.isfinite(mstats.kendalltau(x, y)[1])) + + def test_kendalltau_seasonal(self): + # Tests the seasonal Kendall tau. + x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], + [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], + [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan], + [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]] + x = ma.fix_invalid(x).T + output = mstats.kendalltau_seasonal(x) + assert_almost_equal(output['global p-value (indep)'], 0.008, 3) + assert_almost_equal(output['seasonal p-value'].round(2), + [0.18,0.53,0.20,0.04]) + + @pytest.mark.parametrize("method", ("exact", "asymptotic")) + @pytest.mark.parametrize("alternative", ("two-sided", "greater", "less")) + def test_kendalltau_mstats_vs_stats(self, method, alternative): + # Test that mstats.kendalltau and stats.kendalltau with + # nan_policy='omit' matches behavior of stats.kendalltau + # Accuracy of the alternatives is tested in stats/tests/test_stats.py + + np.random.seed(0) + n = 50 + x = np.random.rand(n) + y = np.random.rand(n) + mask = np.random.rand(n) > 0.5 + + x_masked = ma.array(x, mask=mask) + y_masked = ma.array(y, mask=mask) + res_masked = mstats.kendalltau( + x_masked, y_masked, method=method, alternative=alternative) + + x_compressed = x_masked.compressed() + y_compressed = y_masked.compressed() + res_compressed = stats.kendalltau( + x_compressed, y_compressed, method=method, alternative=alternative) + + x[mask] = np.nan + y[mask] = np.nan + res_nan = stats.kendalltau( + x, y, method=method, nan_policy='omit', alternative=alternative) + + assert_allclose(res_masked, res_compressed) + assert_allclose(res_nan, res_compressed) + + def test_kendall_p_exact_medium(self): + # Test for the exact method with medium samples (some n >= 171) + # expected values generated using SymPy + expectations = {(100, 2393): 0.62822615287956040664, + (101, 2436): 0.60439525773513602669, + (170, 0): 2.755801935583541e-307, + (171, 0): 0.0, + (171, 1): 2.755801935583541e-307, + (172, 1): 0.0, + (200, 9797): 0.74753983745929675209, + (201, 9656): 0.40959218958120363618} + for nc, expected in expectations.items(): + res = _mstats_basic._kendall_p_exact(nc[0], nc[1]) + assert_almost_equal(res, expected) + + @pytest.mark.xslow + def test_kendall_p_exact_large(self): + # Test for the exact method with large samples (n >= 171) + # expected values generated using SymPy + expectations = {(400, 38965): 0.48444283672113314099, + (401, 39516): 0.66363159823474837662, + (800, 156772): 0.42265448483120932055, + (801, 157849): 0.53437553412194416236, + (1600, 637472): 0.84200727400323538419, + (1601, 630304): 0.34465255088058593946} + + for nc, expected in expectations.items(): + res = _mstats_basic._kendall_p_exact(nc[0], nc[1]) + assert_almost_equal(res, expected) + + def test_pointbiserial(self): + x = [1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, -1] + y = [14.8, 13.8, 12.4, 10.1, 7.1, 6.1, 5.8, 4.6, 4.3, 3.5, 3.3, 3.2, + 3.0, 2.8, 2.8, 2.5, 2.4, 2.3, 2.1, 1.7, 1.7, 1.5, 1.3, 1.3, 1.2, + 1.2, 1.1, 0.8, 0.7, 0.6, 0.5, 0.2, 0.2, 0.1, np.nan] + assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5) + + # test for namedtuple attributes + res = mstats.pointbiserialr(x, y) + attributes = ('correlation', 'pvalue') + check_named_results(res, attributes, ma=True) + + +class TestTrimming: + + def test_trim(self): + a = ma.arange(10) + assert_equal(mstats.trim(a), 
[0,1,2,3,4,5,6,7,8,9]) + a = ma.arange(10) + assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None]) + a = ma.arange(10) + assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)), + [None,None,None,3,4,5,6,7,None,None]) + a = ma.arange(10) + assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True), + [None,1,2,3,4,5,6,7,None,None]) + + a = ma.arange(12) + a[[0,-1]] = a[5] = masked + assert_equal(mstats.trim(a, (2,8)), + [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None]) + + x = ma.arange(100).reshape(10, 10) + expected = [1]*10 + [0]*70 + [1]*20 + trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None) + assert_equal(trimx._mask.ravel(), expected) + trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0) + assert_equal(trimx._mask.ravel(), expected) + trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1) + assert_equal(trimx._mask.T.ravel(), expected) + + # same as above, but with an extra masked row inserted + x = ma.arange(110).reshape(11, 10) + x[1] = masked + expected = [1]*20 + [0]*70 + [1]*20 + trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None) + assert_equal(trimx._mask.ravel(), expected) + trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0) + assert_equal(trimx._mask.ravel(), expected) + trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1) + assert_equal(trimx.T._mask.ravel(), expected) + + def test_trim_old(self): + x = ma.arange(100) + assert_equal(mstats.trimboth(x).count(), 60) + assert_equal(mstats.trimtail(x,tail='r').count(), 80) + x[50:70] = masked + trimx = mstats.trimboth(x) + assert_equal(trimx.count(), 48) + assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16) + x._mask = nomask + x.shape = (10,10) + assert_equal(mstats.trimboth(x).count(), 60) + assert_equal(mstats.trimtail(x).count(), 80) + + def test_trimr(self): + x = ma.arange(10) + result = mstats.trimr(x, limits=(0.15, 0.14), inclusive=(False, False)) + expected = ma.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 1]) + assert_equal(result, expected) + assert_equal(result.mask, expected.mask) + + def test_trimmedmean(self): + data = ma.array([77, 87, 88,114,151,210,219,246,253,262, + 296,299,306,376,428,515,666,1310,2611]) + assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0) + assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0) + assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0) + + def test_trimmedvar(self): + # Basic test. Additional tests of all arguments, edge cases, + # input validation, and proper treatment of masked arrays are needed. + rng = np.random.default_rng(3262323289434724460) + data_orig = rng.random(size=20) + data = np.sort(data_orig) + data = ma.array(data, mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) + assert_allclose(mstats.trimmed_var(data_orig, 0.1), data.var()) + + def test_trimmedstd(self): + # Basic test. Additional tests of all arguments, edge cases, + # input validation, and proper treatment of masked arrays are needed. 
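+ # The check relies on an equivalence: trimming 10% from each end of + # 20 sorted points masks the two smallest and two largest values, so + # trimmed_std of the raw data must equal the plain std of the masked, + # sorted copy built below.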
+ rng = np.random.default_rng(7121029245207162780) + data_orig = rng.random(size=20) + data = np.sort(data_orig) + data = ma.array(data, mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) + assert_allclose(mstats.trimmed_std(data_orig, 0.1), data.std()) + + def test_trimmed_stde(self): + data = ma.array([77, 87, 88,114,151,210,219,246,253,262, + 296,299,306,376,428,515,666,1310,2611]) + assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5) + assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5) + + def test_winsorization(self): + data = ma.array([77, 87, 88,114,151,210,219,246,253,262, + 296,299,306,376,428,515,666,1310,2611]) + assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1), + 21551.4, 1) + assert_almost_equal( + mstats.winsorize(data, (0.2,0.2),(False,False)).var(ddof=1), + 11887.3, 1) + data[5] = masked + winsorized = mstats.winsorize(data) + assert_equal(winsorized.mask, data.mask) + + def test_winsorization_nan(self): + data = ma.array([np.nan, np.nan, 0, 1, 2]) + assert_raises(ValueError, mstats.winsorize, data, (0.05, 0.05), + nan_policy='raise') + # Testing propagate (default behavior) + assert_equal(mstats.winsorize(data, (0.4, 0.4)), + ma.array([2, 2, 2, 2, 2])) + assert_equal(mstats.winsorize(data, (0.8, 0.8)), + ma.array([np.nan, np.nan, np.nan, np.nan, np.nan])) + assert_equal(mstats.winsorize(data, (0.4, 0.4), nan_policy='omit'), + ma.array([np.nan, np.nan, 2, 2, 2])) + assert_equal(mstats.winsorize(data, (0.8, 0.8), nan_policy='omit'), + ma.array([np.nan, np.nan, 2, 2, 2])) + + +class TestMoments: + # Comparison numbers are found using R v.1.5.1 + # note that length(testcase) = 4 + # testmathworks comes from documentation for the + # Statistics Toolbox for Matlab and can be found at both + # https://www.mathworks.com/help/stats/kurtosis.html + # https://www.mathworks.com/help/stats/skewness.html + # Note that both test cases came from here. 
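+ # For testcase = [1, 2, 3, 4] the mean is 2.5, so the k-th central + # moment mean((x - 2.5)**k) works out to 0, 1.25, 0 and 2.5625 for + # k = 1..4; these are the reference values asserted in test_moment.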
+ testcase = [1,2,3,4] + testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965, + np.nan]) + testcase_2d = ma.array( + np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149], + [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407], + [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733], + [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998], + [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]), + mask=np.array([[True, False, False, True, False], + [True, True, True, False, True], + [False, False, False, False, False], + [True, True, True, True, True], + [False, False, True, False, False]], dtype=bool)) + + def _assert_equal(self, actual, expect, *, shape=None, dtype=None): + expect = np.asarray(expect) + if shape is not None: + expect = np.broadcast_to(expect, shape) + assert_array_equal(actual, expect) + if dtype is None: + dtype = expect.dtype + assert actual.dtype == dtype + + def test_moment(self): + y = mstats.moment(self.testcase,1) + assert_almost_equal(y,0.0,10) + y = mstats.moment(self.testcase,2) + assert_almost_equal(y,1.25) + y = mstats.moment(self.testcase,3) + assert_almost_equal(y,0.0) + y = mstats.moment(self.testcase,4) + assert_almost_equal(y,2.5625) + + # check array_like input for moment + y = mstats.moment(self.testcase, [1, 2, 3, 4]) + assert_allclose(y, [0, 1.25, 0, 2.5625]) + + # check moment input consists only of integers + y = mstats.moment(self.testcase, 0.0) + assert_allclose(y, 1.0) + assert_raises(ValueError, mstats.moment, self.testcase, 1.2) + y = mstats.moment(self.testcase, [1.0, 2, 3, 4.0]) + assert_allclose(y, [0, 1.25, 0, 2.5625]) + + # test empty input + y = mstats.moment([]) + self._assert_equal(y, np.nan, dtype=np.float64) + y = mstats.moment(np.array([], dtype=np.float32)) + self._assert_equal(y, np.nan, dtype=np.float32) + y = mstats.moment(np.zeros((1, 0)), axis=0) + self._assert_equal(y, [], shape=(0,), dtype=np.float64) + y = mstats.moment([[]], axis=1) + self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64) + y = mstats.moment([[]], moment=[0, 1], axis=0) + self._assert_equal(y, [], shape=(2, 0)) + + x = np.arange(10.) 
+ x[9] = np.nan + assert_equal(mstats.moment(x, 2), ma.masked) # NaN value is ignored + + def test_variation(self): + y = mstats.variation(self.testcase) + assert_almost_equal(y,0.44721359549996, 10) + + def test_variation_ddof(self): + # test variation with delta degrees of freedom + # regression test for gh-13341 + a = np.array([1, 2, 3, 4, 5]) + y = mstats.variation(a, ddof=1) + assert_almost_equal(y, 0.5270462766947299) + + def test_skewness(self): + y = mstats.skew(self.testmathworks) + assert_almost_equal(y,-0.29322304336607,10) + y = mstats.skew(self.testmathworks,bias=0) + assert_almost_equal(y,-0.437111105023940,10) + y = mstats.skew(self.testcase) + assert_almost_equal(y,0.0,10) + + # test that skew works on multidimensional masked arrays + correct_2d = ma.array( + np.array([0.6882870394455785, 0, 0.2665647526856708, + 0, -0.05211472114254485]), + mask=np.array([False, False, False, True, False], dtype=bool) + ) + assert_allclose(mstats.skew(self.testcase_2d, 1), correct_2d) + for i, row in enumerate(self.testcase_2d): + assert_almost_equal(mstats.skew(row), correct_2d[i]) + + correct_2d_bias_corrected = ma.array( + np.array([1.685952043212545, 0.0, 0.3973712716070531, 0, + -0.09026534484117164]), + mask=np.array([False, False, False, True, False], dtype=bool) + ) + assert_allclose(mstats.skew(self.testcase_2d, 1, bias=False), + correct_2d_bias_corrected) + for i, row in enumerate(self.testcase_2d): + assert_almost_equal(mstats.skew(row, bias=False), + correct_2d_bias_corrected[i]) + + # Check consistency between stats and mstats implementations + assert_allclose(mstats.skew(self.testcase_2d[2, :]), + stats.skew(self.testcase_2d[2, :])) + + def test_kurtosis(self): + # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis + # for compatibility with Matlab) + y = mstats.kurtosis(self.testmathworks, 0, fisher=0, bias=1) + assert_almost_equal(y, 2.1658856802973, 10) + # Note that MATLAB has confusing docs for the following case + # kurtosis(x,0) gives an unbiased estimate of Pearson's kurtosis + # kurtosis(x) gives a biased estimate of Fisher's kurtosis (Pearson-3) + # The MATLAB docs imply that both should give Fisher's + y = mstats.kurtosis(self.testmathworks, fisher=0, bias=0) + assert_almost_equal(y, 3.663542721189047, 10) + y = mstats.kurtosis(self.testcase, 0, 0) + assert_almost_equal(y, 1.64) + + # test that kurtosis works on multidimensional masked arrays + correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0., + -1.26979517952]), + mask=np.array([False, False, False, True, + False], dtype=bool)) + assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1), + correct_2d) + for i, row in enumerate(self.testcase_2d): + assert_almost_equal(mstats.kurtosis(row), correct_2d[i]) + + correct_2d_bias_corrected = ma.array( + np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]), + mask=np.array([False, False, False, True, False], dtype=bool)) + assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1, + bias=False), + correct_2d_bias_corrected) + for i, row in enumerate(self.testcase_2d): + assert_almost_equal(mstats.kurtosis(row, bias=False), + correct_2d_bias_corrected[i]) + + # Check consistency between stats and mstats implementations + assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]), + stats.kurtosis(self.testcase_2d[2, :]), + nulp=4) + + +class TestMode: + def test_mode(self): + a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7] + a2 = np.reshape(a1, (3,5)) + a3 = np.array([1,2,3,4,5,6]) + a4 = np.reshape(a3, (3,2)) + ma1 = 
ma.masked_where(ma.array(a1) > 2, a1) + ma2 = ma.masked_where(a2 > 2, a2) + ma3 = ma.masked_where(a3 < 2, a3) + ma4 = ma.masked_where(ma.array(a4) < 2, a4) + assert_equal(mstats.mode(a1, axis=None), (3,4)) + assert_equal(mstats.mode(a1, axis=0), (3,4)) + assert_equal(mstats.mode(ma1, axis=None), (0,3)) + assert_equal(mstats.mode(a2, axis=None), (3,4)) + assert_equal(mstats.mode(ma2, axis=None), (0,3)) + assert_equal(mstats.mode(a3, axis=None), (1,1)) + assert_equal(mstats.mode(ma3, axis=None), (2,1)) + assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]])) + assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]])) + assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]])) + assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]])) + assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]])) + assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]])) + + a1_res = mstats.mode(a1, axis=None) + + # test for namedtuple attributes + attributes = ('mode', 'count') + check_named_results(a1_res, attributes, ma=True) + + def test_mode_modifies_input(self): + # regression test for gh-6428: mode(..., axis=None) may not modify + # the input array + im = np.zeros((100, 100)) + im[:50, :] += 1 + im[:, :50] += 1 + cp = im.copy() + mstats.mode(im, None) + assert_equal(im, cp) + + +class TestPercentile: + def setup_method(self): + self.a1 = [3, 4, 5, 10, -3, -5, 6] + self.a2 = [3, -6, -2, 8, 7, 4, 2, 1] + self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0] + + def test_percentile(self): + x = np.arange(8) * 0.5 + assert_equal(mstats.scoreatpercentile(x, 0), 0.) + assert_equal(mstats.scoreatpercentile(x, 100), 3.5) + assert_equal(mstats.scoreatpercentile(x, 50), 1.75) + + def test_2D(self): + x = ma.array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + assert_equal(mstats.scoreatpercentile(x, 50), [1, 1, 1]) + + +class TestVariability: + """ Comparison numbers are found using R v.1.5.1 + note that length(testcase) = 4 + """ + testcase = ma.fix_invalid([1,2,3,4,np.nan]) + + def test_sem(self): + # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3) + y = mstats.sem(self.testcase) + assert_almost_equal(y, 0.6454972244) + n = self.testcase.count() + assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)), + mstats.sem(self.testcase, ddof=2)) + + def test_zmap(self): + # This is not in R, so tested by using: + # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4) + y = mstats.zmap(self.testcase, self.testcase) + desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996, + 0.44721359549996, 1.3416407864999]) + assert_array_almost_equal(desired_unmaskedvals, + y.data[y.mask == False], decimal=12) # noqa: E712 + + def test_zscore(self): + # This is not in R, so tested by using: + # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4) + y = mstats.zscore(self.testcase) + desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996, + 0.44721359549996, 1.3416407864999, np.nan]) + assert_almost_equal(desired, y, decimal=12) + + +class TestMisc: + + def test_obrientransform(self): + args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2, + [6]+[7]*2+[8]*4+[9]*9+[10]*16] + result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538], + [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]] + assert_almost_equal(np.round(mstats.obrientransform(*args).T, 4), + result, 4) + + def test_ks_2samp(self): + x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], + [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 
3], + [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan], + [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]] + x = ma.fix_invalid(x).T + (winter, spring, summer, fall) = x.T + + assert_almost_equal(np.round(mstats.ks_2samp(winter, spring), 4), + (0.1818, 0.9628)) + assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'g'), 4), + (0.1469, 0.6886)) + assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'l'), 4), + (0.1818, 0.6011)) + + def test_friedmanchisq(self): + # No missing values + args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0], + [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0], + [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0]) + result = mstats.friedmanchisquare(*args) + assert_almost_equal(result[0], 10.4737, 4) + assert_almost_equal(result[1], 0.005317, 6) + # Missing values + x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], + [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], + [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan], + [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]] + x = ma.fix_invalid(x) + result = mstats.friedmanchisquare(*x) + assert_almost_equal(result[0], 2.0156, 4) + assert_almost_equal(result[1], 0.5692, 4) + + # test for namedtuple attributes + attributes = ('statistic', 'pvalue') + check_named_results(result, attributes, ma=True) + + +def test_regress_simple(): + # Regress a line with sinusoidal noise. Test for #1273. + x = np.linspace(0, 100, 100) + y = 0.2 * np.linspace(0, 100, 100) + 10 + y += np.sin(np.linspace(0, 20, 100)) + + result = mstats.linregress(x, y) + + # Result is of a correct class and with correct fields + lr = stats._stats_mstats_common.LinregressResult + assert_(isinstance(result, lr)) + attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr') + check_named_results(result, attributes, ma=True) + assert 'intercept_stderr' in dir(result) + + # Slope and intercept are estimated correctly + assert_almost_equal(result.slope, 0.19644990055858422) + assert_almost_equal(result.intercept, 10.211269918932341) + assert_almost_equal(result.stderr, 0.002395781449783862) + assert_almost_equal(result.intercept_stderr, 0.13866936078570702) + + +def test_linregress_identical_x(): + x = np.zeros(10) + y = np.random.random(10) + msg = "Cannot calculate a linear regression if all x values are identical" + with assert_raises(ValueError, match=msg): + mstats.linregress(x, y) + + +class TestTheilslopes: + def test_theilslopes(self): + # Test for basic slope and intercept. + slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1]) + assert_almost_equal(slope, 0.5) + assert_almost_equal(intercept, 0.5) + + slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1], + method='joint') + assert_almost_equal(slope, 0.5) + assert_almost_equal(intercept, 0.0) + + # Test for correct masking. + y = np.ma.array([0, 1, 100, 1], mask=[False, False, True, False]) + slope, intercept, lower, upper = mstats.theilslopes(y) + assert_almost_equal(slope, 1./3) + assert_almost_equal(intercept, 2./3) + + slope, intercept, lower, upper = mstats.theilslopes(y, + method='joint') + assert_almost_equal(slope, 1./3) + assert_almost_equal(intercept, 0.0) + + # Test of confidence intervals from example in Sen (1968). 
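+ # theilslopes estimates the slope as the median of the pairwise + # slopes (y[j] - y[i]) / (x[j] - x[i]) over i < j; the confidence + # limits checked below come from order statistics of those pairwise + # slopes.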
+ x = [1, 2, 3, 4, 10, 12, 18] + y = [9, 15, 19, 20, 45, 55, 78] + slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07) + assert_almost_equal(slope, 4) + assert_almost_equal(intercept, 4.0) + assert_almost_equal(upper, 4.38, decimal=2) + assert_almost_equal(lower, 3.71, decimal=2) + + slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07, + method='joint') + assert_almost_equal(slope, 4) + assert_almost_equal(intercept, 6.0) + assert_almost_equal(upper, 4.38, decimal=2) + assert_almost_equal(lower, 3.71, decimal=2) + + + def test_theilslopes_warnings(self): + # Test `theilslopes` with degenerate input; see gh-15943 + msg = "All `x` coordinates.*|Mean of empty slice.|invalid value encountered.*" + with pytest.warns(RuntimeWarning, match=msg): + res = mstats.theilslopes([0, 1], [0, 0]) + assert np.all(np.isnan(res)) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered...") + res = mstats.theilslopes([0, 0, 0], [0, 1, 0]) + assert_allclose(res, (0, 0, np.nan, np.nan)) + + + def test_theilslopes_namedtuple_consistency(self): + """ + Simple test to ensure tuple backwards-compatibility of the returned + TheilslopesResult object + """ + y = [1, 2, 4] + x = [4, 6, 8] + slope, intercept, low_slope, high_slope = mstats.theilslopes(y, x) + result = mstats.theilslopes(y, x) + + # note all four returned values are distinct here + assert_equal(slope, result.slope) + assert_equal(intercept, result.intercept) + assert_equal(low_slope, result.low_slope) + assert_equal(high_slope, result.high_slope) + + def test_gh19678_uint8(self): + # `theilslopes` returned unexpected results when `y` was an unsigned type. + # Check that this is resolved. + rng = np.random.default_rng(2549824598234528) + y = rng.integers(0, 255, size=10, dtype=np.uint8) + res = stats.theilslopes(y, y) + np.testing.assert_allclose(res.slope, 1) + + +def test_siegelslopes(): + # method should be exact for straight line + y = 2 * np.arange(10) + 0.5 + assert_equal(mstats.siegelslopes(y), (2.0, 0.5)) + assert_equal(mstats.siegelslopes(y, method='separate'), (2.0, 0.5)) + + x = 2 * np.arange(10) + y = 5 * x - 3.0 + assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0)) + assert_equal(mstats.siegelslopes(y, x, method='separate'), (5.0, -3.0)) + + # method is robust to outliers: breakdown point of 50% + y[:4] = 1000 + assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0)) + + # if there are no outliers, results should be comparable to linregress + x = np.arange(10) + y = -2.3 + 0.3*x + stats.norm.rvs(size=10, random_state=231) + slope_ols, intercept_ols, _, _, _ = stats.linregress(x, y) + + slope, intercept = mstats.siegelslopes(y, x) + assert_allclose(slope, slope_ols, rtol=0.1) + assert_allclose(intercept, intercept_ols, rtol=0.1) + + slope, intercept = mstats.siegelslopes(y, x, method='separate') + assert_allclose(slope, slope_ols, rtol=0.1) + assert_allclose(intercept, intercept_ols, rtol=0.1) + + +def test_siegelslopes_namedtuple_consistency(): + """ + Simple test to ensure tuple backwards-compatibility of the returned + SiegelslopesResult object. 
+ """ + y = [1, 2, 4] + x = [4, 6, 8] + slope, intercept = mstats.siegelslopes(y, x) + result = mstats.siegelslopes(y, x) + + # note both returned values are distinct here + assert_equal(slope, result.slope) + assert_equal(intercept, result.intercept) + + +def test_sen_seasonal_slopes(): + rng = np.random.default_rng(5765986256978575148) + x = rng.random(size=(100, 4)) + intra_slope, inter_slope = mstats.sen_seasonal_slopes(x) + + # reference implementation from the `sen_seasonal_slopes` documentation + def dijk(yi): + n = len(yi) + x = np.arange(n) + dy = yi - yi[:, np.newaxis] + dx = x - x[:, np.newaxis] + mask = np.triu(np.ones((n, n), dtype=bool), k=1) + return dy[mask]/dx[mask] + + for i in range(4): + assert_allclose(np.median(dijk(x[:, i])), intra_slope[i]) + + all_slopes = np.concatenate([dijk(x[:, i]) for i in range(x.shape[1])]) + assert_allclose(np.median(all_slopes), inter_slope) + + +def test_plotting_positions(): + # Regression test for #1256 + pos = mstats.plotting_positions(np.arange(3), 0, 0) + assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75])) + + +class TestNormalitytests: + + def test_vs_nonmasked(self): + x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 + assert_array_almost_equal(mstats.normaltest(x), + stats.normaltest(x)) + assert_array_almost_equal(mstats.skewtest(x), + stats.skewtest(x)) + assert_array_almost_equal(mstats.kurtosistest(x), + stats.kurtosistest(x)) + + funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest] + mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest] + x = [1, 2, 3, 4] + for func, mfunc in zip(funcs, mfuncs): + assert_raises(ValueError, func, x) + assert_raises(ValueError, mfunc, x) + + def test_axis_None(self): + # Test axis=None (equal to axis=0 for 1-D input) + x = np.array((-2,-1,0,1,2,3)*4)**2 + assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x)) + assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x)) + assert_allclose(mstats.kurtosistest(x, axis=None), + mstats.kurtosistest(x)) + + def test_maskedarray_input(self): + # Add some masked values, test result doesn't change + x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 + xm = np.ma.array(np.r_[np.inf, x, 10], + mask=np.r_[True, [False] * x.size, True]) + assert_allclose(mstats.normaltest(xm), stats.normaltest(x)) + assert_allclose(mstats.skewtest(xm), stats.skewtest(x)) + assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x)) + + def test_nd_input(self): + x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 + x_2d = np.vstack([x] * 2).T + for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]: + res_1d = func(x) + res_2d = func(x_2d) + assert_allclose(res_2d[0], [res_1d[0]] * 2) + assert_allclose(res_2d[1], [res_1d[1]] * 2) + + def test_normaltest_result_attributes(self): + x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 + res = mstats.normaltest(x) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_kurtosistest_result_attributes(self): + x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 + res = mstats.kurtosistest(x) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_regression_9033(self): + # x clearly non-normal but power of negative denom needs + # to be handled correctly to reject normality + counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167] + x = np.hstack([np.full(c, i) for i, c in enumerate(counts)]) + assert_equal(mstats.kurtosistest(x)[1] < 0.01, True) + + @pytest.mark.parametrize("test", ["skewtest", "kurtosistest"]) + 
@pytest.mark.parametrize("alternative", ["less", "greater"]) + def test_alternative(self, test, alternative): + x = stats.norm.rvs(loc=10, scale=2.5, size=30, random_state=123) + + stats_test = getattr(stats, test) + mstats_test = getattr(mstats, test) + + z_ex, p_ex = stats_test(x, alternative=alternative) + z, p = mstats_test(x, alternative=alternative) + assert_allclose(z, z_ex, atol=1e-12) + assert_allclose(p, p_ex, atol=1e-12) + + # test with masked arrays + x[1:5] = np.nan + x = np.ma.masked_array(x, mask=np.isnan(x)) + z_ex, p_ex = stats_test(x.compressed(), alternative=alternative) + z, p = mstats_test(x, alternative=alternative) + assert_allclose(z, z_ex, atol=1e-12) + assert_allclose(p, p_ex, atol=1e-12) + + def test_bad_alternative(self): + x = stats.norm.rvs(size=20, random_state=123) + msg = r"`alternative` must be..." + + with pytest.raises(ValueError, match=msg): + mstats.skewtest(x, alternative='error') + + with pytest.raises(ValueError, match=msg): + mstats.kurtosistest(x, alternative='error') + + +class TestFOneway: + def test_result_attributes(self): + a = np.array([655, 788], dtype=np.uint16) + b = np.array([789, 772], dtype=np.uint16) + res = mstats.f_oneway(a, b) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + +class TestMannwhitneyu: + # data from gh-1428 + x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1.]) + + y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1., + 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., + 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., + 2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2., + 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., + 1., 1., 1., 1.]) + + def test_result_attributes(self): + res = mstats.mannwhitneyu(self.x, self.y) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_against_stats(self): + # gh-4641 reported that stats.mannwhitneyu returned half the p-value + # of mstats.mannwhitneyu. Default alternative of stats.mannwhitneyu + # is now two-sided, so they match. 
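+        # [Editor's illustrative sketch -- an addition, not part of the
+        # original SciPy test file.] The factor of two behind gh-4641: for
+        # this statistic the two-sided p-value is, up to the final clipping
+        # at 1, twice the smaller of the two one-sided p-values, so a
+        # one-sided default on one side of the comparison halved the result:
+        p_less = stats.mannwhitneyu(self.x, self.y,
+                                    alternative='less').pvalue
+        p_greater = stats.mannwhitneyu(self.x, self.y,
+                                       alternative='greater').pvalue
+        p_two = stats.mannwhitneyu(self.x, self.y,
+                                   alternative='two-sided').pvalue
+        assert np.isclose(p_two, min(2 * min(p_less, p_greater), 1.0))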
+ res1 = mstats.mannwhitneyu(self.x, self.y) + res2 = stats.mannwhitneyu(self.x, self.y) + assert res1.statistic == res2.statistic + assert_allclose(res1.pvalue, res2.pvalue) + + +class TestKruskal: + def test_result_attributes(self): + x = [1, 3, 5, 7, 9] + y = [2, 4, 6, 8, 10] + + res = mstats.kruskal(x, y) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + +# TODO: for all ttest functions, add tests with masked array inputs +class TestTtest_rel: + def test_vs_nonmasked(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + # 1-D inputs + res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1]) + res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1]) + assert_allclose(res1, res2) + + # 2-D inputs + res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None) + res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None) + assert_allclose(res1, res2) + res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0) + res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0) + assert_allclose(res1, res2) + + # Check default is axis=0 + res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:]) + assert_allclose(res2, res3) + + def test_fully_masked(self): + np.random.seed(1234567) + outcome = ma.masked_array(np.random.randn(3, 2), + mask=[[1, 1, 1], [0, 0, 0]]) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + for pair in [(outcome[:, 0], outcome[:, 1]), + ([np.nan, np.nan], [1.0, 2.0])]: + t, p = mstats.ttest_rel(*pair) + assert_array_equal(t, (np.nan, np.nan)) + assert_array_equal(p, (np.nan, np.nan)) + + def test_result_attributes(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1]) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_invalid_input_size(self): + assert_raises(ValueError, mstats.ttest_rel, + np.arange(10), np.arange(11)) + x = np.arange(24) + assert_raises(ValueError, mstats.ttest_rel, + x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1) + assert_raises(ValueError, mstats.ttest_rel, + x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2) + + def test_empty(self): + res1 = mstats.ttest_rel([], []) + assert_(np.all(np.isnan(res1))) + + def test_zero_division(self): + t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1]) + assert_equal((np.abs(t), p), (np.inf, 0)) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0]) + assert_array_equal(t, np.array([np.nan, np.nan])) + assert_array_equal(p, np.array([np.nan, np.nan])) + + def test_bad_alternative(self): + msg = r"alternative must be 'less', 'greater' or 'two-sided'" + with pytest.raises(ValueError, match=msg): + mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo') + + @pytest.mark.parametrize("alternative", ["less", "greater"]) + def test_alternative(self, alternative): + x = stats.norm.rvs(loc=10, scale=5, size=25, random_state=42) + y = stats.norm.rvs(loc=8, scale=2, size=25, random_state=42) + + t_ex, p_ex = stats.ttest_rel(x, y, alternative=alternative) + t, p = mstats.ttest_rel(x, y, alternative=alternative) + assert_allclose(t, t_ex, rtol=1e-14) + assert_allclose(p, p_ex, rtol=1e-14) + + # test with masked arrays + x[1:10] = np.nan + y[1:10] = np.nan + x = np.ma.masked_array(x, mask=np.isnan(x)) + y = np.ma.masked_array(y, mask=np.isnan(y)) + t, p = 
mstats.ttest_rel(x, y, alternative=alternative) + t_ex, p_ex = stats.ttest_rel(x.compressed(), y.compressed(), + alternative=alternative) + assert_allclose(t, t_ex, rtol=1e-14) + assert_allclose(p, p_ex, rtol=1e-14) + + +class TestTtest_ind: + def test_vs_nonmasked(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + # 1-D inputs + res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1]) + res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1]) + assert_allclose(res1, res2) + + # 2-D inputs + res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None) + res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None) + assert_allclose(res1, res2) + res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0) + res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0) + assert_allclose(res1, res2) + + # Check default is axis=0 + res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:]) + assert_allclose(res2, res3) + + # Check equal_var + res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True) + res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True) + assert_allclose(res4, res5) + res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False) + res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False) + assert_allclose(res4, res5) + + def test_fully_masked(self): + np.random.seed(1234567) + outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]]) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + for pair in [(outcome[:, 0], outcome[:, 1]), + ([np.nan, np.nan], [1.0, 2.0])]: + t, p = mstats.ttest_ind(*pair) + assert_array_equal(t, (np.nan, np.nan)) + assert_array_equal(p, (np.nan, np.nan)) + + def test_result_attributes(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1]) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_empty(self): + res1 = mstats.ttest_ind([], []) + assert_(np.all(np.isnan(res1))) + + def test_zero_division(self): + t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1]) + assert_equal((np.abs(t), p), (np.inf, 0)) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0]) + assert_array_equal(t, (np.nan, np.nan)) + assert_array_equal(p, (np.nan, np.nan)) + + t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False) + assert_equal((np.abs(t), p), (np.inf, 0)) + assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0], + equal_var=False), (np.nan, np.nan)) + + def test_bad_alternative(self): + msg = r"alternative must be 'less', 'greater' or 'two-sided'" + with pytest.raises(ValueError, match=msg): + mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo') + + @pytest.mark.parametrize("alternative", ["less", "greater"]) + def test_alternative(self, alternative): + x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123) + y = stats.norm.rvs(loc=8, scale=2, size=100, random_state=123) + + t_ex, p_ex = stats.ttest_ind(x, y, alternative=alternative) + t, p = mstats.ttest_ind(x, y, alternative=alternative) + assert_allclose(t, t_ex, rtol=1e-14) + assert_allclose(p, p_ex, rtol=1e-14) + + # test with masked arrays + x[1:10] = np.nan + y[80:90] = np.nan + x = np.ma.masked_array(x, mask=np.isnan(x)) + y = np.ma.masked_array(y, mask=np.isnan(y)) + t_ex, p_ex = 
stats.ttest_ind(x.compressed(), y.compressed(), + alternative=alternative) + t, p = mstats.ttest_ind(x, y, alternative=alternative) + assert_allclose(t, t_ex, rtol=1e-14) + assert_allclose(p, p_ex, rtol=1e-14) + + +class TestTtest_1samp: + def test_vs_nonmasked(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + # 1-D inputs + res1 = stats.ttest_1samp(outcome[:, 0], 1) + res2 = mstats.ttest_1samp(outcome[:, 0], 1) + assert_allclose(res1, res2) + + def test_fully_masked(self): + np.random.seed(1234567) + outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1]) + expected = (np.nan, np.nan) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + for pair in [((np.nan, np.nan), 0.0), (outcome, 0.0)]: + t, p = mstats.ttest_1samp(*pair) + assert_array_equal(p, expected) + assert_array_equal(t, expected) + + def test_result_attributes(self): + np.random.seed(1234567) + outcome = np.random.randn(20, 4) + [0, 0, 1, 2] + + res = mstats.ttest_1samp(outcome[:, 0], 1) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_empty(self): + res1 = mstats.ttest_1samp([], 1) + assert_(np.all(np.isnan(res1))) + + def test_zero_division(self): + t, p = mstats.ttest_1samp([0, 0, 0], 1) + assert_equal((np.abs(t), p), (np.inf, 0)) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in absolute") + t, p = mstats.ttest_1samp([0, 0, 0], 0) + assert_(np.isnan(t)) + assert_array_equal(p, (np.nan, np.nan)) + + def test_bad_alternative(self): + msg = r"alternative must be 'less', 'greater' or 'two-sided'" + with pytest.raises(ValueError, match=msg): + mstats.ttest_1samp([1, 2, 3], 4, alternative='foo') + + @pytest.mark.parametrize("alternative", ["less", "greater"]) + def test_alternative(self, alternative): + x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123) + + t_ex, p_ex = stats.ttest_1samp(x, 9, alternative=alternative) + t, p = mstats.ttest_1samp(x, 9, alternative=alternative) + assert_allclose(t, t_ex, rtol=1e-14) + assert_allclose(p, p_ex, rtol=1e-14) + + # test with masked arrays + x[1:10] = np.nan + x = np.ma.masked_array(x, mask=np.isnan(x)) + t_ex, p_ex = stats.ttest_1samp(x.compressed(), 9, + alternative=alternative) + t, p = mstats.ttest_1samp(x, 9, alternative=alternative) + assert_allclose(t, t_ex, rtol=1e-14) + assert_allclose(p, p_ex, rtol=1e-14) + + +class TestDescribe: + """ + Tests for mstats.describe. + + Note that there are also tests for `mstats.describe` in the + class TestCompareWithStats. + """ + def test_basic_with_axis(self): + # This is a basic test that is also a regression test for gh-7303. + a = np.ma.masked_array([[0, 1, 2, 3, 4, 9], + [5, 5, 0, 9, 3, 3]], + mask=[[0, 0, 0, 0, 0, 1], + [0, 0, 1, 1, 0, 0]]) + result = mstats.describe(a, axis=1) + assert_equal(result.nobs, [5, 4]) + amin, amax = result.minmax + assert_equal(amin, [0, 3]) + assert_equal(amax, [4, 5]) + assert_equal(result.mean, [2.0, 4.0]) + assert_equal(result.variance, [2.0, 1.0]) + assert_equal(result.skewness, [0.0, 0.0]) + assert_allclose(result.kurtosis, [-1.3, -2.0]) + + +class TestCompareWithStats: + """ + Class to compare mstats results with stats results. + + It is in general assumed that scipy.stats is at a more mature stage than + stats.mstats. If a routine in mstats results in similar results like in + scipy.stats, this is considered also as a proper validation of scipy.mstats + routine. 
+
+    Different sample sizes are used for testing, as some problems between
+    stats and mstats depend on the sample size.
+
+    Author: Alexander Loew
+
+    NOTE that some tests fail. This might be caused by
+    a) actual differences or bugs between stats and mstats,
+    b) numerical inaccuracies, or
+    c) different definitions of the routine interfaces.
+
+    These failures need to be checked. The current workaround is to keep the
+    affected tests disabled and to report the issues on scipy-dev.
+    """
+    def get_n(self):
+        """ Returns list of sample sizes to be used for comparison. """
+        return [1000, 100, 10, 5]
+
+    def generate_xy_sample(self, n):
+        # This routine generates numpy arrays and corresponding masked arrays
+        # with the same data, plus additional masked values
+        np.random.seed(1234567)
+        x = np.random.randn(n)
+        y = x + np.random.randn(n)
+        xm = np.full(len(x) + 5, 1e16)
+        ym = np.full(len(y) + 5, 1e16)
+        xm[0:len(x)] = x
+        ym[0:len(y)] = y
+        mask = xm > 9e15
+        xm = np.ma.array(xm, mask=mask)
+        ym = np.ma.array(ym, mask=mask)
+        return x, y, xm, ym
+
+    def generate_xy_sample2D(self, n, nx):
+        x = np.full((n, nx), np.nan)
+        y = np.full((n, nx), np.nan)
+        xm = np.full((n+5, nx), np.nan)
+        ym = np.full((n+5, nx), np.nan)
+
+        for i in range(nx):
+            x[:, i], y[:, i], dx, dy = self.generate_xy_sample(n)
+
+        xm[0:n, :] = x[0:n]
+        ym[0:n, :] = y[0:n]
+        xm = np.ma.array(xm, mask=np.isnan(xm))
+        ym = np.ma.array(ym, mask=np.isnan(ym))
+        return x, y, xm, ym
+
+    def test_linregress(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            result1 = stats.linregress(x, y)
+            result2 = stats.mstats.linregress(xm, ym)
+            assert_allclose(np.asarray(result1), np.asarray(result2))
+
+    def test_pearsonr(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r, p = stats.pearsonr(x, y)
+            rm, pm = stats.mstats.pearsonr(xm, ym)
+
+            assert_almost_equal(r, rm, decimal=14)
+            assert_almost_equal(p, pm, decimal=14)
+
+    def test_spearmanr(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r, p = stats.spearmanr(x, y)
+            rm, pm = stats.mstats.spearmanr(xm, ym)
+            assert_almost_equal(r, rm, 14)
+            assert_almost_equal(p, pm, 14)
+
+    def test_spearmanr_backcompat_useties(self):
+        # A regression test to ensure we don't break backwards compat
+        # more than we have to (see gh-9204). An illustrative sketch of this
+        # class's masked-vs-plain comparison pattern follows.
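+        # [Editor's illustrative sketch -- an addition, not part of the
+        # original SciPy test file.] The comparison pattern this class
+        # relies on: `generate_xy_sample` above pads the data with sentinel
+        # values (1e16) and masks them, and every masked routine must then
+        # agree with its plain counterpart evaluated on the unmasked data:
+        demo = np.ma.masked_values([1.0, 2.0, 4.0, 1e16], 1e16)
+        assert_allclose(stats.mstats.gmean(demo),
+                        stats.gmean(demo.compressed()))  # both equal 2.0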
+ x = np.arange(6) + assert_raises(ValueError, mstats.spearmanr, x, x, False) + + def test_gmean(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.gmean(abs(x)) + rm = stats.mstats.gmean(abs(xm)) + assert_allclose(r, rm, rtol=1e-13) + + r = stats.gmean(abs(y)) + rm = stats.mstats.gmean(abs(ym)) + assert_allclose(r, rm, rtol=1e-13) + + def test_hmean(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + + r = stats.hmean(abs(x)) + rm = stats.mstats.hmean(abs(xm)) + assert_almost_equal(r, rm, 10) + + r = stats.hmean(abs(y)) + rm = stats.mstats.hmean(abs(ym)) + assert_almost_equal(r, rm, 10) + + def test_skew(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + + r = stats.skew(x) + rm = stats.mstats.skew(xm) + assert_almost_equal(r, rm, 10) + + r = stats.skew(y) + rm = stats.mstats.skew(ym) + assert_almost_equal(r, rm, 10) + + def test_moment(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + + r = stats.moment(x) + rm = stats.mstats.moment(xm) + assert_almost_equal(r, rm, 10) + + r = stats.moment(y) + rm = stats.mstats.moment(ym) + assert_almost_equal(r, rm, 10) + + def test_zscore(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + + # reference solution + zx = (x - x.mean()) / x.std() + zy = (y - y.mean()) / y.std() + + # validate stats + assert_allclose(stats.zscore(x), zx, rtol=1e-10) + assert_allclose(stats.zscore(y), zy, rtol=1e-10) + + # compare stats and mstats + assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]), + rtol=1e-10) + assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]), + rtol=1e-10) + + def test_kurtosis(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.kurtosis(x) + rm = stats.mstats.kurtosis(xm) + assert_almost_equal(r, rm, 10) + + r = stats.kurtosis(y) + rm = stats.mstats.kurtosis(ym) + assert_almost_equal(r, rm, 10) + + def test_sem(self): + # example from stats.sem doc + a = np.arange(20).reshape(5, 4) + am = np.ma.array(a) + r = stats.sem(a, ddof=1) + rm = stats.mstats.sem(am, ddof=1) + + assert_allclose(r, 2.82842712, atol=1e-5) + assert_allclose(rm, 2.82842712, atol=1e-5) + + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0), + stats.sem(x, axis=None, ddof=0), decimal=13) + assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0), + stats.sem(y, axis=None, ddof=0), decimal=13) + assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1), + stats.sem(x, axis=None, ddof=1), decimal=13) + assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1), + stats.sem(y, axis=None, ddof=1), decimal=13) + + def test_describe(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.describe(x, ddof=1) + rm = stats.mstats.describe(xm, ddof=1) + for ii in range(6): + assert_almost_equal(np.asarray(r[ii]), + np.asarray(rm[ii]), + decimal=12) + + def test_describe_result_attributes(self): + actual = mstats.describe(np.arange(5)) + attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness', + 'kurtosis') + check_named_results(actual, attributes, ma=True) + + def test_rankdata(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.rankdata(x) + rm = stats.mstats.rankdata(x) + assert_allclose(r, rm) + + def test_tmean(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + 
assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14) + assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14) + + def test_tmax(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + assert_almost_equal(stats.tmax(x,2.), + stats.mstats.tmax(xm,2.), 10) + assert_almost_equal(stats.tmax(y,2.), + stats.mstats.tmax(ym,2.), 10) + + assert_almost_equal(stats.tmax(x, upperlimit=3.), + stats.mstats.tmax(xm, upperlimit=3.), 10) + assert_almost_equal(stats.tmax(y, upperlimit=3.), + stats.mstats.tmax(ym, upperlimit=3.), 10) + + def test_tmin(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + assert_equal(stats.tmin(x), stats.mstats.tmin(xm)) + assert_equal(stats.tmin(y), stats.mstats.tmin(ym)) + + assert_almost_equal(stats.tmin(x, lowerlimit=-1.), + stats.mstats.tmin(xm, lowerlimit=-1.), 10) + assert_almost_equal(stats.tmin(y, lowerlimit=-1.), + stats.mstats.tmin(ym, lowerlimit=-1.), 10) + + def test_zmap(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + z = stats.zmap(x, y) + zm = stats.mstats.zmap(xm, ym) + assert_allclose(z, zm[0:len(z)], atol=1e-10) + + def test_variation(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + assert_almost_equal(stats.variation(x), stats.mstats.variation(xm), + decimal=12) + assert_almost_equal(stats.variation(y), stats.mstats.variation(ym), + decimal=12) + + def test_tvar(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm), + decimal=12) + assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym), + decimal=12) + + def test_trimboth(self): + a = np.arange(20) + b = stats.trimboth(a, 0.1) + bm = stats.mstats.trimboth(a, 0.1) + assert_allclose(np.sort(b), bm.data[~bm.mask]) + + def test_tsem(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + assert_almost_equal(stats.tsem(x), stats.mstats.tsem(xm), + decimal=14) + assert_almost_equal(stats.tsem(y), stats.mstats.tsem(ym), + decimal=14) + assert_almost_equal(stats.tsem(x, limits=(-2., 2.)), + stats.mstats.tsem(xm, limits=(-2., 2.)), + decimal=14) + + def test_skewtest(self): + # this test is for 1D data + for n in self.get_n(): + if n > 8: + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.skewtest(x) + rm = stats.mstats.skewtest(xm) + assert_allclose(r, rm) + + def test_skewtest_result_attributes(self): + x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 + res = mstats.skewtest(x) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes, ma=True) + + def test_skewtest_2D_notmasked(self): + # a normal ndarray is passed to the masked function + x = np.random.random((20, 2)) * 20. 
+ r = stats.skewtest(x) + rm = stats.mstats.skewtest(x) + assert_allclose(np.asarray(r), np.asarray(rm)) + + def test_skewtest_2D_WithMask(self): + nx = 2 + for n in self.get_n(): + if n > 8: + x, y, xm, ym = self.generate_xy_sample2D(n, nx) + r = stats.skewtest(x) + rm = stats.mstats.skewtest(xm) + + assert_allclose(r[0][0], rm[0][0], rtol=1e-14) + assert_allclose(r[0][1], rm[0][1], rtol=1e-14) + + def test_normaltest(self): + with np.errstate(over='raise'), suppress_warnings() as sup: + sup.filter(UserWarning, "kurtosistest only valid for n>=20") + for n in self.get_n(): + if n > 8: + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.normaltest(x) + rm = stats.mstats.normaltest(xm) + assert_allclose(np.asarray(r), np.asarray(rm)) + + def test_find_repeats(self): + x = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4]).astype('float') + tmp = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]).astype('float') + mask = (tmp == 5.) + xm = np.ma.array(tmp, mask=mask) + x_orig, xm_orig = x.copy(), xm.copy() + + r = stats.find_repeats(x) + rm = stats.mstats.find_repeats(xm) + + assert_equal(r, rm) + assert_equal(x, x_orig) + assert_equal(xm, xm_orig) + + # This crazy behavior is expected by count_tied_groups, but is not + # in the docstring... + _, counts = stats.mstats.find_repeats([]) + assert_equal(counts, np.array(0, dtype=np.intp)) + + def test_kendalltau(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.kendalltau(x, y) + rm = stats.mstats.kendalltau(xm, ym) + assert_almost_equal(r[0], rm[0], decimal=10) + assert_almost_equal(r[1], rm[1], decimal=7) + + def test_obrientransform(self): + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + r = stats.obrientransform(x) + rm = stats.mstats.obrientransform(xm) + assert_almost_equal(r.T, rm[0:len(x)]) + + def test_ks_1samp(self): + """Checks that mstats.ks_1samp and stats.ks_1samp agree on masked arrays.""" + for mode in ['auto', 'exact', 'asymp']: + with suppress_warnings(): + for alternative in ['less', 'greater', 'two-sided']: + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + res1 = stats.ks_1samp(x, stats.norm.cdf, + alternative=alternative, mode=mode) + res2 = stats.mstats.ks_1samp(xm, stats.norm.cdf, + alternative=alternative, mode=mode) + assert_equal(np.asarray(res1), np.asarray(res2)) + res3 = stats.ks_1samp(xm, stats.norm.cdf, + alternative=alternative, mode=mode) + assert_equal(np.asarray(res1), np.asarray(res3)) + + def test_kstest_1samp(self): + """ + Checks that 1-sample mstats.kstest and stats.kstest agree on masked arrays. + """ + for mode in ['auto', 'exact', 'asymp']: + with suppress_warnings(): + for alternative in ['less', 'greater', 'two-sided']: + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + res1 = stats.kstest(x, 'norm', + alternative=alternative, mode=mode) + res2 = stats.mstats.kstest(xm, 'norm', + alternative=alternative, mode=mode) + assert_equal(np.asarray(res1), np.asarray(res2)) + res3 = stats.kstest(xm, 'norm', + alternative=alternative, mode=mode) + assert_equal(np.asarray(res1), np.asarray(res3)) + + def test_ks_2samp(self): + """Checks that mstats.ks_2samp and stats.ks_2samp agree on masked arrays. + gh-8431""" + for mode in ['auto', 'exact', 'asymp']: + with suppress_warnings() as sup: + if mode in ['auto', 'exact']: + message = "ks_2samp: Exact calculation unsuccessful." 
+ sup.filter(RuntimeWarning, message) + for alternative in ['less', 'greater', 'two-sided']: + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + res1 = stats.ks_2samp(x, y, + alternative=alternative, mode=mode) + res2 = stats.mstats.ks_2samp(xm, ym, + alternative=alternative, mode=mode) + assert_equal(np.asarray(res1), np.asarray(res2)) + res3 = stats.ks_2samp(xm, y, + alternative=alternative, mode=mode) + assert_equal(np.asarray(res1), np.asarray(res3)) + + def test_kstest_2samp(self): + """ + Checks that 2-sample mstats.kstest and stats.kstest agree on masked arrays. + """ + for mode in ['auto', 'exact', 'asymp']: + with suppress_warnings() as sup: + if mode in ['auto', 'exact']: + message = "ks_2samp: Exact calculation unsuccessful." + sup.filter(RuntimeWarning, message) + for alternative in ['less', 'greater', 'two-sided']: + for n in self.get_n(): + x, y, xm, ym = self.generate_xy_sample(n) + res1 = stats.kstest(x, y, + alternative=alternative, mode=mode) + res2 = stats.mstats.kstest(xm, ym, + alternative=alternative, mode=mode) + assert_equal(np.asarray(res1), np.asarray(res2)) + res3 = stats.kstest(xm, y, + alternative=alternative, mode=mode) + assert_equal(np.asarray(res1), np.asarray(res3)) + + +class TestBrunnerMunzel: + # Data from (Lumley, 1996) + X = np.ma.masked_invalid([1, 2, 1, 1, 1, np.nan, 1, 1, + 1, 1, 1, 2, 4, 1, 1, np.nan]) + Y = np.ma.masked_invalid([3, 3, 4, 3, np.nan, 1, 2, 3, 1, 1, 5, 4]) + significant = 14 + + def test_brunnermunzel_one_sided(self): + # Results are compared with R's lawstat package. + u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='less') + u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='greater') + u3, p3 = mstats.brunnermunzel(self.X, self.Y, alternative='greater') + u4, p4 = mstats.brunnermunzel(self.Y, self.X, alternative='less') + + assert_almost_equal(p1, p2, decimal=self.significant) + assert_almost_equal(p3, p4, decimal=self.significant) + assert_(p1 != p3) + assert_almost_equal(u1, 3.1374674823029505, + decimal=self.significant) + assert_almost_equal(u2, -3.1374674823029505, + decimal=self.significant) + assert_almost_equal(u3, 3.1374674823029505, + decimal=self.significant) + assert_almost_equal(u4, -3.1374674823029505, + decimal=self.significant) + assert_almost_equal(p1, 0.0028931043330757342, + decimal=self.significant) + assert_almost_equal(p3, 0.99710689566692423, + decimal=self.significant) + + def test_brunnermunzel_two_sided(self): + # Results are compared with R's lawstat package. 
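+        # [Editor's illustrative sketch -- an addition, not part of the
+        # original SciPy test file.] Brunner-Munzel tests
+        # H0: P(X < Y) + 0.5 * P(X = Y) == 0.5. Swapping the samples
+        # reflects that probability about 0.5, so the statistic changes
+        # sign while the two-sided p-value is unchanged -- exactly what the
+        # assertions below verify:
+        w_xy, prob_xy = mstats.brunnermunzel(self.X, self.Y)
+        w_yx, prob_yx = mstats.brunnermunzel(self.Y, self.X)
+        assert np.isclose(w_xy, -w_yx) and np.isclose(prob_xy, prob_yx)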
+        u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='two-sided')
+        u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='two-sided')
+
+        assert_almost_equal(p1, p2, decimal=self.significant)
+        assert_almost_equal(u1, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u2, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(p1, 0.0057862086661515377,
+                            decimal=self.significant)
+
+    def test_brunnermunzel_default(self):
+        # The default value for alternative is two-sided
+        u1, p1 = mstats.brunnermunzel(self.X, self.Y)
+        u2, p2 = mstats.brunnermunzel(self.Y, self.X)
+
+        assert_almost_equal(p1, p2, decimal=self.significant)
+        assert_almost_equal(u1, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u2, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(p1, 0.0057862086661515377,
+                            decimal=self.significant)
+
+    def test_brunnermunzel_alternative_error(self):
+        alternative = "error"
+        distribution = "t"
+        assert_(alternative not in ["two-sided", "greater", "less"])
+        assert_raises(ValueError,
+                      mstats.brunnermunzel,
+                      self.X,
+                      self.Y,
+                      alternative,
+                      distribution)
+
+    def test_brunnermunzel_distribution_norm(self):
+        u1, p1 = mstats.brunnermunzel(self.X, self.Y, distribution="normal")
+        u2, p2 = mstats.brunnermunzel(self.Y, self.X, distribution="normal")
+        assert_almost_equal(p1, p2, decimal=self.significant)
+        assert_almost_equal(u1, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u2, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(p1, 0.0017041417600383024,
+                            decimal=self.significant)
+
+    def test_brunnermunzel_distribution_error(self):
+        alternative = "two-sided"
+        distribution = "error"
+        # sanity check that the chosen `distribution` value is indeed invalid
+        assert_(distribution not in ["t", "normal"])
+        assert_raises(ValueError,
+                      mstats.brunnermunzel,
+                      self.X,
+                      self.Y,
+                      alternative,
+                      distribution)
+
+    def test_brunnermunzel_empty_input(self):
+        u1, p1 = mstats.brunnermunzel(self.X, [])
+        u2, p2 = mstats.brunnermunzel([], self.Y)
+        u3, p3 = mstats.brunnermunzel([], [])
+
+        assert_(np.isnan(u1))
+        assert_(np.isnan(p1))
+        assert_(np.isnan(u2))
+        assert_(np.isnan(p2))
+        assert_(np.isnan(u3))
+        assert_(np.isnan(p3))
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_mstats_extras.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_mstats_extras.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b9fd0d80a6e05652c5151f5dfece2c5979dbfe5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_mstats_extras.py
@@ -0,0 +1,172 @@
+import numpy as np
+import numpy.ma as ma
+import scipy.stats.mstats as ms
+
+from numpy.testing import (assert_equal, assert_almost_equal, assert_,
+                           assert_allclose)
+
+
+def test_compare_medians_ms():
+    x = np.arange(7)
+    y = x + 10
+    assert_almost_equal(ms.compare_medians_ms(x, y), 0)
+
+    y2 = np.linspace(0, 1, num=10)
+    assert_almost_equal(ms.compare_medians_ms(x, y2), 0.017116406778)
+
+
+def test_hdmedian():
+    # 1-D array
+    x = ma.arange(11)
+    assert_allclose(ms.hdmedian(x), 5, rtol=1e-14)
+    x.mask = ma.make_mask(x)
+    x.mask[:7] = False
+    assert_allclose(ms.hdmedian(x), 3, rtol=1e-14)
+
+    # Check that `var` keyword returns a value. TODO: check whether returned
+    # value is actually correct.
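+    # [Editor's illustrative sketch -- an addition, not part of the
+    # original SciPy test file.] The Harrell-Davis estimate behind
+    # `hdmedian` is a Beta-weighted average of *all* order statistics
+    # rather than the single middle value; `hd_median` below is a
+    # hypothetical reference helper implementing the textbook definition:
+    from scipy.stats import beta
+
+    def hd_median(sample):
+        srt = np.sort(np.asarray(sample, dtype=float))
+        n = len(srt)
+        a = b = (n + 1) * 0.5                      # Beta(a, b) for q = 0.5
+        cdf = beta.cdf(np.arange(n + 1) / n, a, b)
+        return np.diff(cdf) @ srt                  # weights sum to 1
+
+    assert_allclose(hd_median(np.arange(11.0)), 5.0)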
+ assert_(ms.hdmedian(x, var=True).size == 2) + + # 2-D array + x2 = ma.arange(22).reshape((11, 2)) + assert_allclose(ms.hdmedian(x2, axis=0), [10, 11]) + x2.mask = ma.make_mask(x2) + x2.mask[:7, :] = False + assert_allclose(ms.hdmedian(x2, axis=0), [6, 7]) + + +def test_rsh(): + np.random.seed(132345) + x = np.random.randn(100) + res = ms.rsh(x) + # Just a sanity check that the code runs and output shape is correct. + # TODO: check that implementation is correct. + assert_(res.shape == x.shape) + + # Check points keyword + res = ms.rsh(x, points=[0, 1.]) + assert_(res.size == 2) + + +def test_mjci(): + # Tests the Marits-Jarrett estimator + data = ma.array([77, 87, 88,114,151,210,219,246,253,262, + 296,299,306,376,428,515,666,1310,2611]) + assert_almost_equal(ms.mjci(data),[55.76819,45.84028,198.87875],5) + + +def test_trimmed_mean_ci(): + # Tests the confidence intervals of the trimmed mean. + data = ma.array([545,555,558,572,575,576,578,580, + 594,605,635,651,653,661,666]) + assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1) + assert_equal(np.round(ms.trimmed_mean_ci(data,(0.2,0.2)),1), + [561.8, 630.6]) + + +def test_idealfourths(): + # Tests ideal-fourths + test = np.arange(100) + assert_almost_equal(np.asarray(ms.idealfourths(test)), + [24.416667,74.583333],6) + test_2D = test.repeat(3).reshape(-1,3) + assert_almost_equal(ms.idealfourths(test_2D, axis=0), + [[24.416667,24.416667,24.416667], + [74.583333,74.583333,74.583333]],6) + assert_almost_equal(ms.idealfourths(test_2D, axis=1), + test.repeat(2).reshape(-1,2)) + test = [0, 0] + _result = ms.idealfourths(test) + assert_(np.isnan(_result).all()) + + +class TestQuantiles: + data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014, + 0.887764025,0.239407086,0.349638551,0.972791145,0.149789972, + 0.936947700,0.132359948,0.046041972,0.641675031,0.945530547, + 0.224218684,0.771450991,0.820257774,0.336458052,0.589113496, + 0.509736129,0.696838829,0.491323573,0.622767425,0.775189248, + 0.641461450,0.118455200,0.773029450,0.319280007,0.752229111, + 0.047841438,0.466295911,0.583850781,0.840581845,0.550086491, + 0.466470062,0.504765074,0.226855960,0.362641207,0.891620942, + 0.127898691,0.490094097,0.044882048,0.041441695,0.317976349, + 0.504135618,0.567353033,0.434617473,0.636243375,0.231803616, + 0.230154113,0.160011327,0.819464108,0.854706985,0.438809221, + 0.487427267,0.786907310,0.408367937,0.405534192,0.250444460, + 0.995309248,0.144389588,0.739947527,0.953543606,0.680051621, + 0.388382017,0.863530727,0.006514031,0.118007779,0.924024803, + 0.384236354,0.893687694,0.626534881,0.473051932,0.750134705, + 0.241843555,0.432947602,0.689538104,0.136934797,0.150206859, + 0.474335206,0.907775349,0.525869295,0.189184225,0.854284286, + 0.831089744,0.251637345,0.587038213,0.254475554,0.237781276, + 0.827928620,0.480283781,0.594514455,0.213641488,0.024194386, + 0.536668589,0.699497811,0.892804071,0.093835427,0.731107772] + + def test_hdquantiles(self): + data = self.data + assert_almost_equal(ms.hdquantiles(data,[0., 1.]), + [0.006514031, 0.995309248]) + hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75]) + assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,]) + + data = np.array(data).reshape(10,10) + hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0) + assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75])) + assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75])) + hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True) + assert_almost_equal(hdq[...,0], + 
ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True)) + assert_almost_equal(hdq[...,-1], + ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True)) + + def test_hdquantiles_sd(self): + # Standard deviation is a jackknife estimator, so we can check if + # the efficient version (hdquantiles_sd) matches a rudimentary, + # but clear version here. + + hd_std_errs = ms.hdquantiles_sd(self.data) + + # jacknnife standard error, Introduction to the Bootstrap Eq. 11.5 + n = len(self.data) + jdata = np.broadcast_to(self.data, (n, n)) + jselector = np.logical_not(np.eye(n)) # leave out one sample each row + jdata = jdata[jselector].reshape(n, n-1) + jdist = ms.hdquantiles(jdata, axis=1) + jdist_mean = np.mean(jdist, axis=0) + jstd = ((n-1)/n * np.sum((jdist - jdist_mean)**2, axis=0))**.5 + + assert_almost_equal(hd_std_errs, jstd) + # Test actual values for good measure + assert_almost_equal(hd_std_errs, [0.0379258, 0.0380656, 0.0380013]) + + two_data_points = ms.hdquantiles_sd([1, 2]) + assert_almost_equal(two_data_points, [0.5, 0.5, 0.5]) + + def test_mquantiles_cimj(self): + # Only test that code runs, implementation not checked for correctness + ci_lower, ci_upper = ms.mquantiles_cimj(self.data) + assert_(ci_lower.size == ci_upper.size == 3) + + +def test_median_cihs(): + # Basic test against R library EnvStats function `eqnpar`, e.g. + # library(EnvStats) + # options(digits=8) + # x = c(0.88612955, 0.35242375, 0.66240904, 0.94617974, 0.10929913, + # 0.76699506, 0.88550655, 0.62763754, 0.76818588, 0.68506508, + # 0.88043148, 0.03911248, 0.93805564, 0.95326961, 0.25291112, + # 0.16128487, 0.49784577, 0.24588924, 0.6597, 0.92239679) + # eqnpar(x, p=0.5, + # ci.method = "interpolate", approx.conf.level = 0.95, ci = TRUE) + rng = np.random.default_rng(8824288259505800535) + x = rng.random(size=20) + assert_allclose(ms.median_cihs(x), (0.38663198, 0.88431272)) + + # SciPy's 90% CI upper limit doesn't match that of EnvStats eqnpar. SciPy + # doesn't look wrong, and it agrees with a different reference, + # `median_confint_hs` from `hoehleatsu/quantileCI`. + # In (e.g.) Colab with R runtime: + # devtools::install_github("hoehleatsu/quantileCI") + # library(quantileCI) + # median_confint_hs(x=x, conf.level=0.90, interpolate=TRUE) + assert_allclose(ms.median_cihs(x, 0.1), (0.48319773366, 0.88094268050)) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_multicomp.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_multicomp.py new file mode 100644 index 0000000000000000000000000000000000000000..c85d95ebbb1609c861a338fa1c4c2b6169b8ce00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_multicomp.py @@ -0,0 +1,404 @@ +import copy + +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from scipy import stats +from scipy.stats._multicomp import _pvalue_dunnett, DunnettResult + + +class TestDunnett: + # For the following tests, p-values were computed using Matlab, e.g. + # sample = [18. 15. 18. 16. 17. 15. 14. 14. 14. 15. 15.... + # 14. 15. 14. 22. 18. 21. 21. 10. 10. 11. 9.... + # 25. 26. 17.5 16. 15.5 14.5 22. 22. 24. 22.5 29.... + # 24.5 20. 18. 18.5 17.5 26.5 13. 16.5 13. 13. 13.... + # 28. 27. 34. 31. 29. 27. 24. 23. 38. 36. 25.... + # 38. 26. 22. 36. 27. 27. 32. 28. 31.... + # 24. 27. 33. 32. 28. 19. 37. 31. 36. 36.... + # 34. 38. 32. 38. 32.... + # 26. 24. 26. 25. 29. 29.5 16.5 36. 44.... + # 25. 27. 19.... + # 25. 20.... + # 28.]; + # j = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ... 
+    #      0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ...
+    #      0 0 0 0...
+    #      1 1 1 1 1 1 1 1 1 1 1 1 1 1 1...
+    #      2 2 2 2 2 2 2 2 2...
+    #      3 3 3...
+    #      4 4...
+    #      5];
+    # [~, ~, stats] = anova1(sample, j, "off");
+    # [results, ~, ~, gnames] = multcompare(stats, ...
+    #                                       "CriticalValueType", "dunnett", ...
+    #                                       "Approximate", false);
+    # tbl = array2table(results, "VariableNames", ...
+    #                   ["Group", "Control Group", "Lower Limit", ...
+    #                    "Difference", "Upper Limit", "P-value"]);
+    # tbl.("Group") = gnames(tbl.("Group"));
+    # tbl.("Control Group") = gnames(tbl.("Control Group"))
+
+    # Matlab doesn't report the statistic, so the statistics were
+    # computed using R multcomp `glht`, e.g.:
+    # library(multcomp)
+    # options(digits=16)
+    # control <- c(18.0, 15.0, 18.0, 16.0, 17.0, 15.0, 14.0, 14.0, 14.0,
+    #              15.0, 15.0, 14.0, 15.0, 14.0, 22.0, 18.0, 21.0, 21.0,
+    #              10.0, 10.0, 11.0, 9.0, 25.0, 26.0, 17.5, 16.0, 15.5,
+    #              14.5, 22.0, 22.0, 24.0, 22.5, 29.0, 24.5, 20.0, 18.0,
+    #              18.5, 17.5, 26.5, 13.0, 16.5, 13.0, 13.0, 13.0, 28.0,
+    #              27.0, 34.0, 31.0, 29.0, 27.0, 24.0, 23.0, 38.0, 36.0,
+    #              25.0, 38.0, 26.0, 22.0, 36.0, 27.0, 27.0, 32.0, 28.0,
+    #              31.0)
+    # t <- c(24.0, 27.0, 33.0, 32.0, 28.0, 19.0, 37.0, 31.0, 36.0, 36.0,
+    #        34.0, 38.0, 32.0, 38.0, 32.0)
+    # w <- c(26.0, 24.0, 26.0, 25.0, 29.0, 29.5, 16.5, 36.0, 44.0)
+    # x <- c(25.0, 27.0, 19.0)
+    # y <- c(25.0, 20.0)
+    # z <- c(28.0)
+    #
+    # groups = factor(rep(c("control", "t", "w", "x", "y", "z"),
+    #                     times=c(length(control), length(t), length(w),
+    #                             length(x), length(y), length(z))))
+    # df <- data.frame(response=c(control, t, w, x, y, z),
+    #                  group=groups)
+    # model <- aov(response
+    #              ~group, data = df)
+    # test <- glht(model=model,
+    #              linfct=mcp(group="Dunnett"),
+    #              alternative="g")
+    # summary(test)
+    # confint(test)
+    # p-values agreed with those produced by Matlab to at least atol=1e-3
+
+    # From Matlab's documentation on multcompare
+    samples_1 = [
+        [
+            24.0, 27.0, 33.0, 32.0, 28.0, 19.0, 37.0, 31.0, 36.0, 36.0,
+            34.0, 38.0, 32.0, 38.0, 32.0
+        ],
+        [26.0, 24.0, 26.0, 25.0, 29.0, 29.5, 16.5, 36.0, 44.0],
+        [25.0, 27.0, 19.0],
+        [25.0, 20.0],
+        [28.0]
+    ]
+    control_1 = [
+        18.0, 15.0, 18.0, 16.0, 17.0, 15.0, 14.0, 14.0, 14.0, 15.0, 15.0,
+        14.0, 15.0, 14.0, 22.0, 18.0, 21.0, 21.0, 10.0, 10.0, 11.0, 9.0,
+        25.0, 26.0, 17.5, 16.0, 15.5, 14.5, 22.0, 22.0, 24.0, 22.5, 29.0,
+        24.5, 20.0, 18.0, 18.5, 17.5, 26.5, 13.0, 16.5, 13.0, 13.0, 13.0,
+        28.0, 27.0, 34.0, 31.0, 29.0, 27.0, 24.0, 23.0, 38.0, 36.0, 25.0,
+        38.0, 26.0, 22.0, 36.0, 27.0, 27.0, 32.0, 28.0, 31.0
+    ]
+    pvalue_1 = [4.727e-06, 0.022346, 0.97912, 0.99953, 0.86579]  # Matlab
+    # Statistic, alternative p-values, and CIs computed with R multcomp `glht`
+    p_1_twosided = [1e-4, 0.02237, 0.97913, 0.99953, 0.86583]
+    p_1_greater = [1e-4, 0.011217, 0.768500, 0.896991, 0.577211]
+    p_1_less = [1, 1, 0.99660, 0.98398, .99953]
+    statistic_1 = [5.27356, 2.91270, 0.60831, 0.27002, 0.96637]
+    ci_1_twosided = [[5.3633917835622, 0.7296142201217, -8.3879817106607,
+                      -11.9090753452911, -11.7655021543469],
+                     [15.9709832164378, 13.8936496687672, 13.4556900439941,
+                      14.6434503452911, 25.4998771543469]]
+    ci_1_greater = [5.9036402398526, 1.4000632918725, -7.2754756323636,
+                    -10.5567456382391, -9.8675629499576]
+    ci_1_less = [15.4306165948619, 13.2230539537359, 12.3429406339544,
+                 13.2908248513211, 23.6015228251660]
+    pvalues_1 = dict(twosided=p_1_twosided, less=p_1_less, greater=p_1_greater)
+    cis_1 = dict(twosided=ci_1_twosided, less=ci_1_less, greater=ci_1_greater)
+    case_1
= dict(samples=samples_1, control=control_1, statistic=statistic_1, + pvalues=pvalues_1, cis=cis_1) + + # From Dunnett1955 comparing with R's DescTools: DunnettTest + samples_2 = [[9.76, 8.80, 7.68, 9.36], [12.80, 9.68, 12.16, 9.20, 10.55]] + control_2 = [7.40, 8.50, 7.20, 8.24, 9.84, 8.32] + pvalue_2 = [0.6201, 0.0058] + # Statistic, alternative p-values, and CIs computed with R multcomp `glht` + p_2_twosided = [0.6201020, 0.0058254] + p_2_greater = [0.3249776, 0.0029139] + p_2_less = [0.91676, 0.99984] + statistic_2 = [0.85703, 3.69375] + ci_2_twosided = [[-1.2564116462124, 0.8396273539789], + [2.5564116462124, 4.4163726460211]] + ci_2_greater = [-0.9588591188156, 1.1187563667543] + ci_2_less = [2.2588591188156, 4.1372436332457] + pvalues_2 = dict(twosided=p_2_twosided, less=p_2_less, greater=p_2_greater) + cis_2 = dict(twosided=ci_2_twosided, less=ci_2_less, greater=ci_2_greater) + case_2 = dict(samples=samples_2, control=control_2, statistic=statistic_2, + pvalues=pvalues_2, cis=cis_2) + + samples_3 = [[55, 64, 64], [55, 49, 52], [50, 44, 41]] + control_3 = [55, 47, 48] + pvalue_3 = [0.0364, 0.8966, 0.4091] + # Statistic, alternative p-values, and CIs computed with R multcomp `glht` + p_3_twosided = [0.036407, 0.896539, 0.409295] + p_3_greater = [0.018277, 0.521109, 0.981892] + p_3_less = [0.99944, 0.90054, 0.20974] + statistic_3 = [3.09073, 0.56195, -1.40488] + ci_3_twosided = [[0.7529028025053, -8.2470971974947, -15.2470971974947], + [21.2470971974947, 12.2470971974947, 5.2470971974947]] + ci_3_greater = [2.4023682323149, -6.5976317676851, -13.5976317676851] + ci_3_less = [19.5984402363662, 10.5984402363662, 3.5984402363662] + pvalues_3 = dict(twosided=p_3_twosided, less=p_3_less, greater=p_3_greater) + cis_3 = dict(twosided=ci_3_twosided, less=ci_3_less, greater=ci_3_greater) + case_3 = dict(samples=samples_3, control=control_3, statistic=statistic_3, + pvalues=pvalues_3, cis=cis_3) + + # From Thomson and Short, + # Mucociliary function in health, chronic obstructive airway disease, + # and asbestosis, Journal of Applied Physiology, 1969. 
Table 1 + # Comparing with R's DescTools: DunnettTest + samples_4 = [[3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]] + control_4 = [2.9, 3.0, 2.5, 2.6, 3.2] + pvalue_4 = [0.5832, 0.9982] + # Statistic, alternative p-values, and CIs computed with R multcomp `glht` + p_4_twosided = [0.58317, 0.99819] + p_4_greater = [0.30225, 0.69115] + p_4_less = [0.91929, 0.65212] + statistic_4 = [0.90875, -0.05007] + ci_4_twosided = [[-0.6898153448579, -1.0333456251632], + [1.4598153448579, 0.9933456251632]] + ci_4_greater = [-0.5186459268412, -0.8719655502147 ] + ci_4_less = [1.2886459268412, 0.8319655502147] + pvalues_4 = dict(twosided=p_4_twosided, less=p_4_less, greater=p_4_greater) + cis_4 = dict(twosided=ci_4_twosided, less=ci_4_less, greater=ci_4_greater) + case_4 = dict(samples=samples_4, control=control_4, statistic=statistic_4, + pvalues=pvalues_4, cis=cis_4) + + @pytest.mark.parametrize( + 'rho, n_groups, df, statistic, pvalue, alternative', + [ + # From Dunnett1955 + # Tables 1a and 1b pages 1117-1118 + (0.5, 1, 10, 1.81, 0.05, "greater"), # different than two-sided + (0.5, 3, 10, 2.34, 0.05, "greater"), + (0.5, 2, 30, 1.99, 0.05, "greater"), + (0.5, 5, 30, 2.33, 0.05, "greater"), + (0.5, 4, 12, 3.32, 0.01, "greater"), + (0.5, 7, 12, 3.56, 0.01, "greater"), + (0.5, 2, 60, 2.64, 0.01, "greater"), + (0.5, 4, 60, 2.87, 0.01, "greater"), + (0.5, 4, 60, [2.87, 2.21], [0.01, 0.05], "greater"), + # Tables 2a and 2b pages 1119-1120 + (0.5, 1, 10, 2.23, 0.05, "two-sided"), # two-sided + (0.5, 3, 10, 2.81, 0.05, "two-sided"), + (0.5, 2, 30, 2.32, 0.05, "two-sided"), + (0.5, 3, 20, 2.57, 0.05, "two-sided"), + (0.5, 4, 12, 3.76, 0.01, "two-sided"), + (0.5, 7, 12, 4.08, 0.01, "two-sided"), + (0.5, 2, 60, 2.90, 0.01, "two-sided"), + (0.5, 4, 60, 3.14, 0.01, "two-sided"), + (0.5, 4, 60, [3.14, 2.55], [0.01, 0.05], "two-sided"), + ], + ) + def test_critical_values( + self, rho, n_groups, df, statistic, pvalue, alternative + ): + rng = np.random.default_rng(165250594791731684851746311027739134893) + rho = np.full((n_groups, n_groups), rho) + np.fill_diagonal(rho, 1) + + statistic = np.array(statistic) + res = _pvalue_dunnett( + rho=rho, df=df, statistic=statistic, + alternative=alternative, + rng=rng + ) + assert_allclose(res, pvalue, atol=5e-3) + + @pytest.mark.parametrize( + 'samples, control, pvalue, statistic', + [ + (samples_1, control_1, pvalue_1, statistic_1), + (samples_2, control_2, pvalue_2, statistic_2), + (samples_3, control_3, pvalue_3, statistic_3), + (samples_4, control_4, pvalue_4, statistic_4), + ] + ) + def test_basic(self, samples, control, pvalue, statistic): + rng = np.random.default_rng(11681140010308601919115036826969764808) + + res = stats.dunnett(*samples, control=control, random_state=rng) + + assert isinstance(res, DunnettResult) + assert_allclose(res.statistic, statistic, rtol=5e-5) + assert_allclose(res.pvalue, pvalue, rtol=1e-2, atol=1e-4) + + @pytest.mark.parametrize( + 'alternative', + ['two-sided', 'less', 'greater'] + ) + def test_ttest_ind(self, alternative): + # check that `dunnett` agrees with `ttest_ind` + # when there are only two groups + rng = np.random.default_rng(114184017807316971636137493526995620351) + + for _ in range(10): + sample = rng.integers(-100, 100, size=(10,)) + control = rng.integers(-100, 100, size=(10,)) + + res = stats.dunnett( + sample, control=control, + alternative=alternative, random_state=rng + ) + ref = stats.ttest_ind( + sample, control, + alternative=alternative, random_state=rng + ) + + assert_allclose(res.statistic, ref.statistic, 
rtol=1e-3, atol=1e-5) + assert_allclose(res.pvalue, ref.pvalue, rtol=1e-3, atol=1e-5) + + @pytest.mark.parametrize( + 'alternative, pvalue', + [ + ('less', [0, 1]), + ('greater', [1, 0]), + ('two-sided', [0, 0]), + ] + ) + def test_alternatives(self, alternative, pvalue): + rng = np.random.default_rng(114184017807316971636137493526995620351) + + # width of 20 and min diff between samples/control is 60 + # and maximal diff would be 100 + sample_less = rng.integers(0, 20, size=(10,)) + control = rng.integers(80, 100, size=(10,)) + sample_greater = rng.integers(160, 180, size=(10,)) + + res = stats.dunnett( + sample_less, sample_greater, control=control, + alternative=alternative, random_state=rng + ) + assert_allclose(res.pvalue, pvalue, atol=1e-7) + + ci = res.confidence_interval() + # two-sided is comparable for high/low + if alternative == 'less': + assert np.isneginf(ci.low).all() + assert -100 < ci.high[0] < -60 + assert 60 < ci.high[1] < 100 + elif alternative == 'greater': + assert -100 < ci.low[0] < -60 + assert 60 < ci.low[1] < 100 + assert np.isposinf(ci.high).all() + elif alternative == 'two-sided': + assert -100 < ci.low[0] < -60 + assert 60 < ci.low[1] < 100 + assert -100 < ci.high[0] < -60 + assert 60 < ci.high[1] < 100 + + @pytest.mark.parametrize("case", [case_1, case_2, case_3, case_4]) + @pytest.mark.parametrize("alternative", ['less', 'greater', 'two-sided']) + def test_against_R_multicomp_glht(self, case, alternative): + rng = np.random.default_rng(189117774084579816190295271136455278291) + samples = case['samples'] + control = case['control'] + alternatives = {'less': 'less', 'greater': 'greater', + 'two-sided': 'twosided'} + p_ref = case['pvalues'][alternative.replace('-', '')] + + res = stats.dunnett(*samples, control=control, alternative=alternative, + random_state=rng) + # atol can't be tighter because R reports some pvalues as "< 1e-4" + assert_allclose(res.pvalue, p_ref, rtol=5e-3, atol=1e-4) + + ci_ref = case['cis'][alternatives[alternative]] + if alternative == "greater": + ci_ref = [ci_ref, np.inf] + elif alternative == "less": + ci_ref = [-np.inf, ci_ref] + assert res._ci is None + assert res._ci_cl is None + ci = res.confidence_interval(confidence_level=0.95) + assert_allclose(ci.low, ci_ref[0], rtol=5e-3, atol=1e-5) + assert_allclose(ci.high, ci_ref[1], rtol=5e-3, atol=1e-5) + + # re-run to use the cached value "is" to check id as same object + assert res._ci is ci + assert res._ci_cl == 0.95 + ci_ = res.confidence_interval(confidence_level=0.95) + assert ci_ is ci + + @pytest.mark.parametrize('alternative', ["two-sided", "less", "greater"]) + def test_str(self, alternative): + rng = np.random.default_rng(189117774084579816190295271136455278291) + + res = stats.dunnett( + *self.samples_3, control=self.control_3, alternative=alternative, + random_state=rng + ) + + # check some str output + res_str = str(res) + assert '(Sample 2 - Control)' in res_str + assert '95.0%' in res_str + + if alternative == 'less': + assert '-inf' in res_str + assert '19.' in res_str + elif alternative == 'greater': + assert 'inf' in res_str + assert '-13.' in res_str + else: + assert 'inf' not in res_str + assert '21.' 
in res_str + + def test_warnings(self): + rng = np.random.default_rng(189117774084579816190295271136455278291) + + res = stats.dunnett( + *self.samples_3, control=self.control_3, random_state=rng + ) + msg = r"Computation of the confidence interval did not converge" + with pytest.warns(UserWarning, match=msg): + res._allowance(tol=1e-5) + + def test_raises(self): + samples, control = self.samples_3, self.control_3 + + # alternative + with pytest.raises(ValueError, match="alternative must be"): + stats.dunnett(*samples, control=control, alternative='bob') + + # 2D for a sample + samples_ = copy.deepcopy(samples) + samples_[0] = [samples_[0]] + with pytest.raises(ValueError, match="must be 1D arrays"): + stats.dunnett(*samples_, control=control) + + # 2D for control + control_ = copy.deepcopy(control) + control_ = [control_] + with pytest.raises(ValueError, match="must be 1D arrays"): + stats.dunnett(*samples, control=control_) + + # No obs in a sample + samples_ = copy.deepcopy(samples) + samples_[1] = [] + with pytest.raises(ValueError, match="at least 1 observation"): + stats.dunnett(*samples_, control=control) + + # No obs in control + control_ = [] + with pytest.raises(ValueError, match="at least 1 observation"): + stats.dunnett(*samples, control=control_) + + res = stats.dunnett(*samples, control=control) + with pytest.raises(ValueError, match="Confidence level must"): + res.confidence_interval(confidence_level=3) + + @pytest.mark.filterwarnings("ignore:Computation of the confidence") + @pytest.mark.parametrize('n_samples', [1, 2, 3]) + def test_shapes(self, n_samples): + rng = np.random.default_rng(689448934110805334) + samples = rng.normal(size=(n_samples, 10)) + control = rng.normal(size=10) + res = stats.dunnett(*samples, control=control, random_state=rng) + assert res.statistic.shape == (n_samples,) + assert res.pvalue.shape == (n_samples,) + ci = res.confidence_interval() + assert ci.low.shape == (n_samples,) + assert ci.high.shape == (n_samples,) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_multivariate.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_multivariate.py new file mode 100644 index 0000000000000000000000000000000000000000..824660923144abf3fba427abada2ff1603ed5bc8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_multivariate.py @@ -0,0 +1,3859 @@ +""" +Test functions for multivariate normal distributions. 
+ +""" +import pickle + +from numpy.testing import (assert_allclose, assert_almost_equal, + assert_array_almost_equal, assert_equal, + assert_array_less, assert_) +import pytest +from pytest import raises as assert_raises + +from .test_continuous_basic import check_distribution_rvs + +import numpy +import numpy as np + +import scipy.linalg +from scipy.stats._multivariate import (_PSD, + _lnB, + multivariate_normal_frozen) +from scipy.stats import (multivariate_normal, multivariate_hypergeom, + matrix_normal, special_ortho_group, ortho_group, + random_correlation, unitary_group, dirichlet, + beta, wishart, multinomial, invwishart, chi2, + invgamma, norm, uniform, ks_2samp, kstest, binom, + hypergeom, multivariate_t, cauchy, normaltest, + random_table, uniform_direction, vonmises_fisher, + dirichlet_multinomial, vonmises) + +from scipy.stats import _covariance, Covariance +from scipy import stats + +from scipy.integrate import romb, qmc_quad, tplquad +from scipy.special import multigammaln +from scipy._lib._pep440 import Version + +from .common_tests import check_random_state_property +from .data._mvt import _qsimvtv + +from unittest.mock import patch + + +def assert_close(res, ref, *args, **kwargs): + res, ref = np.asarray(res), np.asarray(ref) + assert_allclose(res, ref, *args, **kwargs) + assert_equal(res.shape, ref.shape) + + +class TestCovariance: + + def test_input_validation(self): + + message = "The input `precision` must be a square, two-dimensional..." + with pytest.raises(ValueError, match=message): + _covariance.CovViaPrecision(np.ones(2)) + + message = "`precision.shape` must equal `covariance.shape`." + with pytest.raises(ValueError, match=message): + _covariance.CovViaPrecision(np.eye(3), covariance=np.eye(2)) + + message = "The input `diagonal` must be a one-dimensional array..." + with pytest.raises(ValueError, match=message): + _covariance.CovViaDiagonal("alpaca") + + message = "The input `cholesky` must be a square, two-dimensional..." + with pytest.raises(ValueError, match=message): + _covariance.CovViaCholesky(np.ones(2)) + + message = "The input `eigenvalues` must be a one-dimensional..." + with pytest.raises(ValueError, match=message): + _covariance.CovViaEigendecomposition(("alpaca", np.eye(2))) + + message = "The input `eigenvectors` must be a square..." + with pytest.raises(ValueError, match=message): + _covariance.CovViaEigendecomposition((np.ones(2), "alpaca")) + + message = "The shapes of `eigenvalues` and `eigenvectors` must be..." 
+ with pytest.raises(ValueError, match=message): + _covariance.CovViaEigendecomposition(([1, 2, 3], np.eye(2))) + + _covariance_preprocessing = {"Diagonal": np.diag, + "Precision": np.linalg.inv, + "Cholesky": np.linalg.cholesky, + "Eigendecomposition": np.linalg.eigh, + "PSD": lambda x: + _PSD(x, allow_singular=True)} + _all_covariance_types = np.array(list(_covariance_preprocessing)) + _matrices = {"diagonal full rank": np.diag([1, 2, 3]), + "general full rank": [[5, 1, 3], [1, 6, 4], [3, 4, 7]], + "diagonal singular": np.diag([1, 0, 3]), + "general singular": [[5, -1, 0], [-1, 5, 0], [0, 0, 0]]} + _cov_types = {"diagonal full rank": _all_covariance_types, + "general full rank": _all_covariance_types[1:], + "diagonal singular": _all_covariance_types[[0, -2, -1]], + "general singular": _all_covariance_types[-2:]} + + @pytest.mark.parametrize("cov_type_name", _all_covariance_types[:-1]) + def test_factories(self, cov_type_name): + A = np.diag([1, 2, 3]) + x = [-4, 2, 5] + + cov_type = getattr(_covariance, f"CovVia{cov_type_name}") + preprocessing = self._covariance_preprocessing[cov_type_name] + factory = getattr(Covariance, f"from_{cov_type_name.lower()}") + + res = factory(preprocessing(A)) + ref = cov_type(preprocessing(A)) + assert type(res) == type(ref) + assert_allclose(res.whiten(x), ref.whiten(x)) + + @pytest.mark.parametrize("matrix_type", list(_matrices)) + @pytest.mark.parametrize("cov_type_name", _all_covariance_types) + def test_covariance(self, matrix_type, cov_type_name): + message = (f"CovVia{cov_type_name} does not support {matrix_type} " + "matrices") + if cov_type_name not in self._cov_types[matrix_type]: + pytest.skip(message) + + A = self._matrices[matrix_type] + cov_type = getattr(_covariance, f"CovVia{cov_type_name}") + preprocessing = self._covariance_preprocessing[cov_type_name] + + psd = _PSD(A, allow_singular=True) + + # test properties + cov_object = cov_type(preprocessing(A)) + assert_close(cov_object.log_pdet, psd.log_pdet) + assert_equal(cov_object.rank, psd.rank) + assert_equal(cov_object.shape, np.asarray(A).shape) + assert_close(cov_object.covariance, np.asarray(A)) + + # test whitening/coloring 1D x + rng = np.random.default_rng(5292808890472453840) + x = rng.random(size=3) + res = cov_object.whiten(x) + ref = x @ psd.U + # res != ref in general; but res @ res == ref @ ref + assert_close(res @ res, ref @ ref) + if hasattr(cov_object, "_colorize") and "singular" not in matrix_type: + # CovViaPSD does not have _colorize + assert_close(cov_object.colorize(res), x) + + # test whitening/coloring 3D x + x = rng.random(size=(2, 4, 3)) + res = cov_object.whiten(x) + ref = x @ psd.U + assert_close((res**2).sum(axis=-1), (ref**2).sum(axis=-1)) + if hasattr(cov_object, "_colorize") and "singular" not in matrix_type: + assert_close(cov_object.colorize(res), x) + + # gh-19197 reported that multivariate normal `rvs` produced incorrect + # results when a singular Covariance object was produce using + # `from_eigenvalues`. This was due to an issue in `colorize` with + # singular covariance matrices. Check this edge case, which is skipped + # in the previous tests. 
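+        # Editor's note: a minimal sketch of the whiten/colorize round trip
+        # that the check below relies on, using a hypothetical full-rank
+        # diagonal covariance (illustrative only, not part of the original
+        # test suite):
+        #
+        #     >>> import numpy as np
+        #     >>> from scipy.stats import Covariance
+        #     >>> c = Covariance.from_diagonal([1.0, 4.0])
+        #     >>> z = c.whiten(np.array([1.0, 2.0]))  # standardized coords
+        #     >>> bool(np.allclose(c.colorize(z), [1.0, 2.0]))
+        #     True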
+ if hasattr(cov_object, "_colorize"): + res = cov_object.colorize(np.eye(len(A))) + assert_close(res.T @ res, A) + + @pytest.mark.parametrize("size", [None, tuple(), 1, (2, 4, 3)]) + @pytest.mark.parametrize("matrix_type", list(_matrices)) + @pytest.mark.parametrize("cov_type_name", _all_covariance_types) + def test_mvn_with_covariance(self, size, matrix_type, cov_type_name): + message = (f"CovVia{cov_type_name} does not support {matrix_type} " + "matrices") + if cov_type_name not in self._cov_types[matrix_type]: + pytest.skip(message) + + A = self._matrices[matrix_type] + cov_type = getattr(_covariance, f"CovVia{cov_type_name}") + preprocessing = self._covariance_preprocessing[cov_type_name] + + mean = [0.1, 0.2, 0.3] + cov_object = cov_type(preprocessing(A)) + mvn = multivariate_normal + dist0 = multivariate_normal(mean, A, allow_singular=True) + dist1 = multivariate_normal(mean, cov_object, allow_singular=True) + + rng = np.random.default_rng(5292808890472453840) + x = rng.multivariate_normal(mean, A, size=size) + rng = np.random.default_rng(5292808890472453840) + x1 = mvn.rvs(mean, cov_object, size=size, random_state=rng) + rng = np.random.default_rng(5292808890472453840) + x2 = mvn(mean, cov_object, seed=rng).rvs(size=size) + if isinstance(cov_object, _covariance.CovViaPSD): + assert_close(x1, np.squeeze(x)) # for backward compatibility + assert_close(x2, np.squeeze(x)) + else: + assert_equal(x1.shape, x.shape) + assert_equal(x2.shape, x.shape) + assert_close(x2, x1) + + assert_close(mvn.pdf(x, mean, cov_object), dist0.pdf(x)) + assert_close(dist1.pdf(x), dist0.pdf(x)) + assert_close(mvn.logpdf(x, mean, cov_object), dist0.logpdf(x)) + assert_close(dist1.logpdf(x), dist0.logpdf(x)) + assert_close(mvn.entropy(mean, cov_object), dist0.entropy()) + assert_close(dist1.entropy(), dist0.entropy()) + + @pytest.mark.parametrize("size", [tuple(), (2, 4, 3)]) + @pytest.mark.parametrize("cov_type_name", _all_covariance_types) + def test_mvn_with_covariance_cdf(self, size, cov_type_name): + # This is split from the test above because it's slow to run with + # all matrix types, and there's no need because _mvn.mvnun does the + # calculation. All `Covariance` needs to do is provide the + # `covariance` attribute. + matrix_type = "diagonal full rank" + A = self._matrices[matrix_type] + cov_type = getattr(_covariance, f"CovVia{cov_type_name}") + preprocessing = self._covariance_preprocessing[cov_type_name] + + mean = [0.1, 0.2, 0.3] + cov_object = cov_type(preprocessing(A)) + mvn = multivariate_normal + dist0 = multivariate_normal(mean, A, allow_singular=True) + dist1 = multivariate_normal(mean, cov_object, allow_singular=True) + + rng = np.random.default_rng(5292808890472453840) + x = rng.multivariate_normal(mean, A, size=size) + + assert_close(mvn.cdf(x, mean, cov_object), dist0.cdf(x)) + assert_close(dist1.cdf(x), dist0.cdf(x)) + assert_close(mvn.logcdf(x, mean, cov_object), dist0.logcdf(x)) + assert_close(dist1.logcdf(x), dist0.logcdf(x)) + + def test_covariance_instantiation(self): + message = "The `Covariance` class cannot be instantiated directly." + with pytest.raises(NotImplementedError, match=message): + Covariance() + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") # matrix not PSD + def test_gh9942(self): + # Originally there was a mistake in the `multivariate_normal_frozen` + # `rvs` method that caused all covariance objects to be processed as + # a `_CovViaPSD`. Ensure that this is resolved.
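+        # The scale below has a slightly negative eigenvalue (-1e-8): the
+        # `_CovViaPSD` code path must reject it, while the explicit
+        # eigendecomposition route only emits a RuntimeWarning (hence the
+        # `filterwarnings` mark above).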
+ A = np.diag([1, 2, -1e-8]) + n = A.shape[0] + mean = np.zeros(n) + + # Error if the matrix is processed as a `_CovViaPSD` + with pytest.raises(ValueError, match="The input matrix must be..."): + multivariate_normal(mean, A).rvs() + + # No error if it is provided as a `CovViaEigendecomposition` + seed = 3562050283508273023 + rng1 = np.random.default_rng(seed) + rng2 = np.random.default_rng(seed) + cov = Covariance.from_eigendecomposition(np.linalg.eigh(A)) + rv = multivariate_normal(mean, cov) + res = rv.rvs(random_state=rng1) + ref = multivariate_normal.rvs(mean, cov, random_state=rng2) + assert_equal(res, ref) + + def test_gh19197(self): + # gh-19197 reported that multivariate normal `rvs` produced incorrect + # results when a singular Covariance object was produce using + # `from_eigenvalues`. Check that this specific issue is resolved; + # a more general test is included in `test_covariance`. + mean = np.ones(2) + cov = Covariance.from_eigendecomposition((np.zeros(2), np.eye(2))) + dist = scipy.stats.multivariate_normal(mean=mean, cov=cov) + rvs = dist.rvs(size=None) + assert_equal(rvs, mean) + + cov = scipy.stats.Covariance.from_eigendecomposition( + (np.array([1., 0.]), np.array([[1., 0.], [0., 400.]]))) + dist = scipy.stats.multivariate_normal(mean=mean, cov=cov) + rvs = dist.rvs(size=None) + assert rvs[0] != mean[0] + assert rvs[1] == mean[1] + + +def _random_covariance(dim, evals, rng, singular=False): + # Generates random covariance matrix with dimensionality `dim` and + # eigenvalues `evals` using provided Generator `rng`. Randomly sets + # some evals to zero if `singular` is True. + A = rng.random((dim, dim)) + A = A @ A.T + _, v = np.linalg.eigh(A) + if singular: + zero_eigs = rng.normal(size=dim) > 0 + evals[zero_eigs] = 0 + cov = v @ np.diag(evals) @ v.T + return cov + + +def _sample_orthonormal_matrix(n): + M = np.random.randn(n, n) + u, s, v = scipy.linalg.svd(M) + return u + + +class TestMultivariateNormal: + def test_input_shape(self): + mu = np.arange(3) + cov = np.identity(2) + assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov) + assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov) + assert_raises(ValueError, multivariate_normal.cdf, (0, 1), mu, cov) + assert_raises(ValueError, multivariate_normal.cdf, (0, 1, 2), mu, cov) + + def test_scalar_values(self): + np.random.seed(1234) + + # When evaluated on scalar data, the pdf should return a scalar + x, mean, cov = 1.5, 1.7, 2.5 + pdf = multivariate_normal.pdf(x, mean, cov) + assert_equal(pdf.ndim, 0) + + # When evaluated on a single vector, the pdf should return a scalar + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.abs(np.random.randn(5)) # Diagonal values for cov. matrix + pdf = multivariate_normal.pdf(x, mean, cov) + assert_equal(pdf.ndim, 0) + + # When evaluated on scalar data, the cdf should return a scalar + x, mean, cov = 1.5, 1.7, 2.5 + cdf = multivariate_normal.cdf(x, mean, cov) + assert_equal(cdf.ndim, 0) + + # When evaluated on a single vector, the cdf should return a scalar + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.abs(np.random.randn(5)) # Diagonal values for cov. 
matrix + cdf = multivariate_normal.cdf(x, mean, cov) + assert_equal(cdf.ndim, 0) + + def test_logpdf(self): + # Check that the log of the pdf is in fact the logpdf + np.random.seed(1234) + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.abs(np.random.randn(5)) + d1 = multivariate_normal.logpdf(x, mean, cov) + d2 = multivariate_normal.pdf(x, mean, cov) + assert_allclose(d1, np.log(d2)) + + def test_logpdf_default_values(self): + # Check that the log of the pdf is in fact the logpdf + # with default parameters Mean=None and cov = 1 + np.random.seed(1234) + x = np.random.randn(5) + d1 = multivariate_normal.logpdf(x) + d2 = multivariate_normal.pdf(x) + # check whether default values are being used + d3 = multivariate_normal.logpdf(x, None, 1) + d4 = multivariate_normal.pdf(x, None, 1) + assert_allclose(d1, np.log(d2)) + assert_allclose(d3, np.log(d4)) + + def test_logcdf(self): + # Check that the log of the cdf is in fact the logcdf + np.random.seed(1234) + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.abs(np.random.randn(5)) + d1 = multivariate_normal.logcdf(x, mean, cov) + d2 = multivariate_normal.cdf(x, mean, cov) + assert_allclose(d1, np.log(d2)) + + def test_logcdf_default_values(self): + # Check that the log of the cdf is in fact the logcdf + # with default parameters Mean=None and cov = 1 + np.random.seed(1234) + x = np.random.randn(5) + d1 = multivariate_normal.logcdf(x) + d2 = multivariate_normal.cdf(x) + # check whether default values are being used + d3 = multivariate_normal.logcdf(x, None, 1) + d4 = multivariate_normal.cdf(x, None, 1) + assert_allclose(d1, np.log(d2)) + assert_allclose(d3, np.log(d4)) + + def test_rank(self): + # Check that the rank is detected correctly. + np.random.seed(1234) + n = 4 + mean = np.random.randn(n) + for expected_rank in range(1, n + 1): + s = np.random.randn(n, expected_rank) + cov = np.dot(s, s.T) + distn = multivariate_normal(mean, cov, allow_singular=True) + assert_equal(distn.cov_object.rank, expected_rank) + + def test_degenerate_distributions(self): + + for n in range(1, 5): + z = np.random.randn(n) + for k in range(1, n): + # Sample a small covariance matrix. + s = np.random.randn(k, k) + cov_kk = np.dot(s, s.T) + + # Embed the small covariance matrix into a larger singular one. + cov_nn = np.zeros((n, n)) + cov_nn[:k, :k] = cov_kk + + # Embed part of the vector in the same way + x = np.zeros(n) + x[:k] = z[:k] + + # Define a rotation of the larger low rank matrix. + u = _sample_orthonormal_matrix(n) + cov_rr = np.dot(u, np.dot(cov_nn, u.T)) + y = np.dot(u, x) + + # Check some identities. 
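+                # Zero-padding the covariance (cov_nn) and rotating it
+                # (cov_rr) leave the distribution on its support unchanged,
+                # so the pdf of x under the k-dim law must match the pdf of
+                # the embedded x under cov_nn and of y = u @ x under cov_rr.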
+ distn_kk = multivariate_normal(np.zeros(k), cov_kk, + allow_singular=True) + distn_nn = multivariate_normal(np.zeros(n), cov_nn, + allow_singular=True) + distn_rr = multivariate_normal(np.zeros(n), cov_rr, + allow_singular=True) + assert_equal(distn_kk.cov_object.rank, k) + assert_equal(distn_nn.cov_object.rank, k) + assert_equal(distn_rr.cov_object.rank, k) + pdf_kk = distn_kk.pdf(x[:k]) + pdf_nn = distn_nn.pdf(x) + pdf_rr = distn_rr.pdf(y) + assert_allclose(pdf_kk, pdf_nn) + assert_allclose(pdf_kk, pdf_rr) + logpdf_kk = distn_kk.logpdf(x[:k]) + logpdf_nn = distn_nn.logpdf(x) + logpdf_rr = distn_rr.logpdf(y) + assert_allclose(logpdf_kk, logpdf_nn) + assert_allclose(logpdf_kk, logpdf_rr) + + # Add an orthogonal component and find the density + y_orth = y + u[:, -1] + pdf_rr_orth = distn_rr.pdf(y_orth) + logpdf_rr_orth = distn_rr.logpdf(y_orth) + + # Ensure that this has zero probability + assert_equal(pdf_rr_orth, 0.0) + assert_equal(logpdf_rr_orth, -np.inf) + + def test_degenerate_array(self): + # Test that we can generate arrays of random variate from a degenerate + # multivariate normal, and that the pdf for these samples is non-zero + # (i.e. samples from the distribution lie on the subspace) + k = 10 + for n in range(2, 6): + for r in range(1, n): + mn = np.zeros(n) + u = _sample_orthonormal_matrix(n)[:, :r] + vr = np.dot(u, u.T) + X = multivariate_normal.rvs(mean=mn, cov=vr, size=k) + + pdf = multivariate_normal.pdf(X, mean=mn, cov=vr, + allow_singular=True) + assert_equal(pdf.size, k) + assert np.all(pdf > 0.0) + + logpdf = multivariate_normal.logpdf(X, mean=mn, cov=vr, + allow_singular=True) + assert_equal(logpdf.size, k) + assert np.all(logpdf > -np.inf) + + def test_large_pseudo_determinant(self): + # Check that large pseudo-determinants are handled appropriately. + + # Construct a singular diagonal covariance matrix + # whose pseudo determinant overflows double precision. + large_total_log = 1000.0 + npos = 100 + nzero = 2 + large_entry = np.exp(large_total_log / npos) + n = npos + nzero + cov = np.zeros((n, n), dtype=float) + np.fill_diagonal(cov, large_entry) + cov[-nzero:, -nzero:] = 0 + + # Check some determinants. + assert_equal(scipy.linalg.det(cov), 0) + assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf) + assert_allclose(np.linalg.slogdet(cov[:npos, :npos]), + (1, large_total_log)) + + # Check the pseudo-determinant. + psd = _PSD(cov) + assert_allclose(psd.log_pdet, large_total_log) + + def test_broadcasting(self): + np.random.seed(1234) + n = 4 + + # Construct a random covariance matrix. + data = np.random.randn(n, n) + cov = np.dot(data, data.T) + mean = np.random.randn(n) + + # Construct an ndarray which can be interpreted as + # a 2x3 array whose elements are random data vectors. + X = np.random.randn(2, 3, n) + + # Check that multiple data points can be evaluated at once. 
+ desired_pdf = multivariate_normal.pdf(X, mean, cov) + desired_cdf = multivariate_normal.cdf(X, mean, cov) + for i in range(2): + for j in range(3): + actual = multivariate_normal.pdf(X[i, j], mean, cov) + assert_allclose(actual, desired_pdf[i,j]) + # Repeat for cdf + actual = multivariate_normal.cdf(X[i, j], mean, cov) + assert_allclose(actual, desired_cdf[i,j], rtol=1e-3) + + def test_normal_1D(self): + # The probability density function for a 1D normal variable should + # agree with the standard normal distribution in scipy.stats.distributions + x = np.linspace(0, 2, 10) + mean, cov = 1.2, 0.9 + scale = cov**0.5 + d1 = norm.pdf(x, mean, scale) + d2 = multivariate_normal.pdf(x, mean, cov) + assert_allclose(d1, d2) + # The same should hold for the cumulative distribution function + d1 = norm.cdf(x, mean, scale) + d2 = multivariate_normal.cdf(x, mean, cov) + assert_allclose(d1, d2) + + def test_marginalization(self): + # Integrating out one of the variables of a 2D Gaussian should + # yield a 1D Gaussian + mean = np.array([2.5, 3.5]) + cov = np.array([[.5, 0.2], [0.2, .6]]) + n = 2 ** 8 + 1 # Number of samples + delta = 6 / (n - 1) # Grid spacing + + v = np.linspace(0, 6, n) + xv, yv = np.meshgrid(v, v) + pos = np.empty((n, n, 2)) + pos[:, :, 0] = xv + pos[:, :, 1] = yv + pdf = multivariate_normal.pdf(pos, mean, cov) + + # Marginalize over x and y axis + margin_x = romb(pdf, delta, axis=0) + margin_y = romb(pdf, delta, axis=1) + + # Compare with standard normal distribution + gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5) + gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5) + assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2) + assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2) + + def test_frozen(self): + # The frozen distribution should agree with the regular one + np.random.seed(1234) + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.abs(np.random.randn(5)) + norm_frozen = multivariate_normal(mean, cov) + assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov)) + assert_allclose(norm_frozen.logpdf(x), + multivariate_normal.logpdf(x, mean, cov)) + assert_allclose(norm_frozen.cdf(x), multivariate_normal.cdf(x, mean, cov)) + assert_allclose(norm_frozen.logcdf(x), + multivariate_normal.logcdf(x, mean, cov)) + + @pytest.mark.parametrize( + 'covariance', + [ + np.eye(2), + Covariance.from_diagonal([1, 1]), + ] + ) + def test_frozen_multivariate_normal_exposes_attributes(self, covariance): + mean = np.ones((2,)) + cov_should_be = np.eye(2) + norm_frozen = multivariate_normal(mean, covariance) + assert np.allclose(norm_frozen.mean, mean) + assert np.allclose(norm_frozen.cov, cov_should_be) + + def test_pseudodet_pinv(self): + # Make sure that pseudo-inverse and pseudo-det agree on cutoff + + # Assemble random covariance matrix with large and small eigenvalues + np.random.seed(1234) + n = 7 + x = np.random.randn(n, n) + cov = np.dot(x, x.T) + s, u = scipy.linalg.eigh(cov) + s = np.full(n, 0.5) + s[0] = 1.0 + s[-1] = 1e-7 + cov = np.dot(u, np.dot(np.diag(s), u.T)) + + # Set cond so that the lowest eigenvalue is below the cutoff + cond = 1e-5 + psd = _PSD(cov, cond=cond) + psd_pinv = _PSD(psd.pinv, cond=cond) + + # Check that the log pseudo-determinant agrees with the sum + # of the logs of all but the smallest eigenvalue + assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1]))) + # Check that the pseudo-determinant of the pseudo-inverse + # agrees with 1 / pseudo-determinant + assert_allclose(-psd.log_pdet, psd_pinv.log_pdet) + + def 
test_exception_nonsquare_cov(self): + cov = [[1, 2, 3], [4, 5, 6]] + assert_raises(ValueError, _PSD, cov) + + def test_exception_nonfinite_cov(self): + cov_nan = [[1, 0], [0, np.nan]] + assert_raises(ValueError, _PSD, cov_nan) + cov_inf = [[1, 0], [0, np.inf]] + assert_raises(ValueError, _PSD, cov_inf) + + def test_exception_non_psd_cov(self): + cov = [[1, 0], [0, -1]] + assert_raises(ValueError, _PSD, cov) + + def test_exception_singular_cov(self): + np.random.seed(1234) + x = np.random.randn(5) + mean = np.random.randn(5) + cov = np.ones((5, 5)) + e = np.linalg.LinAlgError + assert_raises(e, multivariate_normal, mean, cov) + assert_raises(e, multivariate_normal.pdf, x, mean, cov) + assert_raises(e, multivariate_normal.logpdf, x, mean, cov) + assert_raises(e, multivariate_normal.cdf, x, mean, cov) + assert_raises(e, multivariate_normal.logcdf, x, mean, cov) + + # Message used to be "singular matrix", but this is more accurate. + # See gh-15508 + cov = [[1., 0.], [1., 1.]] + msg = "When `allow_singular is False`, the input matrix" + with pytest.raises(np.linalg.LinAlgError, match=msg): + multivariate_normal(cov=cov) + + def test_R_values(self): + # Compare the multivariate pdf with some values precomputed + # in R version 3.0.1 (2013-05-16) on Mac OS X 10.6. + + # The values below were generated by the following R-script: + # > library(mnormt) + # > x <- seq(0, 2, length=5) + # > y <- 3*x - 2 + # > z <- x + cos(y) + # > mu <- c(1, 3, 2) + # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3) + # > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma) + r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692, + 0.0103803050, 0.0140250800]) + + x = np.linspace(0, 2, 5) + y = 3 * x - 2 + z = x + np.cos(y) + r = np.array([x, y, z]).T + + mean = np.array([1, 3, 2], 'd') + cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd') + + pdf = multivariate_normal.pdf(r, mean, cov) + assert_allclose(pdf, r_pdf, atol=1e-10) + + # Compare the multivariate cdf with some values precomputed + # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux. + + # The values below were generated by the following R-script: + # > library(mnormt) + # > x <- seq(0, 2, length=5) + # > y <- 3*x - 2 + # > z <- x + cos(y) + # > mu <- c(1, 3, 2) + # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3) + # > r_cdf <- pmnorm(cbind(x,y,z), mu, Sigma) + r_cdf = np.array([0.0017866215, 0.0267142892, 0.0857098761, + 0.1063242573, 0.2501068509]) + + cdf = multivariate_normal.cdf(r, mean, cov) + assert_allclose(cdf, r_cdf, atol=2e-5) + + # Also test bivariate cdf with some values precomputed + # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux. 
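+        # (As above, an absolute tolerance is used because the multivariate
+        # normal CDF is evaluated by numerical integration.)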
+ + # The values below were generated by the following R-script: + # > library(mnormt) + # > x <- seq(0, 2, length=5) + # > y <- 3*x - 2 + # > mu <- c(1, 3) + # > Sigma <- matrix(c(1,2,2,5), 2, 2) + # > r_cdf2 <- pmnorm(cbind(x,y), mu, Sigma) + r_cdf2 = np.array([0.01262147, 0.05838989, 0.18389571, + 0.40696599, 0.66470577]) + + r2 = np.array([x, y]).T + + mean2 = np.array([1, 3], 'd') + cov2 = np.array([[1, 2], [2, 5]], 'd') + + cdf2 = multivariate_normal.cdf(r2, mean2, cov2) + assert_allclose(cdf2, r_cdf2, atol=1e-5) + + def test_multivariate_normal_rvs_zero_covariance(self): + mean = np.zeros(2) + covariance = np.zeros((2, 2)) + model = multivariate_normal(mean, covariance, allow_singular=True) + sample = model.rvs() + assert_equal(sample, [0, 0]) + + def test_rvs_shape(self): + # Check that rvs parses the mean and covariance correctly, and returns + # an array of the right shape + N = 300 + d = 4 + sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N) + assert_equal(sample.shape, (N, d)) + + sample = multivariate_normal.rvs(mean=None, + cov=np.array([[2, .1], [.1, 1]]), + size=N) + assert_equal(sample.shape, (N, 2)) + + u = multivariate_normal(mean=0, cov=1) + sample = u.rvs(N) + assert_equal(sample.shape, (N, )) + + def test_large_sample(self): + # Generate large sample and compare sample mean and sample covariance + # with mean and covariance matrix. + + np.random.seed(2846) + + n = 3 + mean = np.random.randn(n) + M = np.random.randn(n, n) + cov = np.dot(M, M.T) + size = 5000 + + sample = multivariate_normal.rvs(mean, cov, size) + + assert_allclose(numpy.cov(sample.T), cov, rtol=1e-1) + assert_allclose(sample.mean(0), mean, rtol=1e-1) + + def test_entropy(self): + np.random.seed(2846) + + n = 3 + mean = np.random.randn(n) + M = np.random.randn(n, n) + cov = np.dot(M, M.T) + + rv = multivariate_normal(mean, cov) + + # Check that frozen distribution agrees with entropy function + assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov)) + # Compare entropy with manually computed expression involving + # the sum of the logs of the eigenvalues of the covariance matrix + eigs = np.linalg.eig(cov)[0] + desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs))) + assert_almost_equal(desired, rv.entropy()) + + def test_lnB(self): + alpha = np.array([1, 1, 1]) + desired = .5 # e^lnB = 1/2 for [1, 1, 1] + + assert_almost_equal(np.exp(_lnB(alpha)), desired) + + def test_cdf_with_lower_limit_arrays(self): + # test CDF with lower limit in several dimensions + rng = np.random.default_rng(2408071309372769818) + mean = [0, 0] + cov = np.eye(2) + a = rng.random((4, 3, 2))*6 - 3 + b = rng.random((4, 3, 2))*6 - 3 + + cdf1 = multivariate_normal.cdf(b, mean, cov, lower_limit=a) + + cdf2a = multivariate_normal.cdf(b, mean, cov) + cdf2b = multivariate_normal.cdf(a, mean, cov) + ab1 = np.concatenate((a[..., 0:1], b[..., 1:2]), axis=-1) + ab2 = np.concatenate((a[..., 1:2], b[..., 0:1]), axis=-1) + cdf2ab1 = multivariate_normal.cdf(ab1, mean, cov) + cdf2ab2 = multivariate_normal.cdf(ab2, mean, cov) + cdf2 = cdf2a + cdf2b - cdf2ab1 - cdf2ab2 + + assert_allclose(cdf1, cdf2) + + def test_cdf_with_lower_limit_consistency(self): + # check that multivariate normal CDF functions are consistent + rng = np.random.default_rng(2408071309372769818) + mean = rng.random(3) + cov = rng.random((3, 3)) + cov = cov @ cov.T + a = rng.random((2, 3))*6 - 3 + b = rng.random((2, 3))*6 - 3 + + cdf1 = multivariate_normal.cdf(b, mean, cov, lower_limit=a) + cdf2 = multivariate_normal(mean, 
cov).cdf(b, lower_limit=a) + cdf3 = np.exp(multivariate_normal.logcdf(b, mean, cov, lower_limit=a)) + cdf4 = np.exp(multivariate_normal(mean, cov).logcdf(b, lower_limit=a)) + + assert_allclose(cdf2, cdf1, rtol=1e-4) + assert_allclose(cdf3, cdf1, rtol=1e-4) + assert_allclose(cdf4, cdf1, rtol=1e-4) + + def test_cdf_signs(self): + # check that sign of output is correct when np.any(lower > x) + mean = np.zeros(3) + cov = np.eye(3) + b = [[1, 1, 1], [0, 0, 0], [1, 0, 1], [0, 1, 0]] + a = [[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]] + # when odd number of elements of b < a, output is negative + expected_signs = np.array([1, -1, -1, 1]) + cdf = multivariate_normal.cdf(b, mean, cov, lower_limit=a) + assert_allclose(cdf, cdf[0]*expected_signs) + + def test_mean_cov(self): + # test the interaction between a Covariance object and mean + P = np.diag(1 / np.array([1, 2, 3])) + cov_object = _covariance.CovViaPrecision(P) + + message = "`cov` represents a covariance matrix in 3 dimensions..." + with pytest.raises(ValueError, match=message): + multivariate_normal.entropy([0, 0], cov_object) + + with pytest.raises(ValueError, match=message): + multivariate_normal([0, 0], cov_object) + + x = [0.5, 0.5, 0.5] + ref = multivariate_normal.pdf(x, [0, 0, 0], cov_object) + assert_equal(multivariate_normal.pdf(x, cov=cov_object), ref) + + ref = multivariate_normal.pdf(x, [1, 1, 1], cov_object) + assert_equal(multivariate_normal.pdf(x, 1, cov=cov_object), ref) + + def test_fit_wrong_fit_data_shape(self): + data = [1, 3] + error_msg = "`x` must be two-dimensional." + with pytest.raises(ValueError, match=error_msg): + multivariate_normal.fit(data) + + @pytest.mark.parametrize('dim', (3, 5)) + def test_fit_correctness(self, dim): + rng = np.random.default_rng(4385269356937404) + x = rng.random((100, dim)) + mean_est, cov_est = multivariate_normal.fit(x) + mean_ref, cov_ref = np.mean(x, axis=0), np.cov(x.T, ddof=0) + assert_allclose(mean_est, mean_ref, atol=1e-15) + assert_allclose(cov_est, cov_ref, rtol=1e-15) + + def test_fit_both_parameters_fixed(self): + data = np.full((2, 1), 3) + mean_fixed = 1. + cov_fixed = np.atleast_2d(1.) + mean, cov = multivariate_normal.fit(data, fix_mean=mean_fixed, + fix_cov=cov_fixed) + assert_equal(mean, mean_fixed) + assert_equal(cov, cov_fixed) + + @pytest.mark.parametrize('fix_mean', [np.zeros((2, 2)), + np.zeros((3, ))]) + def test_fit_fix_mean_input_validation(self, fix_mean): + msg = ("`fix_mean` must be a one-dimensional array the same " + "length as the dimensionality of the vectors `x`.") + with pytest.raises(ValueError, match=msg): + multivariate_normal.fit(np.eye(2), fix_mean=fix_mean) + + @pytest.mark.parametrize('fix_cov', [np.zeros((2, )), + np.zeros((3, 2)), + np.zeros((4, 4))]) + def test_fit_fix_cov_input_validation_dimension(self, fix_cov): + msg = ("`fix_cov` must be a two-dimensional square array " + "of same side length as the dimensionality of the " + "vectors `x`.") + with pytest.raises(ValueError, match=msg): + multivariate_normal.fit(np.eye(3), fix_cov=fix_cov) + + def test_fit_fix_cov_not_positive_semidefinite(self): + error_msg = "`fix_cov` must be symmetric positive semidefinite." 
+ with pytest.raises(ValueError, match=error_msg): + fix_cov = np.array([[1., 0.], [0., -1.]]) + multivariate_normal.fit(np.eye(2), fix_cov=fix_cov) + + def test_fit_fix_mean(self): + rng = np.random.default_rng(4385269356937404) + loc = rng.random(3) + A = rng.random((3, 3)) + cov = np.dot(A, A.T) + samples = multivariate_normal.rvs(mean=loc, cov=cov, size=100, + random_state=rng) + mean_free, cov_free = multivariate_normal.fit(samples) + logp_free = multivariate_normal.logpdf(samples, mean=mean_free, + cov=cov_free).sum() + mean_fix, cov_fix = multivariate_normal.fit(samples, fix_mean=loc) + assert_equal(mean_fix, loc) + logp_fix = multivariate_normal.logpdf(samples, mean=mean_fix, + cov=cov_fix).sum() + # test that fixed parameters result in lower likelihood than free + # parameters + assert logp_fix < logp_free + # test that a small perturbation of the resulting parameters + # has lower likelihood than the estimated parameters + A = rng.random((3, 3)) + m = 1e-8 * np.dot(A, A.T) + cov_perturbed = cov_fix + m + logp_perturbed = (multivariate_normal.logpdf(samples, + mean=mean_fix, + cov=cov_perturbed) + ).sum() + assert logp_perturbed < logp_fix + + + def test_fit_fix_cov(self): + rng = np.random.default_rng(4385269356937404) + loc = rng.random(3) + A = rng.random((3, 3)) + cov = np.dot(A, A.T) + samples = multivariate_normal.rvs(mean=loc, cov=cov, + size=100, random_state=rng) + mean_free, cov_free = multivariate_normal.fit(samples) + logp_free = multivariate_normal.logpdf(samples, mean=mean_free, + cov=cov_free).sum() + mean_fix, cov_fix = multivariate_normal.fit(samples, fix_cov=cov) + assert_equal(mean_fix, np.mean(samples, axis=0)) + assert_equal(cov_fix, cov) + logp_fix = multivariate_normal.logpdf(samples, mean=mean_fix, + cov=cov_fix).sum() + # test that fixed parameters result in lower likelihood than free + # parameters + assert logp_fix < logp_free + # test that a small perturbation of the resulting parameters + # has lower likelihood than the estimated parameters + mean_perturbed = mean_fix + 1e-8 * rng.random(3) + logp_perturbed = (multivariate_normal.logpdf(samples, + mean=mean_perturbed, + cov=cov_fix) + ).sum() + assert logp_perturbed < logp_fix + + +class TestMatrixNormal: + + def test_bad_input(self): + # Check that bad inputs raise errors + num_rows = 4 + num_cols = 3 + M = np.full((num_rows,num_cols), 0.3) + U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5) + V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3) + + # Incorrect dimensions + assert_raises(ValueError, matrix_normal, np.zeros((5,4,3))) + assert_raises(ValueError, matrix_normal, M, np.zeros(10), V) + assert_raises(ValueError, matrix_normal, M, U, np.zeros(10)) + assert_raises(ValueError, matrix_normal, M, U, U) + assert_raises(ValueError, matrix_normal, M, V, V) + assert_raises(ValueError, matrix_normal, M.T, U, V) + + e = np.linalg.LinAlgError + # Singular covariance for the rvs method of a non-frozen instance + assert_raises(e, matrix_normal.rvs, + M, U, np.ones((num_cols, num_cols))) + assert_raises(e, matrix_normal.rvs, + M, np.ones((num_rows, num_rows)), V) + # Singular covariance for a frozen instance + assert_raises(e, matrix_normal, M, U, np.ones((num_cols, num_cols))) + assert_raises(e, matrix_normal, M, np.ones((num_rows, num_rows)), V) + + def test_default_inputs(self): + # Check that default argument handling works + num_rows = 4 + num_cols = 3 + M = np.full((num_rows,num_cols), 0.3) + U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5) + V = 
0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3) + Z = np.zeros((num_rows, num_cols)) + Zr = np.zeros((num_rows, 1)) + Zc = np.zeros((1, num_cols)) + Ir = np.identity(num_rows) + Ic = np.identity(num_cols) + I1 = np.identity(1) + + assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape, + (num_rows, num_cols)) + assert_equal(matrix_normal.rvs(mean=M).shape, + (num_rows, num_cols)) + assert_equal(matrix_normal.rvs(rowcov=U).shape, + (num_rows, 1)) + assert_equal(matrix_normal.rvs(colcov=V).shape, + (1, num_cols)) + assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape, + (num_rows, num_cols)) + assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape, + (num_rows, num_cols)) + assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape, + (num_rows, num_cols)) + + assert_equal(matrix_normal(mean=M).rowcov, Ir) + assert_equal(matrix_normal(mean=M).colcov, Ic) + assert_equal(matrix_normal(rowcov=U).mean, Zr) + assert_equal(matrix_normal(rowcov=U).colcov, I1) + assert_equal(matrix_normal(colcov=V).mean, Zc) + assert_equal(matrix_normal(colcov=V).rowcov, I1) + assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic) + assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir) + assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z) + + def test_covariance_expansion(self): + # Check that covariance can be specified with scalar or vector + num_rows = 4 + num_cols = 3 + M = np.full((num_rows, num_cols), 0.3) + Uv = np.full(num_rows, 0.2) + Us = 0.2 + Vv = np.full(num_cols, 0.1) + Vs = 0.1 + + Ir = np.identity(num_rows) + Ic = np.identity(num_cols) + + assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).rowcov, + 0.2*Ir) + assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).colcov, + 0.1*Ic) + assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).rowcov, + 0.2*Ir) + assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).colcov, + 0.1*Ic) + + def test_frozen_matrix_normal(self): + for i in range(1,5): + for j in range(1,5): + M = np.full((i,j), 0.3) + U = 0.5 * np.identity(i) + np.full((i,i), 0.5) + V = 0.7 * np.identity(j) + np.full((j,j), 0.3) + + frozen = matrix_normal(mean=M, rowcov=U, colcov=V) + + rvs1 = frozen.rvs(random_state=1234) + rvs2 = matrix_normal.rvs(mean=M, rowcov=U, colcov=V, + random_state=1234) + assert_equal(rvs1, rvs2) + + X = frozen.rvs(random_state=1234) + + pdf1 = frozen.pdf(X) + pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V) + assert_equal(pdf1, pdf2) + + logpdf1 = frozen.logpdf(X) + logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V) + assert_equal(logpdf1, logpdf2) + + def test_matches_multivariate(self): + # Check that the pdfs match those obtained by vectorising and + # treating as a multivariate normal. 
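+        # If X ~ MN(M, U, V) with row covariance U and column covariance V,
+        # then vec(X) ~ N(vec(M), kron(V, U)), where vec() stacks columns --
+        # hence the column-major `X.T.flatten()` and `np.kron(V, U)` below.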
+ for i in range(1,5): + for j in range(1,5): + M = np.full((i,j), 0.3) + U = 0.5 * np.identity(i) + np.full((i,i), 0.5) + V = 0.7 * np.identity(j) + np.full((j,j), 0.3) + + frozen = matrix_normal(mean=M, rowcov=U, colcov=V) + X = frozen.rvs(random_state=1234) + pdf1 = frozen.pdf(X) + logpdf1 = frozen.logpdf(X) + entropy1 = frozen.entropy() + + vecX = X.T.flatten() + vecM = M.T.flatten() + cov = np.kron(V,U) + pdf2 = multivariate_normal.pdf(vecX, mean=vecM, cov=cov) + logpdf2 = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov) + entropy2 = multivariate_normal.entropy(mean=vecM, cov=cov) + + assert_allclose(pdf1, pdf2, rtol=1E-10) + assert_allclose(logpdf1, logpdf2, rtol=1E-10) + assert_allclose(entropy1, entropy2) + + def test_array_input(self): + # Check array of inputs has the same output as the separate entries. + num_rows = 4 + num_cols = 3 + M = np.full((num_rows,num_cols), 0.3) + U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5) + V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3) + N = 10 + + frozen = matrix_normal(mean=M, rowcov=U, colcov=V) + X1 = frozen.rvs(size=N, random_state=1234) + X2 = frozen.rvs(size=N, random_state=4321) + X = np.concatenate((X1[np.newaxis,:,:,:],X2[np.newaxis,:,:,:]), axis=0) + assert_equal(X.shape, (2, N, num_rows, num_cols)) + + array_logpdf = frozen.logpdf(X) + assert_equal(array_logpdf.shape, (2, N)) + for i in range(2): + for j in range(N): + separate_logpdf = matrix_normal.logpdf(X[i,j], mean=M, + rowcov=U, colcov=V) + assert_allclose(separate_logpdf, array_logpdf[i,j], 1E-10) + + def test_moments(self): + # Check that the sample moments match the parameters + num_rows = 4 + num_cols = 3 + M = np.full((num_rows,num_cols), 0.3) + U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5) + V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3) + N = 1000 + + frozen = matrix_normal(mean=M, rowcov=U, colcov=V) + X = frozen.rvs(size=N, random_state=1234) + + sample_mean = np.mean(X,axis=0) + assert_allclose(sample_mean, M, atol=0.1) + + sample_colcov = np.cov(X.reshape(N*num_rows,num_cols).T) + assert_allclose(sample_colcov, V, atol=0.1) + + sample_rowcov = np.cov(np.swapaxes(X,1,2).reshape( + N*num_cols,num_rows).T) + assert_allclose(sample_rowcov, U, atol=0.1) + + def test_samples(self): + # Regression test to ensure that we always generate the same stream of + # random variates. 
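+        # Any change to the sampling path that alters the consumed random
+        # stream (for example, a different factorization order) would change
+        # these hard-coded values, so a failure here signals a behavioural
+        # change rather than a numerical one.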
+ actual = matrix_normal.rvs( + mean=np.array([[1, 2], [3, 4]]), + rowcov=np.array([[4, -1], [-1, 2]]), + colcov=np.array([[5, 1], [1, 10]]), + random_state=np.random.default_rng(0), + size=2 + ) + expected = np.array( + [[[1.56228264238181, -1.24136424071189], + [2.46865788392114, 6.22964440489445]], + [[3.86405716144353, 10.73714311429529], + [2.59428444080606, 5.79987854490876]]] + ) + assert_allclose(actual, expected) + + +class TestDirichlet: + + def test_frozen_dirichlet(self): + np.random.seed(2846) + + n = np.random.randint(1, 32) + alpha = np.random.uniform(10e-10, 100, n) + + d = dirichlet(alpha) + + assert_equal(d.var(), dirichlet.var(alpha)) + assert_equal(d.mean(), dirichlet.mean(alpha)) + assert_equal(d.entropy(), dirichlet.entropy(alpha)) + num_tests = 10 + for i in range(num_tests): + x = np.random.uniform(10e-10, 100, n) + x /= np.sum(x) + assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha)) + assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha)) + + def test_numpy_rvs_shape_compatibility(self): + np.random.seed(2846) + alpha = np.array([1.0, 2.0, 3.0]) + x = np.random.dirichlet(alpha, size=7) + assert_equal(x.shape, (7, 3)) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + dirichlet.pdf(x.T, alpha) + dirichlet.pdf(x.T[:-1], alpha) + dirichlet.logpdf(x.T, alpha) + dirichlet.logpdf(x.T[:-1], alpha) + + def test_alpha_with_zeros(self): + np.random.seed(2846) + alpha = [1.0, 0.0, 3.0] + # don't pass invalid alpha to np.random.dirichlet + x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_alpha_with_negative_entries(self): + np.random.seed(2846) + alpha = [1.0, -2.0, 3.0] + # don't pass invalid alpha to np.random.dirichlet + x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_with_zeros(self): + alpha = np.array([1.0, 2.0, 3.0, 4.0]) + x = np.array([0.1, 0.0, 0.2, 0.7]) + dirichlet.pdf(x, alpha) + dirichlet.logpdf(x, alpha) + alpha = np.array([1.0, 1.0, 1.0, 1.0]) + assert_almost_equal(dirichlet.pdf(x, alpha), 6) + assert_almost_equal(dirichlet.logpdf(x, alpha), np.log(6)) + + def test_data_with_zeros_and_small_alpha(self): + alpha = np.array([1.0, 0.5, 3.0, 4.0]) + x = np.array([0.1, 0.0, 0.2, 0.7]) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_with_negative_entries(self): + alpha = np.array([1.0, 2.0, 3.0, 4.0]) + x = np.array([0.1, -0.1, 0.3, 0.7]) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_with_too_large_entries(self): + alpha = np.array([1.0, 2.0, 3.0, 4.0]) + x = np.array([0.1, 1.1, 0.3, 0.7]) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_too_deep_c(self): + alpha = np.array([1.0, 2.0, 3.0]) + x = np.full((2, 7, 7), 1 / 14) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_alpha_too_deep(self): + alpha = np.array([[1.0, 2.0], [3.0, 4.0]]) + x = np.full((2, 2, 7), 1 / 4) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_alpha_correct_depth(self): + alpha = 
np.array([1.0, 2.0, 3.0]) + x = np.full((3, 7), 1 / 3) + dirichlet.pdf(x, alpha) + dirichlet.logpdf(x, alpha) + + def test_non_simplex_data(self): + alpha = np.array([1.0, 2.0, 3.0]) + x = np.full((3, 7), 1 / 2) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_vector_too_short(self): + alpha = np.array([1.0, 2.0, 3.0, 4.0]) + x = np.full((2, 7), 1 / 2) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_data_vector_too_long(self): + alpha = np.array([1.0, 2.0, 3.0, 4.0]) + x = np.full((5, 7), 1 / 5) + assert_raises(ValueError, dirichlet.pdf, x, alpha) + assert_raises(ValueError, dirichlet.logpdf, x, alpha) + + def test_mean_var_cov(self): + # Reference values calculated by hand and confirmed with Mathematica, e.g. + # `Covariance[DirichletDistribution[{ 1, 0.8, 0.2, 10^-300}]]` + alpha = np.array([1., 0.8, 0.2]) + d = dirichlet(alpha) + + expected_mean = [0.5, 0.4, 0.1] + expected_var = [1. / 12., 0.08, 0.03] + expected_cov = [ + [ 1. / 12, -1. / 15, -1. / 60], + [-1. / 15, 2. / 25, -1. / 75], + [-1. / 60, -1. / 75, 3. / 100], + ] + + assert_array_almost_equal(d.mean(), expected_mean) + assert_array_almost_equal(d.var(), expected_var) + assert_array_almost_equal(d.cov(), expected_cov) + + def test_scalar_values(self): + alpha = np.array([0.2]) + d = dirichlet(alpha) + + # For alpha of length 1, mean and var should be scalar instead of array + assert_equal(d.mean().ndim, 0) + assert_equal(d.var().ndim, 0) + + assert_equal(d.pdf([1.]).ndim, 0) + assert_equal(d.logpdf([1.]).ndim, 0) + + def test_K_and_K_minus_1_calls_equal(self): + # Test that calls with K and K-1 entries yield the same results. + + np.random.seed(2846) + + n = np.random.randint(1, 32) + alpha = np.random.uniform(10e-10, 100, n) + + d = dirichlet(alpha) + num_tests = 10 + for i in range(num_tests): + x = np.random.uniform(10e-10, 100, n) + x /= np.sum(x) + assert_almost_equal(d.pdf(x[:-1]), d.pdf(x)) + + def test_multiple_entry_calls(self): + # Test that calls with multiple x vectors as matrix work + np.random.seed(2846) + + n = np.random.randint(1, 32) + alpha = np.random.uniform(10e-10, 100, n) + d = dirichlet(alpha) + + num_tests = 10 + num_multiple = 5 + xm = None + for i in range(num_tests): + for m in range(num_multiple): + x = np.random.uniform(10e-10, 100, n) + x /= np.sum(x) + if xm is not None: + xm = np.vstack((xm, x)) + else: + xm = x + rm = d.pdf(xm.T) + rs = None + for xs in xm: + r = d.pdf(xs) + if rs is not None: + rs = np.append(rs, r) + else: + rs = r + assert_array_almost_equal(rm, rs) + + def test_2D_dirichlet_is_beta(self): + np.random.seed(2846) + + alpha = np.random.uniform(10e-10, 100, 2) + d = dirichlet(alpha) + b = beta(alpha[0], alpha[1]) + + num_tests = 10 + for i in range(num_tests): + x = np.random.uniform(10e-10, 100, 2) + x /= np.sum(x) + assert_almost_equal(b.pdf(x), d.pdf([x])) + + assert_almost_equal(b.mean(), d.mean()[0]) + assert_almost_equal(b.var(), d.var()[0]) + + +def test_multivariate_normal_dimensions_mismatch(): + # Regression test for GH #3493. Check that setting up a PDF with a mean of + # length M and a covariance matrix of size (N, N), where M != N, raises a + # ValueError with an informative error message. + mu = np.array([0.0, 0.0]) + sigma = np.array([[1.0]]) + + assert_raises(ValueError, multivariate_normal, mu, sigma) + + # A simple check that the right error message was passed along. 
Checking + # that the entire message is there, word for word, would be somewhat + # fragile, so we just check for the leading part. + try: + multivariate_normal(mu, sigma) + except ValueError as e: + msg = "Dimension mismatch" + assert_equal(str(e)[:len(msg)], msg) + + +class TestWishart: + def test_scale_dimensions(self): + # Test that we can call the Wishart with various scale dimensions + + # Test case: dim=1, scale=1 + true_scale = np.array(1, ndmin=2) + scales = [ + 1, # scalar + [1], # iterable + np.array(1), # 0-dim + np.r_[1], # 1-dim + np.array(1, ndmin=2) # 2-dim + ] + for scale in scales: + w = wishart(1, scale) + assert_equal(w.scale, true_scale) + assert_equal(w.scale.shape, true_scale.shape) + + # Test case: dim=2, scale=[[1,0] + # [0,2] + true_scale = np.array([[1,0], + [0,2]]) + scales = [ + [1,2], # iterable + np.r_[1,2], # 1-dim + np.array([[1,0], # 2-dim + [0,2]]) + ] + for scale in scales: + w = wishart(2, scale) + assert_equal(w.scale, true_scale) + assert_equal(w.scale.shape, true_scale.shape) + + # We cannot call with a df < dim - 1 + assert_raises(ValueError, wishart, 1, np.eye(2)) + + # But we can call with dim - 1 < df < dim + wishart(1.1, np.eye(2)) # no error + # see gh-5562 + + # We cannot call with a 3-dimension array + scale = np.array(1, ndmin=3) + assert_raises(ValueError, wishart, 1, scale) + + def test_quantile_dimensions(self): + # Test that we can call the Wishart rvs with various quantile dimensions + + # If dim == 1, consider x.shape = [1,1,1] + X = [ + 1, # scalar + [1], # iterable + np.array(1), # 0-dim + np.r_[1], # 1-dim + np.array(1, ndmin=2), # 2-dim + np.array([1], ndmin=3) # 3-dim + ] + + w = wishart(1,1) + density = w.pdf(np.array(1, ndmin=3)) + for x in X: + assert_equal(w.pdf(x), density) + + # If dim == 1, consider x.shape = [1,1,*] + X = [ + [1,2,3], # iterable + np.r_[1,2,3], # 1-dim + np.array([1,2,3], ndmin=3) # 3-dim + ] + + w = wishart(1,1) + density = w.pdf(np.array([1,2,3], ndmin=3)) + for x in X: + assert_equal(w.pdf(x), density) + + # If dim == 2, consider x.shape = [2,2,1] + # where x[:,:,*] = np.eye(1)*2 + X = [ + 2, # scalar + [2,2], # iterable + np.array(2), # 0-dim + np.r_[2,2], # 1-dim + np.array([[2,0], + [0,2]]), # 2-dim + np.array([[2,0], + [0,2]])[:,:,np.newaxis] # 3-dim + ] + + w = wishart(2,np.eye(2)) + density = w.pdf(np.array([[2,0], + [0,2]])[:,:,np.newaxis]) + for x in X: + assert_equal(w.pdf(x), density) + + def test_frozen(self): + # Test that the frozen and non-frozen Wishart gives the same answers + + # Construct an arbitrary positive definite scale matrix + dim = 4 + scale = np.diag(np.arange(dim)+1) + scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2) + scale = np.dot(scale.T, scale) + + # Construct a collection of positive definite matrices to test the PDF + X = [] + for i in range(5): + x = np.diag(np.arange(dim)+(i+1)**2) + x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2) + x = np.dot(x.T, x) + X.append(x) + X = np.array(X).T + + # Construct a 1D and 2D set of parameters + parameters = [ + (10, 1, np.linspace(0.1, 10, 5)), # 1D case + (10, scale, X) + ] + + for (df, scale, x) in parameters: + w = wishart(df, scale) + assert_equal(w.var(), wishart.var(df, scale)) + assert_equal(w.mean(), wishart.mean(df, scale)) + assert_equal(w.mode(), wishart.mode(df, scale)) + assert_equal(w.entropy(), wishart.entropy(df, scale)) + assert_equal(w.pdf(x), wishart.pdf(x, df, scale)) + + def test_wishart_2D_rvs(self): + dim = 3 + df = 10 + + # Construct a simple non-diagonal positive definite 
matrix + scale = np.eye(dim) + scale[0,1] = 0.5 + scale[1,0] = 0.5 + + # Construct frozen Wishart random variables + w = wishart(df, scale) + + # Get the generated random variables from a known seed + np.random.seed(248042) + w_rvs = wishart.rvs(df, scale) + np.random.seed(248042) + frozen_w_rvs = w.rvs() + + # Manually calculate what it should be, based on the Bartlett (1933) + # decomposition of a Wishart into D A A' D', where D is the Cholesky + # factorization of the scale matrix and A is the lower triangular matrix + # with the square root of chi^2 variates on the diagonal and N(0,1) + # variates in the lower triangle. + np.random.seed(248042) + covariances = np.random.normal(size=3) + variances = np.r_[ + np.random.chisquare(df), + np.random.chisquare(df-1), + np.random.chisquare(df-2), + ]**0.5 + + # Construct the lower-triangular A matrix + A = np.diag(variances) + A[np.tril_indices(dim, k=-1)] = covariances + + # Wishart random variate + D = np.linalg.cholesky(scale) + DA = D.dot(A) + manual_w_rvs = np.dot(DA, DA.T) + + # Test for equality + assert_allclose(w_rvs, manual_w_rvs) + assert_allclose(frozen_w_rvs, manual_w_rvs) + + def test_1D_is_chisquared(self): + # The 1-dimensional Wishart with an identity scale matrix is just a + # chi-squared distribution. + # Test variance, mean, entropy, pdf + # Kolgomorov-Smirnov test for rvs + np.random.seed(482974) + + sn = 500 + dim = 1 + scale = np.eye(dim) + + df_range = np.arange(1, 10, 2, dtype=float) + X = np.linspace(0.1,10,num=10) + for df in df_range: + w = wishart(df, scale) + c = chi2(df) + + # Statistics + assert_allclose(w.var(), c.var()) + assert_allclose(w.mean(), c.mean()) + assert_allclose(w.entropy(), c.entropy()) + + # PDF + assert_allclose(w.pdf(X), c.pdf(X)) + + # rvs + rvs = w.rvs(size=sn) + args = (df,) + alpha = 0.01 + check_distribution_rvs('chi2', args, alpha, rvs) + + def test_is_scaled_chisquared(self): + # The 2-dimensional Wishart with an arbitrary scale matrix can be + # transformed to a scaled chi-squared distribution. 
+ # For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have + # :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)` + np.random.seed(482974) + + sn = 500 + df = 10 + dim = 4 + # Construct an arbitrary positive definite matrix + scale = np.diag(np.arange(4)+1) + scale[np.tril_indices(4, k=-1)] = np.arange(6) + scale = np.dot(scale.T, scale) + # Use :math:`\lambda = [1, \dots, 1]'` + lamda = np.ones((dim,1)) + sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze() + w = wishart(df, sigma_lamda) + c = chi2(df, scale=sigma_lamda) + + # Statistics + assert_allclose(w.var(), c.var()) + assert_allclose(w.mean(), c.mean()) + assert_allclose(w.entropy(), c.entropy()) + + # PDF + X = np.linspace(0.1,10,num=10) + assert_allclose(w.pdf(X), c.pdf(X)) + + # rvs + rvs = w.rvs(size=sn) + args = (df,0,sigma_lamda) + alpha = 0.01 + check_distribution_rvs('chi2', args, alpha, rvs) + +class TestMultinomial: + def test_logpmf(self): + vals1 = multinomial.logpmf((3,4), 7, (0.3, 0.7)) + assert_allclose(vals1, -1.483270127243324, rtol=1e-8) + + vals2 = multinomial.logpmf([3, 4], 0, [.3, .7]) + assert vals2 == -np.inf + + vals3 = multinomial.logpmf([0, 0], 0, [.3, .7]) + assert vals3 == 0 + + vals4 = multinomial.logpmf([3, 4], 0, [-2, 3]) + assert_allclose(vals4, np.nan, rtol=1e-8) + + def test_reduces_binomial(self): + # test that the multinomial pmf reduces to the binomial pmf in the 2d + # case + val1 = multinomial.logpmf((3, 4), 7, (0.3, 0.7)) + val2 = binom.logpmf(3, 7, 0.3) + assert_allclose(val1, val2, rtol=1e-8) + + val1 = multinomial.pmf((6, 8), 14, (0.1, 0.9)) + val2 = binom.pmf(6, 14, 0.1) + assert_allclose(val1, val2, rtol=1e-8) + + def test_R(self): + # test against the values produced by this R code + # (https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Multinom.html) + # X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3] + # X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL) + # X + # apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5))) + + n, p = 3, [1./8, 2./8, 5./8] + r_vals = {(0, 0, 3): 0.244140625, (1, 0, 2): 0.146484375, + (2, 0, 1): 0.029296875, (3, 0, 0): 0.001953125, + (0, 1, 2): 0.292968750, (1, 1, 1): 0.117187500, + (2, 1, 0): 0.011718750, (0, 2, 1): 0.117187500, + (1, 2, 0): 0.023437500, (0, 3, 0): 0.015625000} + for x in r_vals: + assert_allclose(multinomial.pmf(x, n, p), r_vals[x], atol=1e-14) + + @pytest.mark.parametrize("n", [0, 3]) + def test_rvs_np(self, n): + # test that .rvs agrees w/numpy + sc_rvs = multinomial.rvs(n, [1/4.]*3, size=7, random_state=123) + rndm = np.random.RandomState(123) + np_rvs = rndm.multinomial(n, [1/4.]*3, size=7) + assert_equal(sc_rvs, np_rvs) + + def test_pmf(self): + vals0 = multinomial.pmf((5,), 5, (1,)) + assert_allclose(vals0, 1, rtol=1e-8) + + vals1 = multinomial.pmf((3,4), 7, (.3, .7)) + assert_allclose(vals1, .22689449999999994, rtol=1e-8) + + vals2 = multinomial.pmf([[[3,5],[0,8]], [[-1, 9], [1, 1]]], 8, + (.1, .9)) + assert_allclose(vals2, [[.03306744, .43046721], [0, 0]], rtol=1e-8) + + x = np.empty((0,2), dtype=np.float64) + vals3 = multinomial.pmf(x, 4, (.3, .7)) + assert_equal(vals3, np.empty([], dtype=np.float64)) + + vals4 = multinomial.pmf([1,2], 4, (.3, .7)) + assert_allclose(vals4, 0, rtol=1e-8) + + vals5 = multinomial.pmf([3, 3, 0], 6, [2/3.0, 1/3.0, 0]) + assert_allclose(vals5, 0.219478737997, rtol=1e-8) + + vals5 = multinomial.pmf([0, 0, 0], 0, [2/3.0, 1/3.0, 0]) + assert vals5 == 1 + + vals6 = multinomial.pmf([2, 1, 0], 0, [2/3.0, 1/3.0, 0]) + assert 
vals6 == 0 + + def test_pmf_broadcasting(self): + vals0 = multinomial.pmf([1, 2], 3, [[.1, .9], [.2, .8]]) + assert_allclose(vals0, [.243, .384], rtol=1e-8) + + vals1 = multinomial.pmf([1, 2], [3, 4], [.1, .9]) + assert_allclose(vals1, [.243, 0], rtol=1e-8) + + vals2 = multinomial.pmf([[[1, 2], [1, 1]]], 3, [.1, .9]) + assert_allclose(vals2, [[.243, 0]], rtol=1e-8) + + vals3 = multinomial.pmf([1, 2], [[[3], [4]]], [.1, .9]) + assert_allclose(vals3, [[[.243], [0]]], rtol=1e-8) + + vals4 = multinomial.pmf([[1, 2], [1,1]], [[[[3]]]], [.1, .9]) + assert_allclose(vals4, [[[[.243, 0]]]], rtol=1e-8) + + @pytest.mark.parametrize("n", [0, 5]) + def test_cov(self, n): + cov1 = multinomial.cov(n, (.2, .3, .5)) + cov2 = [[n*.2*.8, -n*.2*.3, -n*.2*.5], + [-n*.3*.2, n*.3*.7, -n*.3*.5], + [-n*.5*.2, -n*.5*.3, n*.5*.5]] + assert_allclose(cov1, cov2, rtol=1e-8) + + def test_cov_broadcasting(self): + cov1 = multinomial.cov(5, [[.1, .9], [.2, .8]]) + cov2 = [[[.45, -.45],[-.45, .45]], [[.8, -.8], [-.8, .8]]] + assert_allclose(cov1, cov2, rtol=1e-8) + + cov3 = multinomial.cov([4, 5], [.1, .9]) + cov4 = [[[.36, -.36], [-.36, .36]], [[.45, -.45], [-.45, .45]]] + assert_allclose(cov3, cov4, rtol=1e-8) + + cov5 = multinomial.cov([4, 5], [[.3, .7], [.4, .6]]) + cov6 = [[[4*.3*.7, -4*.3*.7], [-4*.3*.7, 4*.3*.7]], + [[5*.4*.6, -5*.4*.6], [-5*.4*.6, 5*.4*.6]]] + assert_allclose(cov5, cov6, rtol=1e-8) + + @pytest.mark.parametrize("n", [0, 2]) + def test_entropy(self, n): + # this is equivalent to a binomial distribution with n=2, so the + # entropy .77899774929 is easily computed "by hand" + ent0 = multinomial.entropy(n, [.2, .8]) + assert_allclose(ent0, binom.entropy(n, .2), rtol=1e-8) + + def test_entropy_broadcasting(self): + ent0 = multinomial.entropy([2, 3], [.2, .3]) + assert_allclose(ent0, [binom.entropy(2, .2), binom.entropy(3, .2)], + rtol=1e-8) + + ent1 = multinomial.entropy([7, 8], [[.3, .7], [.4, .6]]) + assert_allclose(ent1, [binom.entropy(7, .3), binom.entropy(8, .4)], + rtol=1e-8) + + ent2 = multinomial.entropy([[7], [8]], [[.3, .7], [.4, .6]]) + assert_allclose(ent2, + [[binom.entropy(7, .3), binom.entropy(7, .4)], + [binom.entropy(8, .3), binom.entropy(8, .4)]], + rtol=1e-8) + + @pytest.mark.parametrize("n", [0, 5]) + def test_mean(self, n): + mean1 = multinomial.mean(n, [.2, .8]) + assert_allclose(mean1, [n*.2, n*.8], rtol=1e-8) + + def test_mean_broadcasting(self): + mean1 = multinomial.mean([5, 6], [.2, .8]) + assert_allclose(mean1, [[5*.2, 5*.8], [6*.2, 6*.8]], rtol=1e-8) + + def test_frozen(self): + # The frozen distribution should agree with the regular one + np.random.seed(1234) + n = 12 + pvals = (.1, .2, .3, .4) + x = [[0,0,0,12],[0,0,1,11],[0,1,1,10],[1,1,1,9],[1,1,2,8]] + x = np.asarray(x, dtype=np.float64) + mn_frozen = multinomial(n, pvals) + assert_allclose(mn_frozen.pmf(x), multinomial.pmf(x, n, pvals)) + assert_allclose(mn_frozen.logpmf(x), multinomial.logpmf(x, n, pvals)) + assert_allclose(mn_frozen.entropy(), multinomial.entropy(n, pvals)) + + def test_gh_11860(self): + # gh-11860 reported cases in which the adjustments made by multinomial + # to the last element of `p` can cause `nan`s even when the input is + # essentially valid. Check that a pathological case returns a finite, + # nonzero result. (This would fail in main before the PR.) 
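+        # With p[-1] on the order of 1e-30, the adjustment multinomial makes
+        # to the last element of `p` (roughly 1 - sum(p[:-1])) can round to
+        # a tiny negative value in double precision; logpmf must nevertheless
+        # remain finite.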
+        n = 88
+        rng = np.random.default_rng(8879715917488330089)
+        p = rng.random(n)
+        p[-1] = 1e-30
+        p /= np.sum(p)
+        x = np.ones(n)
+        logpmf = multinomial.logpmf(x, n, p)
+        assert np.isfinite(logpmf)
+
+
+class TestInvwishart:
+    def test_frozen(self):
+        # Test that the frozen and non-frozen inverse Wishart give the same
+        # answers
+
+        # Construct an arbitrary positive definite scale matrix
+        dim = 4
+        scale = np.diag(np.arange(dim)+1)
+        scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
+        scale = np.dot(scale.T, scale)
+
+        # Construct a collection of positive definite matrices to test the PDF
+        X = []
+        for i in range(5):
+            x = np.diag(np.arange(dim)+(i+1)**2)
+            x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
+            x = np.dot(x.T, x)
+            X.append(x)
+        X = np.array(X).T
+
+        # Construct a 1D and 2D set of parameters
+        parameters = [
+            (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
+            (10, scale, X)
+        ]
+
+        for (df, scale, x) in parameters:
+            iw = invwishart(df, scale)
+            assert_equal(iw.var(), invwishart.var(df, scale))
+            assert_equal(iw.mean(), invwishart.mean(df, scale))
+            assert_equal(iw.mode(), invwishart.mode(df, scale))
+            assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
+
+    def test_1D_is_invgamma(self):
+        # The 1-dimensional inverse Wishart with an identity scale matrix is
+        # just an inverse gamma distribution.
+        # Test variance, mean, pdf, entropy
+        # Kolmogorov-Smirnov test for rvs
+        np.random.seed(482974)
+
+        sn = 500
+        dim = 1
+        scale = np.eye(dim)
+
+        df_range = np.arange(5, 20, 2, dtype=float)
+        X = np.linspace(0.1, 10, num=10)
+        for df in df_range:
+            iw = invwishart(df, scale)
+            ig = invgamma(df/2, scale=1./2)
+
+            # Statistics
+            assert_allclose(iw.var(), ig.var())
+            assert_allclose(iw.mean(), ig.mean())
+
+            # PDF
+            assert_allclose(iw.pdf(X), ig.pdf(X))
+
+            # rvs
+            rvs = iw.rvs(size=sn)
+            args = (df/2, 0, 1./2)
+            alpha = 0.01
+            check_distribution_rvs('invgamma', args, alpha, rvs)
+
+            # entropy
+            assert_allclose(iw.entropy(), ig.entropy())
+
+    def test_invwishart_2D_rvs(self):
+        dim = 3
+        df = 10
+
+        # Construct a simple non-diagonal positive definite matrix
+        scale = np.eye(dim)
+        scale[0,1] = 0.5
+        scale[1,0] = 0.5
+
+        # Construct frozen inverse-Wishart random variables
+        iw = invwishart(df, scale)
+
+        # Get the generated random variables from a known seed
+        np.random.seed(608072)
+        iw_rvs = invwishart.rvs(df, scale)
+        np.random.seed(608072)
+        frozen_iw_rvs = iw.rvs()
+
+        # Manually calculate what it should be, based on the decomposition in
+        # https://arxiv.org/abs/2310.15884 of an inverse Wishart into L L',
+        # where L A = D, D is the Cholesky factorization of the scale matrix,
+        # and A is the lower triangular matrix with the square root of chi^2
+        # variates on the diagonal and N(0,1) variates in the lower triangle.
+        # The diagonal chi^2 variates in this A are reversed compared to those
+        # in the Bartlett decomposition A for Wishart rvs.
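+        # (Sketch of the algebra being verified below, in the notation of the
+        # comment above: L = solve(A', D')' equals D @ inv(A), so L A = D by
+        # construction. A quick standalone check, assuming A is any invertible
+        # lower-triangular matrix and D = np.linalg.cholesky(scale):
+        #
+        #     L = np.linalg.solve(A.T, D.T).T
+        #     np.testing.assert_allclose(L @ A, D)
+        # )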
+ np.random.seed(608072) + covariances = np.random.normal(size=3) + variances = np.r_[ + np.random.chisquare(df-2), + np.random.chisquare(df-1), + np.random.chisquare(df), + ]**0.5 + + # Construct the lower-triangular A matrix + A = np.diag(variances) + A[np.tril_indices(dim, k=-1)] = covariances + + # inverse-Wishart random variate + D = np.linalg.cholesky(scale) + L = np.linalg.solve(A.T, D.T).T + manual_iw_rvs = np.dot(L, L.T) + + # Test for equality + assert_allclose(iw_rvs, manual_iw_rvs) + assert_allclose(frozen_iw_rvs, manual_iw_rvs) + + def test_sample_mean(self): + """Test that sample mean consistent with known mean.""" + # Construct an arbitrary positive definite scale matrix + df = 10 + sample_size = 20_000 + for dim in [1, 5]: + scale = np.diag(np.arange(dim) + 1) + scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim - 1) / 2) + scale = np.dot(scale.T, scale) + + dist = invwishart(df, scale) + Xmean_exp = dist.mean() + Xvar_exp = dist.var() + Xmean_std = (Xvar_exp / sample_size)**0.5 # asymptotic SE of mean estimate + + X = dist.rvs(size=sample_size, random_state=1234) + Xmean_est = X.mean(axis=0) + + ntests = dim*(dim + 1)//2 + fail_rate = 0.01 / ntests # correct for multiple tests + max_diff = norm.ppf(1 - fail_rate / 2) + assert np.allclose( + (Xmean_est - Xmean_exp) / Xmean_std, + 0, + atol=max_diff, + ) + + def test_logpdf_4x4(self): + """Regression test for gh-8844.""" + X = np.array([[2, 1, 0, 0.5], + [1, 2, 0.5, 0.5], + [0, 0.5, 3, 1], + [0.5, 0.5, 1, 2]]) + Psi = np.array([[9, 7, 3, 1], + [7, 9, 5, 1], + [3, 5, 8, 2], + [1, 1, 2, 9]]) + nu = 6 + prob = invwishart.logpdf(X, nu, Psi) + # Explicit calculation from the formula on wikipedia. + p = X.shape[0] + sig, logdetX = np.linalg.slogdet(X) + sig, logdetPsi = np.linalg.slogdet(Psi) + M = np.linalg.solve(X, Psi) + expected = ((nu/2)*logdetPsi + - (nu*p/2)*np.log(2) + - multigammaln(nu/2, p) + - (nu + p + 1)/2*logdetX + - 0.5*M.trace()) + assert_allclose(prob, expected) + + +class TestSpecialOrthoGroup: + def test_reproducibility(self): + np.random.seed(514) + x = special_ortho_group.rvs(3) + expected = np.array([[-0.99394515, -0.04527879, 0.10011432], + [0.04821555, -0.99846897, 0.02711042], + [0.09873351, 0.03177334, 0.99460653]]) + assert_array_almost_equal(x, expected) + + random_state = np.random.RandomState(seed=514) + x = special_ortho_group.rvs(3, random_state=random_state) + assert_array_almost_equal(x, expected) + + def test_invalid_dim(self): + assert_raises(ValueError, special_ortho_group.rvs, None) + assert_raises(ValueError, special_ortho_group.rvs, (2, 2)) + assert_raises(ValueError, special_ortho_group.rvs, 1) + assert_raises(ValueError, special_ortho_group.rvs, 2.5) + + def test_frozen_matrix(self): + dim = 7 + frozen = special_ortho_group(dim) + + rvs1 = frozen.rvs(random_state=1234) + rvs2 = special_ortho_group.rvs(dim, random_state=1234) + + assert_equal(rvs1, rvs2) + + def test_det_and_ortho(self): + xs = [special_ortho_group.rvs(dim) + for dim in range(2,12) + for i in range(3)] + + # Test that determinants are always +1 + dets = [np.linalg.det(x) for x in xs] + assert_allclose(dets, [1.]*30, rtol=1e-13) + + # Test that these are orthogonal matrices + for x in xs: + assert_array_almost_equal(np.dot(x, x.T), + np.eye(x.shape[0])) + + def test_haar(self): + # Test that the distribution is constant under rotation + # Every column should have the same distribution + # Additionally, the distribution should be invariant under another rotation + + # Generate samples + dim = 5 + samples = 1000 # Not 
too many, or the test takes too long + ks_prob = .05 + np.random.seed(514) + xs = special_ortho_group.rvs(dim, size=samples) + + # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3), + # effectively picking off entries in the matrices of xs. + # These projections should all have the same distribution, + # establishing rotational invariance. We use the two-sided + # KS test to confirm this. + # We could instead test that angles between random vectors + # are uniformly distributed, but the below is sufficient. + # It is not feasible to consider all pairs, so pick a few. + els = ((0,0), (0,2), (1,4), (2,3)) + #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els} + proj = {(er, ec): sorted([x[er][ec] for x in xs]) for er, ec in els} + pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1] + ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs] + assert_array_less([ks_prob]*len(pairs), ks_tests) + + +class TestOrthoGroup: + def test_reproducibility(self): + seed = 514 + np.random.seed(seed) + x = ortho_group.rvs(3) + x2 = ortho_group.rvs(3, random_state=seed) + # Note this matrix has det -1, distinguishing O(N) from SO(N) + assert_almost_equal(np.linalg.det(x), -1) + expected = np.array([[0.381686, -0.090374, 0.919863], + [0.905794, -0.161537, -0.391718], + [-0.183993, -0.98272, -0.020204]]) + assert_array_almost_equal(x, expected) + assert_array_almost_equal(x2, expected) + + def test_invalid_dim(self): + assert_raises(ValueError, ortho_group.rvs, None) + assert_raises(ValueError, ortho_group.rvs, (2, 2)) + assert_raises(ValueError, ortho_group.rvs, 1) + assert_raises(ValueError, ortho_group.rvs, 2.5) + + def test_frozen_matrix(self): + dim = 7 + frozen = ortho_group(dim) + frozen_seed = ortho_group(dim, seed=1234) + + rvs1 = frozen.rvs(random_state=1234) + rvs2 = ortho_group.rvs(dim, random_state=1234) + rvs3 = frozen_seed.rvs(size=1) + + assert_equal(rvs1, rvs2) + assert_equal(rvs1, rvs3) + + def test_det_and_ortho(self): + xs = [[ortho_group.rvs(dim) + for i in range(10)] + for dim in range(2,12)] + + # Test that abs determinants are always +1 + dets = np.array([[np.linalg.det(x) for x in xx] for xx in xs]) + assert_allclose(np.fabs(dets), np.ones(dets.shape), rtol=1e-13) + + # Test that these are orthogonal matrices + for xx in xs: + for x in xx: + assert_array_almost_equal(np.dot(x, x.T), + np.eye(x.shape[0])) + + @pytest.mark.parametrize("dim", [2, 5, 10, 20]) + def test_det_distribution_gh18272(self, dim): + # Test that positive and negative determinants are equally likely. + rng = np.random.default_rng(6796248956179332344) + dist = ortho_group(dim=dim) + rvs = dist.rvs(size=5000, random_state=rng) + dets = scipy.linalg.det(rvs) + k = np.sum(dets > 0) + n = len(dets) + res = stats.binomtest(k, n) + low, high = res.proportion_ci(confidence_level=0.95) + assert low < 0.5 < high + + def test_haar(self): + # Test that the distribution is constant under rotation + # Every column should have the same distribution + # Additionally, the distribution should be invariant under another rotation + + # Generate samples + dim = 5 + samples = 1000 # Not too many, or the test takes too long + ks_prob = .05 + np.random.seed(518) # Note that the test is sensitive to seed too + xs = ortho_group.rvs(dim, size=samples) + + # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3), + # effectively picking off entries in the matrices of xs. + # These projections should all have the same distribution, + # establishing rotational invariance. 
We use the two-sided + # KS test to confirm this. + # We could instead test that angles between random vectors + # are uniformly distributed, but the below is sufficient. + # It is not feasible to consider all pairs, so pick a few. + els = ((0,0), (0,2), (1,4), (2,3)) + #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els} + proj = {(er, ec): sorted([x[er][ec] for x in xs]) for er, ec in els} + pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1] + ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs] + assert_array_less([ks_prob]*len(pairs), ks_tests) + + @pytest.mark.slow + def test_pairwise_distances(self): + # Test that the distribution of pairwise distances is close to correct. + np.random.seed(514) + + def random_ortho(dim): + u, _s, v = np.linalg.svd(np.random.normal(size=(dim, dim))) + return np.dot(u, v) + + for dim in range(2, 6): + def generate_test_statistics(rvs, N=1000, eps=1e-10): + stats = np.array([ + np.sum((rvs(dim=dim) - rvs(dim=dim))**2) + for _ in range(N) + ]) + # Add a bit of noise to account for numeric accuracy. + stats += np.random.uniform(-eps, eps, size=stats.shape) + return stats + + expected = generate_test_statistics(random_ortho) + actual = generate_test_statistics(scipy.stats.ortho_group.rvs) + + _D, p = scipy.stats.ks_2samp(expected, actual) + + assert_array_less(.05, p) + + +class TestRandomCorrelation: + def test_reproducibility(self): + np.random.seed(514) + eigs = (.5, .8, 1.2, 1.5) + x = random_correlation.rvs(eigs) + x2 = random_correlation.rvs(eigs, random_state=514) + expected = np.array([[1., -0.184851, 0.109017, -0.227494], + [-0.184851, 1., 0.231236, 0.326669], + [0.109017, 0.231236, 1., -0.178912], + [-0.227494, 0.326669, -0.178912, 1.]]) + assert_array_almost_equal(x, expected) + assert_array_almost_equal(x2, expected) + + def test_invalid_eigs(self): + assert_raises(ValueError, random_correlation.rvs, None) + assert_raises(ValueError, random_correlation.rvs, 'test') + assert_raises(ValueError, random_correlation.rvs, 2.5) + assert_raises(ValueError, random_correlation.rvs, [2.5]) + assert_raises(ValueError, random_correlation.rvs, [[1,2],[3,4]]) + assert_raises(ValueError, random_correlation.rvs, [2.5, -.5]) + assert_raises(ValueError, random_correlation.rvs, [1, 2, .1]) + + def test_frozen_matrix(self): + eigs = (.5, .8, 1.2, 1.5) + frozen = random_correlation(eigs) + frozen_seed = random_correlation(eigs, seed=514) + + rvs1 = random_correlation.rvs(eigs, random_state=514) + rvs2 = frozen.rvs(random_state=514) + rvs3 = frozen_seed.rvs() + + assert_equal(rvs1, rvs2) + assert_equal(rvs1, rvs3) + + def test_definition(self): + # Test the definition of a correlation matrix in several dimensions: + # + # 1. Det is product of eigenvalues (and positive by construction + # in examples) + # 2. 1's on diagonal + # 3. Matrix is symmetric + + def norm(i, e): + return i*e/sum(e) + + np.random.seed(123) + + eigs = [norm(i, np.random.uniform(size=i)) for i in range(2, 6)] + eigs.append([4,0,0,0]) + + ones = [[1.]*len(e) for e in eigs] + xs = [random_correlation.rvs(e) for e in eigs] + + # Test that determinants are products of eigenvalues + # These are positive by construction + # Could also test that the eigenvalues themselves are correct, + # but this seems sufficient. 
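+        # (For reference: det(X) equals the product of the eigenvalues of X,
+        # so an equivalent standalone check for a symmetric x would be
+        #
+        #     np.allclose(np.linalg.det(x), np.prod(np.linalg.eigvalsh(x)))
+        # )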
+ dets = [np.fabs(np.linalg.det(x)) for x in xs] + dets_known = [np.prod(e) for e in eigs] + assert_allclose(dets, dets_known, rtol=1e-13, atol=1e-13) + + # Test for 1's on the diagonal + diags = [np.diag(x) for x in xs] + for a, b in zip(diags, ones): + assert_allclose(a, b, rtol=1e-13) + + # Correlation matrices are symmetric + for x in xs: + assert_allclose(x, x.T, rtol=1e-13) + + def test_to_corr(self): + # Check some corner cases in to_corr + + # ajj == 1 + m = np.array([[0.1, 0], [0, 1]], dtype=float) + m = random_correlation._to_corr(m) + assert_allclose(m, np.array([[1, 0], [0, 0.1]])) + + # Floating point overflow; fails to compute the correct + # rotation, but should still produce some valid rotation + # rather than infs/nans + with np.errstate(over='ignore'): + g = np.array([[0, 1], [-1, 0]]) + + m0 = np.array([[1e300, 0], [0, np.nextafter(1, 0)]], dtype=float) + m = random_correlation._to_corr(m0.copy()) + assert_allclose(m, g.T.dot(m0).dot(g)) + + m0 = np.array([[0.9, 1e300], [1e300, 1.1]], dtype=float) + m = random_correlation._to_corr(m0.copy()) + assert_allclose(m, g.T.dot(m0).dot(g)) + + # Zero discriminant; should set the first diag entry to 1 + m0 = np.array([[2, 1], [1, 2]], dtype=float) + m = random_correlation._to_corr(m0.copy()) + assert_allclose(m[0,0], 1) + + # Slightly negative discriminant; should be approx correct still + m0 = np.array([[2 + 1e-7, 1], [1, 2]], dtype=float) + m = random_correlation._to_corr(m0.copy()) + assert_allclose(m[0,0], 1) + + +class TestUniformDirection: + @pytest.mark.parametrize("dim", [1, 3]) + @pytest.mark.parametrize("size", [None, 1, 5, (5, 4)]) + def test_samples(self, dim, size): + # test that samples have correct shape and norm 1 + rng = np.random.default_rng(2777937887058094419) + uniform_direction_dist = uniform_direction(dim, seed=rng) + samples = uniform_direction_dist.rvs(size) + mean, cov = np.zeros(dim), np.eye(dim) + expected_shape = rng.multivariate_normal(mean, cov, size=size).shape + assert samples.shape == expected_shape + norms = np.linalg.norm(samples, axis=-1) + assert_allclose(norms, 1.) 
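+    # (Hedged usage sketch, not part of the suite: `uniform_direction(dim)`
+    # draws points uniformly on the unit sphere in R^dim, e.g.
+    #
+    #     rng = np.random.default_rng(0)
+    #     v = uniform_direction(3).rvs(4, random_state=rng)  # shape (4, 3)
+    #     np.linalg.norm(v, axis=-1)  # all ones, up to rounding
+    # )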
+ + @pytest.mark.parametrize("dim", [None, 0, (2, 2), 2.5]) + def test_invalid_dim(self, dim): + message = ("Dimension of vector must be specified, " + "and must be an integer greater than 0.") + with pytest.raises(ValueError, match=message): + uniform_direction.rvs(dim) + + def test_frozen_distribution(self): + dim = 5 + frozen = uniform_direction(dim) + frozen_seed = uniform_direction(dim, seed=514) + + rvs1 = frozen.rvs(random_state=514) + rvs2 = uniform_direction.rvs(dim, random_state=514) + rvs3 = frozen_seed.rvs() + + assert_equal(rvs1, rvs2) + assert_equal(rvs1, rvs3) + + @pytest.mark.parametrize("dim", [2, 5, 8]) + def test_uniform(self, dim): + rng = np.random.default_rng(1036978481269651776) + spherical_dist = uniform_direction(dim, seed=rng) + # generate random, orthogonal vectors + v1, v2 = spherical_dist.rvs(size=2) + v2 -= v1 @ v2 * v1 + v2 /= np.linalg.norm(v2) + assert_allclose(v1 @ v2, 0, atol=1e-14) # orthogonal + # generate data and project onto orthogonal vectors + samples = spherical_dist.rvs(size=10000) + s1 = samples @ v1 + s2 = samples @ v2 + angles = np.arctan2(s1, s2) + # test that angles follow a uniform distribution + # normalize angles to range [0, 1] + angles += np.pi + angles /= 2*np.pi + # perform KS test + uniform_dist = uniform() + kstest_result = kstest(angles, uniform_dist.cdf) + assert kstest_result.pvalue > 0.05 + + +class TestUnitaryGroup: + def test_reproducibility(self): + np.random.seed(514) + x = unitary_group.rvs(3) + x2 = unitary_group.rvs(3, random_state=514) + + expected = np.array( + [[0.308771+0.360312j, 0.044021+0.622082j, 0.160327+0.600173j], + [0.732757+0.297107j, 0.076692-0.4614j, -0.394349+0.022613j], + [-0.148844+0.357037j, -0.284602-0.557949j, 0.607051+0.299257j]] + ) + + assert_array_almost_equal(x, expected) + assert_array_almost_equal(x2, expected) + + def test_invalid_dim(self): + assert_raises(ValueError, unitary_group.rvs, None) + assert_raises(ValueError, unitary_group.rvs, (2, 2)) + assert_raises(ValueError, unitary_group.rvs, 1) + assert_raises(ValueError, unitary_group.rvs, 2.5) + + def test_frozen_matrix(self): + dim = 7 + frozen = unitary_group(dim) + frozen_seed = unitary_group(dim, seed=514) + + rvs1 = frozen.rvs(random_state=514) + rvs2 = unitary_group.rvs(dim, random_state=514) + rvs3 = frozen_seed.rvs(size=1) + + assert_equal(rvs1, rvs2) + assert_equal(rvs1, rvs3) + + def test_unitarity(self): + xs = [unitary_group.rvs(dim) + for dim in range(2,12) + for i in range(3)] + + # Test that these are unitary matrices + for x in xs: + assert_allclose(np.dot(x, x.conj().T), np.eye(x.shape[0]), atol=1e-15) + + def test_haar(self): + # Test that the eigenvalues, which lie on the unit circle in + # the complex plane, are uncorrelated. + + # Generate samples + dim = 5 + samples = 1000 # Not too many, or the test takes too long + np.random.seed(514) # Note that the test is sensitive to seed too + xs = unitary_group.rvs(dim, size=samples) + + # The angles "x" of the eigenvalues should be uniformly distributed + # Overall this seems to be a necessary but weak test of the distribution. + eigs = np.vstack([scipy.linalg.eigvals(x) for x in xs]) + x = np.arctan2(eigs.imag, eigs.real) + res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf) + assert_(res.pvalue > 0.05) + + +class TestMultivariateT: + + # These tests were created by running vpa(mvtpdf(...)) in MATLAB. The + # function takes no `mu` parameter. 
The tests were run as + # + # >> ans = vpa(mvtpdf(x - mu, shape, df)); + # + PDF_TESTS = [( + # x + [ + [1, 2], + [4, 1], + [2, 1], + [2, 4], + [1, 4], + [4, 1], + [3, 2], + [3, 3], + [4, 4], + [5, 1], + ], + # loc + [0, 0], + # shape + [ + [1, 0], + [0, 1] + ], + # df + 4, + # ans + [ + 0.013972450422333741737457302178882, + 0.0010998721906793330026219646100571, + 0.013972450422333741737457302178882, + 0.00073682844024025606101402363634634, + 0.0010998721906793330026219646100571, + 0.0010998721906793330026219646100571, + 0.0020732579600816823488240725481546, + 0.00095660371505271429414668515889275, + 0.00021831953784896498569831346792114, + 0.00037725616140301147447000396084604 + ] + + ), ( + # x + [ + [0.9718, 0.1298, 0.8134], + [0.4922, 0.5522, 0.7185], + [0.3010, 0.1491, 0.5008], + [0.5971, 0.2585, 0.8940], + [0.5434, 0.5287, 0.9507], + ], + # loc + [-1, 1, 50], + # shape + [ + [1.0000, 0.5000, 0.2500], + [0.5000, 1.0000, -0.1000], + [0.2500, -0.1000, 1.0000], + ], + # df + 8, + # ans + [ + 0.00000000000000069609279697467772867405511133763, + 0.00000000000000073700739052207366474839369535934, + 0.00000000000000069522909962669171512174435447027, + 0.00000000000000074212293557998314091880208889767, + 0.00000000000000077039675154022118593323030449058, + ] + )] + + @pytest.mark.parametrize("x, loc, shape, df, ans", PDF_TESTS) + def test_pdf_correctness(self, x, loc, shape, df, ans): + dist = multivariate_t(loc, shape, df, seed=0) + val = dist.pdf(x) + assert_array_almost_equal(val, ans) + + @pytest.mark.parametrize("x, loc, shape, df, ans", PDF_TESTS) + def test_logpdf_correct(self, x, loc, shape, df, ans): + dist = multivariate_t(loc, shape, df, seed=0) + val1 = dist.pdf(x) + val2 = dist.logpdf(x) + assert_array_almost_equal(np.log(val1), val2) + + # https://github.com/scipy/scipy/issues/10042#issuecomment-576795195 + def test_mvt_with_df_one_is_cauchy(self): + x = [9, 7, 4, 1, -3, 9, 0, -3, -1, 3] + val = multivariate_t.pdf(x, df=1) + ans = cauchy.pdf(x) + assert_array_almost_equal(val, ans) + + def test_mvt_with_high_df_is_approx_normal(self): + # `normaltest` returns the chi-squared statistic and the associated + # p-value. The null hypothesis is that `x` came from a normal + # distribution, so a low p-value represents rejecting the null, i.e. + # that it is unlikely that `x` came a normal distribution. + P_VAL_MIN = 0.1 + + dist = multivariate_t(0, 1, df=100000, seed=1) + samples = dist.rvs(size=100000) + _, p = normaltest(samples) + assert (p > P_VAL_MIN) + + dist = multivariate_t([-2, 3], [[10, -1], [-1, 10]], df=100000, + seed=42) + samples = dist.rvs(size=100000) + _, p = normaltest(samples) + assert ((p > P_VAL_MIN).all()) + + @patch('scipy.stats.multivariate_normal._logpdf') + def test_mvt_with_inf_df_calls_normal(self, mock): + dist = multivariate_t(0, 1, df=np.inf, seed=7) + assert isinstance(dist, multivariate_normal_frozen) + multivariate_t.pdf(0, df=np.inf) + assert mock.call_count == 1 + multivariate_t.logpdf(0, df=np.inf) + assert mock.call_count == 2 + + def test_shape_correctness(self): + # pdf and logpdf should return scalar when the + # number of samples in x is one. + dim = 4 + loc = np.zeros(dim) + shape = np.eye(dim) + df = 4.5 + x = np.zeros(dim) + res = multivariate_t(loc, shape, df).pdf(x) + assert np.isscalar(res) + res = multivariate_t(loc, shape, df).logpdf(x) + assert np.isscalar(res) + + # pdf() and logpdf() should return probabilities of shape + # (n_samples,) when x has n_samples. 
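+        # (For example, multivariate_t(np.zeros(2), np.eye(2), 3.)
+        # .pdf(np.zeros((7, 2))) is expected to have shape (7,); the
+        # assertions below exercise exactly that.)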
+ n_samples = 7 + x = np.random.random((n_samples, dim)) + res = multivariate_t(loc, shape, df).pdf(x) + assert (res.shape == (n_samples,)) + res = multivariate_t(loc, shape, df).logpdf(x) + assert (res.shape == (n_samples,)) + + # rvs() should return scalar unless a size argument is applied. + res = multivariate_t(np.zeros(1), np.eye(1), 1).rvs() + assert np.isscalar(res) + + # rvs() should return vector of shape (size,) if size argument + # is applied. + size = 7 + res = multivariate_t(np.zeros(1), np.eye(1), 1).rvs(size=size) + assert (res.shape == (size,)) + + def test_default_arguments(self): + dist = multivariate_t() + assert_equal(dist.loc, [0]) + assert_equal(dist.shape, [[1]]) + assert (dist.df == 1) + + DEFAULT_ARGS_TESTS = [ + (None, None, None, 0, 1, 1), + (None, None, 7, 0, 1, 7), + (None, [[7, 0], [0, 7]], None, [0, 0], [[7, 0], [0, 7]], 1), + (None, [[7, 0], [0, 7]], 7, [0, 0], [[7, 0], [0, 7]], 7), + ([7, 7], None, None, [7, 7], [[1, 0], [0, 1]], 1), + ([7, 7], None, 7, [7, 7], [[1, 0], [0, 1]], 7), + ([7, 7], [[7, 0], [0, 7]], None, [7, 7], [[7, 0], [0, 7]], 1), + ([7, 7], [[7, 0], [0, 7]], 7, [7, 7], [[7, 0], [0, 7]], 7) + ] + + @pytest.mark.parametrize("loc, shape, df, loc_ans, shape_ans, df_ans", + DEFAULT_ARGS_TESTS) + def test_default_args(self, loc, shape, df, loc_ans, shape_ans, df_ans): + dist = multivariate_t(loc=loc, shape=shape, df=df) + assert_equal(dist.loc, loc_ans) + assert_equal(dist.shape, shape_ans) + assert (dist.df == df_ans) + + ARGS_SHAPES_TESTS = [ + (-1, 2, 3, [-1], [[2]], 3), + ([-1], [2], 3, [-1], [[2]], 3), + (np.array([-1]), np.array([2]), 3, [-1], [[2]], 3) + ] + + @pytest.mark.parametrize("loc, shape, df, loc_ans, shape_ans, df_ans", + ARGS_SHAPES_TESTS) + def test_scalar_list_and_ndarray_arguments(self, loc, shape, df, loc_ans, + shape_ans, df_ans): + dist = multivariate_t(loc, shape, df) + assert_equal(dist.loc, loc_ans) + assert_equal(dist.shape, shape_ans) + assert_equal(dist.df, df_ans) + + def test_argument_error_handling(self): + # `loc` should be a one-dimensional vector. + loc = [[1, 1]] + assert_raises(ValueError, + multivariate_t, + **dict(loc=loc)) + + # `shape` should be scalar or square matrix. + shape = [[1, 1], [2, 2], [3, 3]] + assert_raises(ValueError, + multivariate_t, + **dict(loc=loc, shape=shape)) + + # `df` should be greater than zero. + loc = np.zeros(2) + shape = np.eye(2) + df = -1 + assert_raises(ValueError, + multivariate_t, + **dict(loc=loc, shape=shape, df=df)) + df = 0 + assert_raises(ValueError, + multivariate_t, + **dict(loc=loc, shape=shape, df=df)) + + def test_reproducibility(self): + rng = np.random.RandomState(4) + loc = rng.uniform(size=3) + shape = np.eye(3) + dist1 = multivariate_t(loc, shape, df=3, seed=2) + dist2 = multivariate_t(loc, shape, df=3, seed=2) + samples1 = dist1.rvs(size=10) + samples2 = dist2.rvs(size=10) + assert_equal(samples1, samples2) + + def test_allow_singular(self): + # Make shape singular and verify error was raised. 
+        args = dict(loc=[0,0], shape=[[0,0],[0,1]], df=1, allow_singular=False)
+        assert_raises(np.linalg.LinAlgError, multivariate_t, **args)
+
+    @pytest.mark.parametrize("size", [(10, 3), (5, 6, 4, 3)])
+    @pytest.mark.parametrize("dim", [2, 3, 4, 5])
+    @pytest.mark.parametrize("df", [1., 2., np.inf])
+    def test_rvs(self, size, dim, df):
+        dist = multivariate_t(np.zeros(dim), np.eye(dim), df)
+        rvs = dist.rvs(size=size)
+        assert rvs.shape == size + (dim, )
+
+    def test_cdf_signs(self):
+        # check that sign of output is correct when np.any(lower > x)
+        mean = np.zeros(3)
+        cov = np.eye(3)
+        df = 10
+        b = [[1, 1, 1], [0, 0, 0], [1, 0, 1], [0, 1, 0]]
+        a = [[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]
+        # when odd number of elements of b < a, output is negative
+        expected_signs = np.array([1, -1, -1, 1])
+        cdf = multivariate_t.cdf(b, mean, cov, df, lower_limit=a)
+        assert_allclose(cdf, cdf[0]*expected_signs)
+
+    @pytest.mark.parametrize('dim', [1, 2, 5, 10])
+    def test_cdf_against_multivariate_normal(self, dim):
+        # Check accuracy against MVN randomly-generated cases
+        self.cdf_against_mvn_test(dim)
+
+    @pytest.mark.parametrize('dim', [3, 6, 9])
+    def test_cdf_against_multivariate_normal_singular(self, dim):
+        # Check accuracy against MVN for randomly-generated singular cases
+        self.cdf_against_mvn_test(3, True)
+
+    def cdf_against_mvn_test(self, dim, singular=False):
+        # Check for accuracy in the limit that df -> oo and MVT -> MVN
+        rng = np.random.default_rng(413722918996573)
+        n = 3
+
+        w = 10**rng.uniform(-2, 1, size=dim)
+        cov = _random_covariance(dim, w, rng, singular)
+
+        mean = 10**rng.uniform(-1, 2, size=dim) * np.sign(rng.normal(size=dim))
+        a = -10**rng.uniform(-1, 2, size=(n, dim)) + mean
+        b = 10**rng.uniform(-1, 2, size=(n, dim)) + mean
+
+        res = stats.multivariate_t.cdf(b, mean, cov, df=10000, lower_limit=a,
+                                       allow_singular=True, random_state=rng)
+        ref = stats.multivariate_normal.cdf(b, mean, cov, allow_singular=True,
+                                            lower_limit=a)
+        assert_allclose(res, ref, atol=5e-4)
+
+    def test_cdf_against_univariate_t(self):
+        rng = np.random.default_rng(413722918996573)
+        cov = 2
+        mean = 0
+        x = rng.normal(size=10, scale=np.sqrt(cov))
+        df = 3
+
+        res = stats.multivariate_t.cdf(x, mean, cov, df, lower_limit=-np.inf,
+                                       random_state=rng)
+        ref = stats.t.cdf(x, df, mean, np.sqrt(cov))
+        incorrect = stats.norm.cdf(x, mean, np.sqrt(cov))
+
+        assert_allclose(res, ref, atol=5e-4)  # close to t
+        assert np.all(np.abs(res - incorrect) > 1e-3)  # not close to normal
+
+    @pytest.mark.parametrize("dim", [2, 3, 5, 10])
+    @pytest.mark.parametrize("seed", [3363958638, 7891119608, 3887698049,
+                                      5013150848, 1495033423, 6170824608])
+    @pytest.mark.parametrize("singular", [False, True])
+    def test_cdf_against_qsimvtv(self, dim, seed, singular):
+        if singular and seed != 3363958638:
+            pytest.skip('Agreement with qsimvtv is not great in singular case')
+        rng = np.random.default_rng(seed)
+        w = 10**rng.uniform(-2, 2, size=dim)
+        cov = _random_covariance(dim, w, rng, singular)
+        mean = rng.random(dim)
+        a = -rng.random(dim)
+        b = rng.random(dim)
+        df = rng.random() * 5
+
+        # no lower limit
+        res = stats.multivariate_t.cdf(b, mean, cov, df, random_state=rng,
+                                       allow_singular=True)
+        with np.errstate(invalid='ignore'):
+            ref = _qsimvtv(20000, df, cov, np.inf*a, b - mean, rng)[0]
+        assert_allclose(res, ref, atol=2e-4, rtol=1e-3)
+
+        # with lower limit
+        res = stats.multivariate_t.cdf(b, mean, cov, df, lower_limit=a,
+                                       random_state=rng, allow_singular=True)
+        with np.errstate(invalid='ignore'):
+            ref = 
_qsimvtv(20000, df, cov, a - mean, b - mean, rng)[0] + assert_allclose(res, ref, atol=1e-4, rtol=1e-3) + + def test_cdf_against_generic_integrators(self): + # Compare result against generic numerical integrators + dim = 3 + rng = np.random.default_rng(41372291899657) + w = 10 ** rng.uniform(-1, 1, size=dim) + cov = _random_covariance(dim, w, rng, singular=True) + mean = rng.random(dim) + a = -rng.random(dim) + b = rng.random(dim) + df = rng.random() * 5 + + res = stats.multivariate_t.cdf(b, mean, cov, df, random_state=rng, + lower_limit=a) + + def integrand(x): + return stats.multivariate_t.pdf(x.T, mean, cov, df) + + ref = qmc_quad(integrand, a, b, qrng=stats.qmc.Halton(d=dim, seed=rng)) + assert_allclose(res, ref.integral, rtol=1e-3) + + def integrand(*zyx): + return stats.multivariate_t.pdf(zyx[::-1], mean, cov, df) + + ref = tplquad(integrand, a[0], b[0], a[1], b[1], a[2], b[2]) + assert_allclose(res, ref[0], rtol=1e-3) + + def test_against_matlab(self): + # Test against matlab mvtcdf: + # C = [6.21786909 0.2333667 7.95506077; + # 0.2333667 29.67390923 16.53946426; + # 7.95506077 16.53946426 19.17725252] + # df = 1.9559939787727658 + # mvtcdf([0, 0, 0], C, df) % 0.2523 + rng = np.random.default_rng(2967390923) + cov = np.array([[ 6.21786909, 0.2333667 , 7.95506077], + [ 0.2333667 , 29.67390923, 16.53946426], + [ 7.95506077, 16.53946426, 19.17725252]]) + df = 1.9559939787727658 + dist = stats.multivariate_t(shape=cov, df=df) + res = dist.cdf([0, 0, 0], random_state=rng) + ref = 0.2523 + assert_allclose(res, ref, rtol=1e-3) + + def test_frozen(self): + seed = 4137229573 + rng = np.random.default_rng(seed) + loc = rng.uniform(size=3) + x = rng.uniform(size=3) + loc + shape = np.eye(3) + df = rng.random() + args = (loc, shape, df) + + rng_frozen = np.random.default_rng(seed) + rng_unfrozen = np.random.default_rng(seed) + dist = stats.multivariate_t(*args, seed=rng_frozen) + assert_equal(dist.cdf(x), + multivariate_t.cdf(x, *args, random_state=rng_unfrozen)) + + def test_vectorized(self): + dim = 4 + n = (2, 3) + rng = np.random.default_rng(413722918996573) + A = rng.random(size=(dim, dim)) + cov = A @ A.T + mean = rng.random(dim) + x = rng.random(n + (dim,)) + df = rng.random() * 5 + + res = stats.multivariate_t.cdf(x, mean, cov, df, random_state=rng) + + def _cdf_1d(x): + return _qsimvtv(10000, df, cov, -np.inf*x, x-mean, rng)[0] + + ref = np.apply_along_axis(_cdf_1d, -1, x) + assert_allclose(res, ref, atol=1e-4, rtol=1e-3) + + @pytest.mark.parametrize("dim", (3, 7)) + def test_against_analytical(self, dim): + rng = np.random.default_rng(413722918996573) + A = scipy.linalg.toeplitz(c=[1] + [0.5] * (dim - 1)) + res = stats.multivariate_t(shape=A).cdf([0] * dim, random_state=rng) + ref = 1 / (dim + 1) + assert_allclose(res, ref, rtol=5e-5) + + def test_entropy_inf_df(self): + cov = np.eye(3, 3) + df = np.inf + mvt_entropy = stats.multivariate_t.entropy(shape=cov, df=df) + mvn_entropy = stats.multivariate_normal.entropy(None, cov) + assert mvt_entropy == mvn_entropy + + @pytest.mark.parametrize("df", [1, 10, 100]) + def test_entropy_1d(self, df): + mvt_entropy = stats.multivariate_t.entropy(shape=1., df=df) + t_entropy = stats.t.entropy(df=df) + assert_allclose(mvt_entropy, t_entropy, rtol=1e-13) + + # entropy reference values were computed via numerical integration + # + # def integrand(x, y, mvt): + # vec = np.array([x, y]) + # return mvt.logpdf(vec) * mvt.pdf(vec) + + # def multivariate_t_entropy_quad_2d(df, cov): + # dim = cov.shape[0] + # loc = np.zeros((dim, )) + # mvt = 
stats.multivariate_t(loc, cov, df) + # limit = 100 + # return -integrate.dblquad(integrand, -limit, limit, -limit, limit, + # args=(mvt, ))[0] + + @pytest.mark.parametrize("df, cov, ref, tol", + [(10, np.eye(2, 2), 3.0378770664093313, 1e-14), + (100, np.array([[0.5, 1], [1, 10]]), + 3.55102424550609, 1e-8)]) + def test_entropy_vs_numerical_integration(self, df, cov, ref, tol): + loc = np.zeros((2, )) + mvt = stats.multivariate_t(loc, cov, df) + assert_allclose(mvt.entropy(), ref, rtol=tol) + + @pytest.mark.parametrize( + "df, dim, ref, tol", + [ + (10, 1, 1.5212624929756808, 1e-15), + (100, 1, 1.4289633653182439, 1e-13), + (500, 1, 1.420939531869349, 1e-14), + (1e20, 1, 1.4189385332046727, 1e-15), + (1e100, 1, 1.4189385332046727, 1e-15), + (10, 10, 15.069150450832911, 1e-15), + (1000, 10, 14.19936546446673, 1e-13), + (1e20, 10, 14.189385332046728, 1e-15), + (1e100, 10, 14.189385332046728, 1e-15), + (10, 100, 148.28902883192654, 1e-15), + (1000, 100, 141.99155538003762, 1e-14), + (1e20, 100, 141.8938533204673, 1e-15), + (1e100, 100, 141.8938533204673, 1e-15), + ] + ) + def test_extreme_entropy(self, df, dim, ref, tol): + # Reference values were calculated with mpmath: + # from mpmath import mp + # mp.dps = 500 + # + # def mul_t_mpmath_entropy(dim, df=1): + # dim = mp.mpf(dim) + # df = mp.mpf(df) + # halfsum = (dim + df)/2 + # half_df = df/2 + # + # return float( + # -mp.loggamma(halfsum) + mp.loggamma(half_df) + # + dim / 2 * mp.log(df * mp.pi) + # + halfsum * (mp.digamma(halfsum) - mp.digamma(half_df)) + # + 0.0 + # ) + mvt = stats.multivariate_t(shape=np.eye(dim), df=df) + assert_allclose(mvt.entropy(), ref, rtol=tol) + + def test_entropy_with_covariance(self): + # Generated using np.randn(5, 5) and then rounding + # to two decimal places + _A = np.array([ + [1.42, 0.09, -0.49, 0.17, 0.74], + [-1.13, -0.01, 0.71, 0.4, -0.56], + [1.07, 0.44, -0.28, -0.44, 0.29], + [-1.5, -0.94, -0.67, 0.73, -1.1], + [0.17, -0.08, 1.46, -0.32, 1.36] + ]) + # Set cov to be a symmetric positive semi-definite matrix + cov = _A @ _A.T + + # Test the asymptotic case. For large degrees of freedom + # the entropy approaches the multivariate normal entropy. + df = 1e20 + mul_t_entropy = stats.multivariate_t.entropy(shape=cov, df=df) + mul_norm_entropy = multivariate_normal(None, cov=cov).entropy() + assert_allclose(mul_t_entropy, mul_norm_entropy, rtol=1e-15) + + # Test the regular case. For a dim of 5 the threshold comes out + # to be approximately 766.45. So using slightly + # different dfs on each site of the threshold, the entropies + # are being compared. 
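+        # (Hedged reference, assuming the standard closed form for the
+        # multivariate t entropy with shape matrix S, df v and dimension d:
+        #
+        #     H = -ln gamma((d+v)/2) + ln gamma(v/2) + (d/2) ln(v*pi)
+        #         + (1/2) ln det(S) + ((d+v)/2) * (psi((d+v)/2) - psi(v/2)),
+        #
+        # i.e. the mpmath formula above plus the log-determinant term for a
+        # non-identity shape; it could be coded directly as
+        #
+        #     from scipy.special import gammaln, digamma
+        #     def mvt_entropy_ref(S, v):
+        #         d = S.shape[0]
+        #         h = 0.5 * (d + v)
+        #         return (-gammaln(h) + gammaln(v/2)
+        #                 + 0.5*d*np.log(v*np.pi)
+        #                 + 0.5*np.linalg.slogdet(S)[1]
+        #                 + h*(digamma(h) - digamma(v/2)))
+        # )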
+ df1 = 765 + df2 = 768 + _entropy1 = stats.multivariate_t.entropy(shape=cov, df=df1) + _entropy2 = stats.multivariate_t.entropy(shape=cov, df=df2) + assert_allclose(_entropy1, _entropy2, rtol=1e-5) + + +class TestMultivariateHypergeom: + @pytest.mark.parametrize( + "x, m, n, expected", + [ + # Ground truth value from R dmvhyper + ([3, 4], [5, 10], 7, -1.119814), + # test for `n=0` + ([3, 4], [5, 10], 0, -np.inf), + # test for `x < 0` + ([-3, 4], [5, 10], 7, -np.inf), + # test for `m < 0` (RuntimeWarning issue) + ([3, 4], [-5, 10], 7, np.nan), + # test for all `m < 0` and `x.sum() != n` + ([[1, 2], [3, 4]], [[-4, -6], [-5, -10]], + [3, 7], [np.nan, np.nan]), + # test for `x < 0` and `m < 0` (RuntimeWarning issue) + ([-3, 4], [-5, 10], 1, np.nan), + # test for `x > m` + ([1, 11], [10, 1], 12, np.nan), + # test for `m < 0` (RuntimeWarning issue) + ([1, 11], [10, -1], 12, np.nan), + # test for `n < 0` + ([3, 4], [5, 10], -7, np.nan), + # test for `x.sum() != n` + ([3, 3], [5, 10], 7, -np.inf) + ] + ) + def test_logpmf(self, x, m, n, expected): + vals = multivariate_hypergeom.logpmf(x, m, n) + assert_allclose(vals, expected, rtol=1e-6) + + def test_reduces_hypergeom(self): + # test that the multivariate_hypergeom pmf reduces to the + # hypergeom pmf in the 2d case. + val1 = multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4) + val2 = hypergeom.pmf(k=3, M=15, n=4, N=10) + assert_allclose(val1, val2, rtol=1e-8) + + val1 = multivariate_hypergeom.pmf(x=[7, 3], m=[15, 10], n=10) + val2 = hypergeom.pmf(k=7, M=25, n=10, N=15) + assert_allclose(val1, val2, rtol=1e-8) + + def test_rvs(self): + # test if `rvs` is unbiased and large sample size converges + # to the true mean. + rv = multivariate_hypergeom(m=[3, 5], n=4) + rvs = rv.rvs(size=1000, random_state=123) + assert_allclose(rvs.mean(0), rv.mean(), rtol=1e-2) + + def test_rvs_broadcasting(self): + rv = multivariate_hypergeom(m=[[3, 5], [5, 10]], n=[4, 9]) + rvs = rv.rvs(size=(1000, 2), random_state=123) + assert_allclose(rvs.mean(0), rv.mean(), rtol=1e-2) + + @pytest.mark.parametrize('m, n', ( + ([0, 0, 20, 0, 0], 5), ([0, 0, 0, 0, 0], 0), + ([0, 0], 0), ([0], 0) + )) + def test_rvs_gh16171(self, m, n): + res = multivariate_hypergeom.rvs(m, n) + m = np.asarray(m) + res_ex = m.copy() + res_ex[m != 0] = n + assert_equal(res, res_ex) + + @pytest.mark.parametrize( + "x, m, n, expected", + [ + ([5], [5], 5, 1), + ([3, 4], [5, 10], 7, 0.3263403), + # Ground truth value from R dmvhyper + ([[[3, 5], [0, 8]], [[-1, 9], [1, 1]]], + [5, 10], [[8, 8], [8, 2]], + [[0.3916084, 0.006993007], [0, 0.4761905]]), + # test with empty arrays. 
+ (np.array([], dtype=int), np.array([], dtype=int), 0, []), + ([1, 2], [4, 5], 5, 0), + # Ground truth value from R dmvhyper + ([3, 3, 0], [5, 6, 7], 6, 0.01077354) + ] + ) + def test_pmf(self, x, m, n, expected): + vals = multivariate_hypergeom.pmf(x, m, n) + assert_allclose(vals, expected, rtol=1e-7) + + @pytest.mark.parametrize( + "x, m, n, expected", + [ + ([3, 4], [[5, 10], [10, 15]], 7, [0.3263403, 0.3407531]), + ([[1], [2]], [[3], [4]], [1, 3], [1., 0.]), + ([[[1], [2]]], [[3], [4]], [1, 3], [[1., 0.]]), + ([[1], [2]], [[[[3]]]], [1, 3], [[[1., 0.]]]) + ] + ) + def test_pmf_broadcasting(self, x, m, n, expected): + vals = multivariate_hypergeom.pmf(x, m, n) + assert_allclose(vals, expected, rtol=1e-7) + + def test_cov(self): + cov1 = multivariate_hypergeom.cov(m=[3, 7, 10], n=12) + cov2 = [[0.64421053, -0.26526316, -0.37894737], + [-0.26526316, 1.14947368, -0.88421053], + [-0.37894737, -0.88421053, 1.26315789]] + assert_allclose(cov1, cov2, rtol=1e-8) + + def test_cov_broadcasting(self): + cov1 = multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12]) + cov2 = [[[1.05, -1.05], [-1.05, 1.05]], + [[1.56, -1.56], [-1.56, 1.56]]] + assert_allclose(cov1, cov2, rtol=1e-8) + + cov3 = multivariate_hypergeom.cov(m=[[4], [5]], n=[4, 5]) + cov4 = [[[0.]], [[0.]]] + assert_allclose(cov3, cov4, rtol=1e-8) + + cov5 = multivariate_hypergeom.cov(m=[7, 9], n=[8, 12]) + cov6 = [[[1.05, -1.05], [-1.05, 1.05]], + [[0.7875, -0.7875], [-0.7875, 0.7875]]] + assert_allclose(cov5, cov6, rtol=1e-8) + + def test_var(self): + # test with hypergeom + var0 = multivariate_hypergeom.var(m=[10, 5], n=4) + var1 = hypergeom.var(M=15, n=4, N=10) + assert_allclose(var0, var1, rtol=1e-8) + + def test_var_broadcasting(self): + var0 = multivariate_hypergeom.var(m=[10, 5], n=[4, 8]) + var1 = multivariate_hypergeom.var(m=[10, 5], n=4) + var2 = multivariate_hypergeom.var(m=[10, 5], n=8) + assert_allclose(var0[0], var1, rtol=1e-8) + assert_allclose(var0[1], var2, rtol=1e-8) + + var3 = multivariate_hypergeom.var(m=[[10, 5], [10, 14]], n=[4, 8]) + var4 = [[0.6984127, 0.6984127], [1.352657, 1.352657]] + assert_allclose(var3, var4, rtol=1e-8) + + var5 = multivariate_hypergeom.var(m=[[5], [10]], n=[5, 10]) + var6 = [[0.], [0.]] + assert_allclose(var5, var6, rtol=1e-8) + + def test_mean(self): + # test with hypergeom + mean0 = multivariate_hypergeom.mean(m=[10, 5], n=4) + mean1 = hypergeom.mean(M=15, n=4, N=10) + assert_allclose(mean0[0], mean1, rtol=1e-8) + + mean2 = multivariate_hypergeom.mean(m=[12, 8], n=10) + mean3 = [12.*10./20., 8.*10./20.] 
+ assert_allclose(mean2, mean3, rtol=1e-8) + + def test_mean_broadcasting(self): + mean0 = multivariate_hypergeom.mean(m=[[3, 5], [10, 5]], n=[4, 8]) + mean1 = [[3.*4./8., 5.*4./8.], [10.*8./15., 5.*8./15.]] + assert_allclose(mean0, mean1, rtol=1e-8) + + def test_mean_edge_cases(self): + mean0 = multivariate_hypergeom.mean(m=[0, 0, 0], n=0) + assert_equal(mean0, [0., 0., 0.]) + + mean1 = multivariate_hypergeom.mean(m=[1, 0, 0], n=2) + assert_equal(mean1, [np.nan, np.nan, np.nan]) + + mean2 = multivariate_hypergeom.mean(m=[[1, 0, 0], [1, 0, 1]], n=2) + assert_allclose(mean2, [[np.nan, np.nan, np.nan], [1., 0., 1.]], + rtol=1e-17) + + mean3 = multivariate_hypergeom.mean(m=np.array([], dtype=int), n=0) + assert_equal(mean3, []) + assert_(mean3.shape == (0, )) + + def test_var_edge_cases(self): + var0 = multivariate_hypergeom.var(m=[0, 0, 0], n=0) + assert_allclose(var0, [0., 0., 0.], rtol=1e-16) + + var1 = multivariate_hypergeom.var(m=[1, 0, 0], n=2) + assert_equal(var1, [np.nan, np.nan, np.nan]) + + var2 = multivariate_hypergeom.var(m=[[1, 0, 0], [1, 0, 1]], n=2) + assert_allclose(var2, [[np.nan, np.nan, np.nan], [0., 0., 0.]], + rtol=1e-17) + + var3 = multivariate_hypergeom.var(m=np.array([], dtype=int), n=0) + assert_equal(var3, []) + assert_(var3.shape == (0, )) + + def test_cov_edge_cases(self): + cov0 = multivariate_hypergeom.cov(m=[1, 0, 0], n=1) + cov1 = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]] + assert_allclose(cov0, cov1, rtol=1e-17) + + cov3 = multivariate_hypergeom.cov(m=[0, 0, 0], n=0) + cov4 = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]] + assert_equal(cov3, cov4) + + cov5 = multivariate_hypergeom.cov(m=np.array([], dtype=int), n=0) + cov6 = np.array([], dtype=np.float64).reshape(0, 0) + assert_allclose(cov5, cov6, rtol=1e-17) + assert_(cov5.shape == (0, 0)) + + def test_frozen(self): + # The frozen distribution should agree with the regular one + np.random.seed(1234) + n = 12 + m = [7, 9, 11, 13] + x = [[0, 0, 0, 12], [0, 0, 1, 11], [0, 1, 1, 10], + [1, 1, 1, 9], [1, 1, 2, 8]] + x = np.asarray(x, dtype=int) + mhg_frozen = multivariate_hypergeom(m, n) + assert_allclose(mhg_frozen.pmf(x), + multivariate_hypergeom.pmf(x, m, n)) + assert_allclose(mhg_frozen.logpmf(x), + multivariate_hypergeom.logpmf(x, m, n)) + assert_allclose(mhg_frozen.var(), multivariate_hypergeom.var(m, n)) + assert_allclose(mhg_frozen.cov(), multivariate_hypergeom.cov(m, n)) + + def test_invalid_params(self): + assert_raises(ValueError, multivariate_hypergeom.pmf, 5, 10, 5) + assert_raises(ValueError, multivariate_hypergeom.pmf, 5, [10], 5) + assert_raises(ValueError, multivariate_hypergeom.pmf, [5, 4], [10], 5) + assert_raises(TypeError, multivariate_hypergeom.pmf, [5.5, 4.5], + [10, 15], 5) + assert_raises(TypeError, multivariate_hypergeom.pmf, [5, 4], + [10.5, 15.5], 5) + assert_raises(TypeError, multivariate_hypergeom.pmf, [5, 4], + [10, 15], 5.5) + + +class TestRandomTable: + def get_rng(self): + return np.random.default_rng(628174795866951638) + + def test_process_parameters(self): + message = "`row` must be one-dimensional" + with pytest.raises(ValueError, match=message): + random_table([[1, 2]], [1, 2]) + + message = "`col` must be one-dimensional" + with pytest.raises(ValueError, match=message): + random_table([1, 2], [[1, 2]]) + + message = "each element of `row` must be non-negative" + with pytest.raises(ValueError, match=message): + random_table([1, -1], [1, 2]) + + message = "each element of `col` must be non-negative" + with pytest.raises(ValueError, match=message): + random_table([1, 2], [1, 
-2]) + + message = "sums over `row` and `col` must be equal" + with pytest.raises(ValueError, match=message): + random_table([1, 2], [1, 0]) + + message = "each element of `row` must be an integer" + with pytest.raises(ValueError, match=message): + random_table([2.1, 2.1], [1, 1, 2]) + + message = "each element of `col` must be an integer" + with pytest.raises(ValueError, match=message): + random_table([1, 2], [1.1, 1.1, 1]) + + row = [1, 3] + col = [2, 1, 1] + r, c, n = random_table._process_parameters([1, 3], [2, 1, 1]) + assert_equal(row, r) + assert_equal(col, c) + assert n == np.sum(row) + + @pytest.mark.parametrize("scale,method", + ((1, "boyett"), (100, "patefield"))) + def test_process_rvs_method_on_None(self, scale, method): + row = np.array([1, 3]) * scale + col = np.array([2, 1, 1]) * scale + + ct = random_table + expected = ct.rvs(row, col, method=method, random_state=1) + got = ct.rvs(row, col, method=None, random_state=1) + + assert_equal(expected, got) + + def test_process_rvs_method_bad_argument(self): + row = [1, 3] + col = [2, 1, 1] + + # order of items in set is random, so cannot check that + message = "'foo' not recognized, must be one of" + with pytest.raises(ValueError, match=message): + random_table.rvs(row, col, method="foo") + + @pytest.mark.parametrize('frozen', (True, False)) + @pytest.mark.parametrize('log', (True, False)) + def test_pmf_logpmf(self, frozen, log): + # The pmf is tested through random sample generation + # with Boyett's algorithm, whose implementation is simple + # enough to verify manually for correctness. + rng = self.get_rng() + row = [2, 6] + col = [1, 3, 4] + rvs = random_table.rvs(row, col, size=1000, + method="boyett", random_state=rng) + + obj = random_table(row, col) if frozen else random_table + method = getattr(obj, "logpmf" if log else "pmf") + if not frozen: + original_method = method + + def method(x): + return original_method(x, row, col) + pmf = (lambda x: np.exp(method(x))) if log else method + + unique_rvs, counts = np.unique(rvs, axis=0, return_counts=True) + + # rough accuracy check + p = pmf(unique_rvs) + assert_allclose(p * len(rvs), counts, rtol=0.1) + + # accept any iterable + p2 = pmf(list(unique_rvs[0])) + assert_equal(p2, p[0]) + + # accept high-dimensional input and 2d input + rvs_nd = rvs.reshape((10, 100) + rvs.shape[1:]) + p = pmf(rvs_nd) + assert p.shape == (10, 100) + for i in range(p.shape[0]): + for j in range(p.shape[1]): + pij = p[i, j] + rvij = rvs_nd[i, j] + qij = pmf(rvij) + assert_equal(pij, qij) + + # probability is zero if column marginal does not match + x = [[0, 1, 1], [2, 1, 3]] + assert_equal(np.sum(x, axis=-1), row) + p = pmf(x) + assert p == 0 + + # probability is zero if row marginal does not match + x = [[0, 1, 2], [1, 2, 2]] + assert_equal(np.sum(x, axis=-2), col) + p = pmf(x) + assert p == 0 + + # response to invalid inputs + message = "`x` must be at least two-dimensional" + with pytest.raises(ValueError, match=message): + pmf([1]) + + message = "`x` must contain only integral values" + with pytest.raises(ValueError, match=message): + pmf([[1.1]]) + + message = "`x` must contain only integral values" + with pytest.raises(ValueError, match=message): + pmf([[np.nan]]) + + message = "`x` must contain only non-negative values" + with pytest.raises(ValueError, match=message): + pmf([[-1]]) + + message = "shape of `x` must agree with `row`" + with pytest.raises(ValueError, match=message): + pmf([[1, 2, 3]]) + + message = "shape of `x` must agree with `col`" + with pytest.raises(ValueError, 
match=message): + pmf([[1, 2], + [3, 4]]) + + @pytest.mark.parametrize("method", ("boyett", "patefield")) + def test_rvs_mean(self, method): + # test if `rvs` is unbiased and large sample size converges + # to the true mean. + rng = self.get_rng() + row = [2, 6] + col = [1, 3, 4] + rvs = random_table.rvs(row, col, size=1000, method=method, + random_state=rng) + mean = random_table.mean(row, col) + assert_equal(np.sum(mean), np.sum(row)) + assert_allclose(rvs.mean(0), mean, atol=0.05) + assert_equal(rvs.sum(axis=-1), np.broadcast_to(row, (1000, 2))) + assert_equal(rvs.sum(axis=-2), np.broadcast_to(col, (1000, 3))) + + def test_rvs_cov(self): + # test if `rvs` generated with patefield and boyett algorithms + # produce approximately the same covariance matrix + rng = self.get_rng() + row = [2, 6] + col = [1, 3, 4] + rvs1 = random_table.rvs(row, col, size=10000, method="boyett", + random_state=rng) + rvs2 = random_table.rvs(row, col, size=10000, method="patefield", + random_state=rng) + cov1 = np.var(rvs1, axis=0) + cov2 = np.var(rvs2, axis=0) + assert_allclose(cov1, cov2, atol=0.02) + + @pytest.mark.parametrize("method", ("boyett", "patefield")) + def test_rvs_size(self, method): + row = [2, 6] + col = [1, 3, 4] + + # test size `None` + rv = random_table.rvs(row, col, method=method, + random_state=self.get_rng()) + assert rv.shape == (2, 3) + + # test size 1 + rv2 = random_table.rvs(row, col, size=1, method=method, + random_state=self.get_rng()) + assert rv2.shape == (1, 2, 3) + assert_equal(rv, rv2[0]) + + # test size 0 + rv3 = random_table.rvs(row, col, size=0, method=method, + random_state=self.get_rng()) + assert rv3.shape == (0, 2, 3) + + # test other valid size + rv4 = random_table.rvs(row, col, size=20, method=method, + random_state=self.get_rng()) + assert rv4.shape == (20, 2, 3) + + rv5 = random_table.rvs(row, col, size=(4, 5), method=method, + random_state=self.get_rng()) + assert rv5.shape == (4, 5, 2, 3) + + assert_allclose(rv5.reshape(20, 2, 3), rv4, rtol=1e-15) + + # test invalid size + message = "`size` must be a non-negative integer or `None`" + with pytest.raises(ValueError, match=message): + random_table.rvs(row, col, size=-1, method=method, + random_state=self.get_rng()) + + with pytest.raises(ValueError, match=message): + random_table.rvs(row, col, size=np.nan, method=method, + random_state=self.get_rng()) + + @pytest.mark.parametrize("method", ("boyett", "patefield")) + def test_rvs_method(self, method): + # This test assumes that pmf is correct and checks that random samples + # follow this probability distribution. This seems like a circular + # argument, since pmf is checked in test_pmf_logpmf with random samples + # generated with the rvs method. This test is not redundant, because + # test_pmf_logpmf intentionally uses rvs generation with Boyett only, + # but here we test both Boyett and Patefield. + row = [2, 6] + col = [1, 3, 4] + + ct = random_table + rvs = ct.rvs(row, col, size=100000, method=method, + random_state=self.get_rng()) + + unique_rvs, counts = np.unique(rvs, axis=0, return_counts=True) + + # generated frequencies should match expected frequencies + p = ct.pmf(unique_rvs, row, col) + assert_allclose(p * len(rvs), counts, rtol=0.02) + + @pytest.mark.parametrize("method", ("boyett", "patefield")) + def test_rvs_with_zeros_in_col_row(self, method): + row = [0, 1, 0] + col = [1, 0, 0, 0] + d = random_table(row, col) + rv = d.rvs(1000, method=method, random_state=self.get_rng()) + expected = np.zeros((1000, len(row), len(col))) + expected[...] 
= [[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 0, 0, 0]] + assert_equal(rv, expected) + + @pytest.mark.parametrize("method", (None, "boyett", "patefield")) + @pytest.mark.parametrize("col", ([], [0])) + @pytest.mark.parametrize("row", ([], [0])) + def test_rvs_with_edge_cases(self, method, row, col): + d = random_table(row, col) + rv = d.rvs(10, method=method, random_state=self.get_rng()) + expected = np.zeros((10, len(row), len(col))) + assert_equal(rv, expected) + + @pytest.mark.parametrize('v', (1, 2)) + def test_rvs_rcont(self, v): + # This test checks the internal low-level interface. + # It is implicitly also checked by the other test_rvs* calls. + import scipy.stats._rcont as _rcont + + row = np.array([1, 3], dtype=np.int64) + col = np.array([2, 1, 1], dtype=np.int64) + + rvs = getattr(_rcont, f"rvs_rcont{v}") + + ntot = np.sum(row) + result = rvs(row, col, ntot, 1, self.get_rng()) + + assert result.shape == (1, len(row), len(col)) + assert np.sum(result) == ntot + + def test_frozen(self): + row = [2, 6] + col = [1, 3, 4] + d = random_table(row, col, seed=self.get_rng()) + + sample = d.rvs() + + expected = random_table.mean(row, col) + assert_equal(expected, d.mean()) + + expected = random_table.pmf(sample, row, col) + assert_equal(expected, d.pmf(sample)) + + expected = random_table.logpmf(sample, row, col) + assert_equal(expected, d.logpmf(sample)) + + @pytest.mark.parametrize("method", ("boyett", "patefield")) + def test_rvs_frozen(self, method): + row = [2, 6] + col = [1, 3, 4] + d = random_table(row, col, seed=self.get_rng()) + + expected = random_table.rvs(row, col, size=10, method=method, + random_state=self.get_rng()) + got = d.rvs(size=10, method=method) + assert_equal(expected, got) + + +def check_pickling(distfn, args): + # check that a distribution instance pickles and unpickles + # pay special attention to the random_state property + + # save the random_state (restore later) + rndm = distfn.random_state + + distfn.random_state = 1234 + distfn.rvs(*args, size=8) + s = pickle.dumps(distfn) + r0 = distfn.rvs(*args, size=8) + + unpickled = pickle.loads(s) + r1 = unpickled.rvs(*args, size=8) + assert_equal(r0, r1) + + # restore the random_state + distfn.random_state = rndm + + +def test_random_state_property(): + scale = np.eye(3) + scale[0, 1] = 0.5 + scale[1, 0] = 0.5 + dists = [ + [multivariate_normal, ()], + [dirichlet, (np.array([1.]), )], + [wishart, (10, scale)], + [invwishart, (10, scale)], + [multinomial, (5, [0.5, 0.4, 0.1])], + [ortho_group, (2,)], + [special_ortho_group, (2,)] + ] + for distfn, args in dists: + check_random_state_property(distfn, args) + check_pickling(distfn, args) + + +class TestVonMises_Fisher: + @pytest.mark.parametrize("dim", [2, 3, 4, 6]) + @pytest.mark.parametrize("size", [None, 1, 5, (5, 4)]) + def test_samples(self, dim, size): + # test that samples have correct shape and norm 1 + rng = np.random.default_rng(2777937887058094419) + mu = np.full((dim, ), 1/np.sqrt(dim)) + vmf_dist = vonmises_fisher(mu, 1, seed=rng) + samples = vmf_dist.rvs(size) + mean, cov = np.zeros(dim), np.eye(dim) + expected_shape = rng.multivariate_normal(mean, cov, size=size).shape + assert samples.shape == expected_shape + norms = np.linalg.norm(samples, axis=-1) + assert_allclose(norms, 1.) 
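+    # (Hedged usage sketch, not part of the suite: larger `kappa` means
+    # samples concentrate more tightly around the mean direction `mu`, e.g.
+    #
+    #     rng = np.random.default_rng(0)
+    #     mu = np.array([0., 0., 1.])
+    #     s = vonmises_fisher(mu, 100, seed=rng).rvs(5)
+    #     s @ mu  # close to 1 for every sample at this concentration
+    # )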
+ + @pytest.mark.parametrize("dim", [5, 8]) + @pytest.mark.parametrize("kappa", [1e15, 1e20, 1e30]) + def test_sampling_high_concentration(self, dim, kappa): + # test that no warnings are encountered for high values + rng = np.random.default_rng(2777937887058094419) + mu = np.full((dim, ), 1/np.sqrt(dim)) + vmf_dist = vonmises_fisher(mu, kappa, seed=rng) + vmf_dist.rvs(10) + + def test_two_dimensional_mu(self): + mu = np.ones((2, 2)) + msg = "'mu' must have one-dimensional shape." + with pytest.raises(ValueError, match=msg): + vonmises_fisher(mu, 1) + + def test_wrong_norm_mu(self): + mu = np.ones((2, )) + msg = "'mu' must be a unit vector of norm 1." + with pytest.raises(ValueError, match=msg): + vonmises_fisher(mu, 1) + + def test_one_entry_mu(self): + mu = np.ones((1, )) + msg = "'mu' must have at least two entries." + with pytest.raises(ValueError, match=msg): + vonmises_fisher(mu, 1) + + @pytest.mark.parametrize("kappa", [-1, (5, 3)]) + def test_kappa_validation(self, kappa): + msg = "'kappa' must be a positive scalar." + with pytest.raises(ValueError, match=msg): + vonmises_fisher([1, 0], kappa) + + @pytest.mark.parametrize("kappa", [0, 0.]) + def test_kappa_zero(self, kappa): + msg = ("For 'kappa=0' the von Mises-Fisher distribution " + "becomes the uniform distribution on the sphere " + "surface. Consider using 'scipy.stats.uniform_direction' " + "instead.") + with pytest.raises(ValueError, match=msg): + vonmises_fisher([1, 0], kappa) + + + @pytest.mark.parametrize("method", [vonmises_fisher.pdf, + vonmises_fisher.logpdf]) + def test_invalid_shapes_pdf_logpdf(self, method): + x = np.array([1., 0., 0]) + msg = ("The dimensionality of the last axis of 'x' must " + "match the dimensionality of the von Mises Fisher " + "distribution.") + with pytest.raises(ValueError, match=msg): + method(x, [1, 0], 1) + + @pytest.mark.parametrize("method", [vonmises_fisher.pdf, + vonmises_fisher.logpdf]) + def test_unnormalized_input(self, method): + x = np.array([0.5, 0.]) + msg = "'x' must be unit vectors of norm 1 along last dimension." 
+ with pytest.raises(ValueError, match=msg): + method(x, [1, 0], 1) + + # Expected values of the vonmises-fisher logPDF were computed via mpmath + # from mpmath import mp + # import numpy as np + # mp.dps = 50 + # def logpdf_mpmath(x, mu, kappa): + # dim = mu.size + # halfdim = mp.mpf(0.5 * dim) + # kappa = mp.mpf(kappa) + # const = (kappa**(halfdim - mp.one)/((2*mp.pi)**halfdim * \ + # mp.besseli(halfdim -mp.one, kappa))) + # return float(const * mp.exp(kappa*mp.fdot(x, mu))) + + @pytest.mark.parametrize('x, mu, kappa, reference', + [(np.array([1., 0., 0.]), np.array([1., 0., 0.]), + 1e-4, 0.0795854295583605), + (np.array([1., 0., 0]), np.array([0., 0., 1.]), + 1e-4, 0.07957747141331854), + (np.array([1., 0., 0.]), np.array([1., 0., 0.]), + 100, 15.915494309189533), + (np.array([1., 0., 0]), np.array([0., 0., 1.]), + 100, 5.920684802611232e-43), + (np.array([1., 0., 0.]), + np.array([np.sqrt(0.98), np.sqrt(0.02), 0.]), + 2000, 5.930499050746588e-07), + (np.array([1., 0., 0]), np.array([1., 0., 0.]), + 2000, 318.3098861837907), + (np.array([1., 0., 0., 0., 0.]), + np.array([1., 0., 0., 0., 0.]), + 2000, 101371.86957712633), + (np.array([1., 0., 0., 0., 0.]), + np.array([np.sqrt(0.98), np.sqrt(0.02), 0., + 0, 0.]), + 2000, 0.00018886808182653578), + (np.array([1., 0., 0., 0., 0.]), + np.array([np.sqrt(0.8), np.sqrt(0.2), 0., + 0, 0.]), + 2000, 2.0255393314603194e-87)]) + def test_pdf_accuracy(self, x, mu, kappa, reference): + pdf = vonmises_fisher(mu, kappa).pdf(x) + assert_allclose(pdf, reference, rtol=1e-13) + + # Expected values of the vonmises-fisher logPDF were computed via mpmath + # from mpmath import mp + # import numpy as np + # mp.dps = 50 + # def logpdf_mpmath(x, mu, kappa): + # dim = mu.size + # halfdim = mp.mpf(0.5 * dim) + # kappa = mp.mpf(kappa) + # two = mp.mpf(2.) 
+    #     const = (kappa**(halfdim - mp.one)/((two*mp.pi)**halfdim *
+    #              mp.besseli(halfdim - mp.one, kappa)))
+    #     return float(mp.log(const * mp.exp(kappa*mp.fdot(x, mu))))
+
+    @pytest.mark.parametrize('x, mu, kappa, reference',
+                             [(np.array([1., 0., 0.]), np.array([1., 0., 0.]),
+                               1e-4, -2.5309242486359573),
+                              (np.array([1., 0., 0]), np.array([0., 0., 1.]),
+                               1e-4, -2.5310242486359575),
+                              (np.array([1., 0., 0.]), np.array([1., 0., 0.]),
+                               100, 2.767293119578746),
+                              (np.array([1., 0., 0]), np.array([0., 0., 1.]),
+                               100, -97.23270688042125),
+                              (np.array([1., 0., 0.]),
+                               np.array([np.sqrt(0.98), np.sqrt(0.02), 0.]),
+                               2000, -14.337987284534103),
+                              (np.array([1., 0., 0]), np.array([1., 0., 0.]),
+                               2000, 5.763025393132737),
+                              (np.array([1., 0., 0., 0., 0.]),
+                               np.array([1., 0., 0., 0., 0.]),
+                               2000, 11.526550911307156),
+                              (np.array([1., 0., 0., 0., 0.]),
+                               np.array([np.sqrt(0.98), np.sqrt(0.02), 0.,
+                                         0, 0.]),
+                               2000, -8.574461766359684),
+                              (np.array([1., 0., 0., 0., 0.]),
+                               np.array([np.sqrt(0.8), np.sqrt(0.2), 0.,
+                                         0, 0.]),
+                               2000, -199.61906708886113)])
+    def test_logpdf_accuracy(self, x, mu, kappa, reference):
+        logpdf = vonmises_fisher(mu, kappa).logpdf(x)
+        assert_allclose(logpdf, reference, rtol=1e-14)
+
+    # Expected values of the vonmises-fisher entropy were computed via mpmath
+    # from mpmath import mp
+    # import numpy as np
+    # mp.dps = 50
+    # def entropy_mpmath(dim, kappa):
+    #     mu = np.full((dim, ), 1/np.sqrt(dim))
+    #     kappa = mp.mpf(kappa)
+    #     halfdim = mp.mpf(0.5 * dim)
+    #     logconstant = mp.log(kappa**(halfdim - mp.one)
+    #                          / ((2*mp.pi)**halfdim
+    #                             * mp.besseli(halfdim - mp.one, kappa)))
+    #     return float(-logconstant - kappa * mp.besseli(halfdim, kappa) /
+    #                  mp.besseli(halfdim - mp.one, kappa))
+
+    @pytest.mark.parametrize('dim, kappa, reference',
+                             [(3, 1e-4, 2.531024245302624),
+                              (3, 100, -1.7672931195787458),
+                              (5, 5000, -11.359032310024453),
+                              (8, 1, 3.4189526482545527)])
+    def test_entropy_accuracy(self, dim, kappa, reference):
+        mu = np.full((dim, ), 1/np.sqrt(dim))
+        entropy = vonmises_fisher(mu, kappa).entropy()
+        assert_allclose(entropy, reference, rtol=2e-14)
+
+    @pytest.mark.parametrize("method", [vonmises_fisher.pdf,
+                                        vonmises_fisher.logpdf])
+    def test_broadcasting(self, method):
+        # test that pdf and logpdf values are correctly broadcasted
+        testshape = (2, 2)
+        rng = np.random.default_rng(2777937887058094419)
+        x = uniform_direction(3).rvs(testshape, random_state=rng)
+        mu = np.full((3, ), 1/np.sqrt(3))
+        kappa = 5
+        result_all = method(x, mu, kappa)
+        assert result_all.shape == testshape
+        for i in range(testshape[0]):
+            for j in range(testshape[1]):
+                current_val = method(x[i, j, :], mu, kappa)
+                assert_allclose(current_val, result_all[i, j], rtol=1e-15)
+
+    def test_vs_vonmises_2d(self):
+        # test that in 2D, von Mises-Fisher yields the same results
+        # as the von Mises distribution
+        rng = np.random.default_rng(2777937887058094419)
+        mu = np.array([0, 1])
+        mu_angle = np.arctan2(mu[1], mu[0])
+        kappa = 20
+        vmf = vonmises_fisher(mu, kappa)
+        vonmises_dist = vonmises(loc=mu_angle, kappa=kappa)
+        vectors = uniform_direction(2).rvs(10, random_state=rng)
+        angles = np.arctan2(vectors[:, 1], vectors[:, 0])
+        assert_allclose(vonmises_dist.entropy(), vmf.entropy())
+        assert_allclose(vonmises_dist.pdf(angles), vmf.pdf(vectors))
+        assert_allclose(vonmises_dist.logpdf(angles), vmf.logpdf(vectors))
+
+    @pytest.mark.parametrize("dim", [2, 3, 6])
+    @pytest.mark.parametrize("kappa, mu_tol, kappa_tol",
+                             [(1, 5e-2, 5e-2),
+                              (10, 1e-2, 1e-2),
+                              (100, 5e-3, 2e-2),
+                              (1000, 1e-3, 2e-2)])
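+    # For orientation: 'fit' should roughly agree with the simple
+    # moment-based estimate of Banerjee et al. (2005), sketched here for
+    # unit-vector rows `samples` of dimension `dim`:
+    #     mean = samples.mean(axis=0)
+    #     r = np.linalg.norm(mean)
+    #     mu_est = mean / r
+    #     kappa_est = r * (dim - r**2) / (1 - r**2)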
+    def test_fit_accuracy(self, dim, kappa, mu_tol, kappa_tol):
+        mu = np.full((dim, ), 1/np.sqrt(dim))
+        vmf_dist = vonmises_fisher(mu, kappa)
+        rng = np.random.default_rng(2777937887058094419)
+        n_samples = 10000
+        samples = vmf_dist.rvs(n_samples, random_state=rng)
+        mu_fit, kappa_fit = vonmises_fisher.fit(samples)
+        angular_error = np.arccos(mu.dot(mu_fit))
+        assert_allclose(angular_error, 0., atol=mu_tol, rtol=0)
+        assert_allclose(kappa, kappa_fit, rtol=kappa_tol)
+
+    def test_fit_error_one_dimensional_data(self):
+        x = np.zeros((3, ))
+        msg = "'x' must be two dimensional."
+        with pytest.raises(ValueError, match=msg):
+            vonmises_fisher.fit(x)
+
+    def test_fit_error_unnormalized_data(self):
+        x = np.ones((3, 3))
+        msg = "'x' must be unit vectors of norm 1 along last dimension."
+        with pytest.raises(ValueError, match=msg):
+            vonmises_fisher.fit(x)
+
+    def test_frozen_distribution(self):
+        mu = np.array([0, 0, 1])
+        kappa = 5
+        frozen = vonmises_fisher(mu, kappa)
+        frozen_seed = vonmises_fisher(mu, kappa, seed=514)
+
+        rvs1 = frozen.rvs(random_state=514)
+        rvs2 = vonmises_fisher.rvs(mu, kappa, random_state=514)
+        rvs3 = frozen_seed.rvs()
+
+        assert_equal(rvs1, rvs2)
+        assert_equal(rvs1, rvs3)
+
+
+class TestDirichletMultinomial:
+    @classmethod
+    def get_params(cls, m):
+        rng = np.random.default_rng(28469824356873456)
+        alpha = rng.uniform(0, 100, size=2)
+        x = rng.integers(1, 20, size=(m, 2))
+        n = x.sum(axis=-1)
+        return rng, m, alpha, n, x
+
+    def test_frozen(self):
+        rng = np.random.default_rng(28469824356873456)
+
+        alpha = rng.uniform(0, 100, 10)
+        x = rng.integers(0, 10, 10)
+        n = np.sum(x, axis=-1)
+
+        d = dirichlet_multinomial(alpha, n)
+        assert_equal(d.logpmf(x), dirichlet_multinomial.logpmf(x, alpha, n))
+        assert_equal(d.pmf(x), dirichlet_multinomial.pmf(x, alpha, n))
+        assert_equal(d.mean(), dirichlet_multinomial.mean(alpha, n))
+        assert_equal(d.var(), dirichlet_multinomial.var(alpha, n))
+        assert_equal(d.cov(), dirichlet_multinomial.cov(alpha, n))
+
+    def test_pmf_logpmf_against_R(self):
+        # Compare PMF against R's extraDistr ddirmnom:
+        # library(extraDistr)
+        # options(digits=16)
+        # ddirmnom(c(1, 2, 3), 6, c(3, 4, 5))
+        x = np.array([1, 2, 3])
+        n = np.sum(x)
+        alpha = np.array([3, 4, 5])
+        res = dirichlet_multinomial.pmf(x, alpha, n)
+        logres = dirichlet_multinomial.logpmf(x, alpha, n)
+        ref = 0.08484162895927638
+        assert_allclose(res, ref)
+        assert_allclose(logres, np.log(ref))
+        assert res.shape == logres.shape == ()
+
+        # library(extraDistr)
+        # options(digits=16)
+        # ddirmnom(c(4, 3, 2, 0, 2, 3, 5, 7, 4, 7), 37,
+        #          c(45.01025314, 21.98739582, 15.14851365, 80.21588671,
+        #            52.84935481, 25.20905262, 53.85373737, 4.88568118,
+        #            89.06440654, 20.11359466))
+        rng = np.random.default_rng(28469824356873456)
+        alpha = rng.uniform(0, 100, 10)
+        x = rng.integers(0, 10, 10)
+        n = np.sum(x, axis=-1)
+        res = dirichlet_multinomial(alpha, n).pmf(x)
+        logres = dirichlet_multinomial.logpmf(x, alpha, n)
+        ref = 3.65409306285992e-16
+        assert_allclose(res, ref)
+        assert_allclose(logres, np.log(ref))
+
+    def test_pmf_logpmf_support(self):
+        # when the sum of the category counts does not equal the number of
+        # trials, the PMF is zero
+        rng, m, alpha, n, x = self.get_params(1)
+        n += 1
+        assert_equal(dirichlet_multinomial(alpha, n).pmf(x), 0)
+        assert_equal(dirichlet_multinomial(alpha, n).logpmf(x), -np.inf)
+
+        rng, m, alpha, n, x = self.get_params(10)
+        i = rng.random(size=10) > 0.5
+        x[i] = np.round(x[i] * 2)  # sum of these x does not equal n
+
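+        # For reference, on the support (counts x with sum(x) == n) the PMF is
+        #     n! * gamma(a0) / gamma(n + a0)
+        #         * prod_k gamma(x_k + alpha_k) / (x_k! * gamma(alpha_k)),
+        # where a0 = sum(alpha); for any other x it is identically zero, which
+        # is what the assertions below check for the perturbed rows.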
assert_equal(dirichlet_multinomial(alpha, n).pmf(x)[i], 0) + assert_equal(dirichlet_multinomial(alpha, n).logpmf(x)[i], -np.inf) + assert np.all(dirichlet_multinomial(alpha, n).pmf(x)[~i] > 0) + assert np.all(dirichlet_multinomial(alpha, n).logpmf(x)[~i] > -np.inf) + + def test_dimensionality_one(self): + # if the dimensionality is one, there is only one possible outcome + n = 6 # number of trials + alpha = [10] # concentration parameters + x = np.asarray([n]) # counts + dist = dirichlet_multinomial(alpha, n) + + assert_equal(dist.pmf(x), 1) + assert_equal(dist.pmf(x+1), 0) + assert_equal(dist.logpmf(x), 0) + assert_equal(dist.logpmf(x+1), -np.inf) + assert_equal(dist.mean(), n) + assert_equal(dist.var(), 0) + assert_equal(dist.cov(), 0) + + @pytest.mark.parametrize('method_name', ['pmf', 'logpmf']) + def test_against_betabinom_pmf(self, method_name): + rng, m, alpha, n, x = self.get_params(100) + + method = getattr(dirichlet_multinomial(alpha, n), method_name) + ref_method = getattr(stats.betabinom(n, *alpha.T), method_name) + + res = method(x) + ref = ref_method(x.T[0]) + assert_allclose(res, ref) + + @pytest.mark.parametrize('method_name', ['mean', 'var']) + def test_against_betabinom_moments(self, method_name): + rng, m, alpha, n, x = self.get_params(100) + + method = getattr(dirichlet_multinomial(alpha, n), method_name) + ref_method = getattr(stats.betabinom(n, *alpha.T), method_name) + + res = method()[:, 0] + ref = ref_method() + assert_allclose(res, ref) + + def test_moments(self): + message = 'Needs NumPy 1.22.0 for multinomial broadcasting' + if Version(np.__version__) < Version("1.22.0"): + pytest.skip(reason=message) + + rng = np.random.default_rng(28469824356873456) + dim = 5 + n = rng.integers(1, 100) + alpha = rng.random(size=dim) * 10 + dist = dirichlet_multinomial(alpha, n) + + # Generate a random sample from the distribution using NumPy + m = 100000 + p = rng.dirichlet(alpha, size=m) + x = rng.multinomial(n, p, size=m) + + assert_allclose(dist.mean(), np.mean(x, axis=0), rtol=5e-3) + assert_allclose(dist.var(), np.var(x, axis=0), rtol=1e-2) + assert dist.mean().shape == dist.var().shape == (dim,) + + cov = dist.cov() + assert cov.shape == (dim, dim) + assert_allclose(cov, np.cov(x.T), rtol=2e-2) + assert_equal(np.diag(cov), dist.var()) + assert np.all(scipy.linalg.eigh(cov)[0] > 0) # positive definite + + def test_input_validation(self): + # valid inputs + x0 = np.array([1, 2, 3]) + n0 = np.sum(x0) + alpha0 = np.array([3, 4, 5]) + + text = "`x` must contain only non-negative integers." + with assert_raises(ValueError, match=text): + dirichlet_multinomial.logpmf([1, -1, 3], alpha0, n0) + with assert_raises(ValueError, match=text): + dirichlet_multinomial.logpmf([1, 2.1, 3], alpha0, n0) + + text = "`alpha` must contain only positive values." + with assert_raises(ValueError, match=text): + dirichlet_multinomial.logpmf(x0, [3, 0, 4], n0) + with assert_raises(ValueError, match=text): + dirichlet_multinomial.logpmf(x0, [3, -1, 4], n0) + + text = "`n` must be a positive integer." + with assert_raises(ValueError, match=text): + dirichlet_multinomial.logpmf(x0, alpha0, 49.1) + with assert_raises(ValueError, match=text): + dirichlet_multinomial.logpmf(x0, alpha0, 0) + + x = np.array([1, 2, 3, 4]) + alpha = np.array([3, 4, 5]) + text = "`x` and `alpha` must be broadcastable." 
+        with assert_raises(ValueError, match=text):
+            dirichlet_multinomial.logpmf(x, alpha, x.sum())
+
+    @pytest.mark.parametrize('method', ['pmf', 'logpmf'])
+    def test_broadcasting_pmf(self, method):
+        alpha = np.array([[3, 4, 5], [4, 5, 6], [5, 5, 7], [8, 9, 10]])
+        n = np.array([[6], [7], [8]])
+        x = np.array([[1, 2, 3], [2, 2, 3]]).reshape((2, 1, 1, 3))
+        method = getattr(dirichlet_multinomial, method)
+        res = method(x, alpha, n)
+        assert res.shape == (2, 3, 4)
+        for i in range(len(x)):
+            for j in range(len(n)):
+                for k in range(len(alpha)):
+                    res_ijk = res[i, j, k]
+                    ref = method(x[i].squeeze(), alpha[k].squeeze(),
+                                 n[j].squeeze())
+                    assert_allclose(res_ijk, ref)
+
+    @pytest.mark.parametrize('method_name', ['mean', 'var', 'cov'])
+    def test_broadcasting_moments(self, method_name):
+        alpha = np.array([[3, 4, 5], [4, 5, 6], [5, 5, 7], [8, 9, 10]])
+        n = np.array([[6], [7], [8]])
+        method = getattr(dirichlet_multinomial, method_name)
+        res = method(alpha, n)
+        assert res.shape == ((3, 4, 3) if method_name != 'cov'
+                             else (3, 4, 3, 3))
+        for j in range(len(n)):
+            for k in range(len(alpha)):
+                res_ijk = res[j, k]
+                ref = method(alpha[k].squeeze(), n[j].squeeze())
+                assert_allclose(res_ijk, ref)
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_odds_ratio.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_odds_ratio.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffb38a05c8df2e0bd4d7336ef344aa2f73bd0d6c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_odds_ratio.py
@@ -0,0 +1,147 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+from .._discrete_distns import nchypergeom_fisher, hypergeom
+from scipy.stats._odds_ratio import odds_ratio
+from .data.fisher_exact_results_from_r import data
+
+
+class TestOddsRatio:
+
+    @pytest.mark.parametrize('parameters, rresult', data)
+    def test_results_from_r(self, parameters, rresult):
+        alternative = parameters.alternative.replace('.', '-')
+        result = odds_ratio(parameters.table)
+        # The results computed by R are not very accurate.
+        if result.statistic < 400:
+            or_rtol = 5e-4
+            ci_rtol = 2e-2
+        else:
+            or_rtol = 5e-2
+            ci_rtol = 1e-1
+        assert_allclose(result.statistic,
+                        rresult.conditional_odds_ratio, rtol=or_rtol)
+        ci = result.confidence_interval(parameters.confidence_level,
+                                        alternative)
+        assert_allclose((ci.low, ci.high), rresult.conditional_odds_ratio_ci,
+                        rtol=ci_rtol)
+
+        # Also do a self-check for the conditional odds ratio.
+        # With the computed conditional odds ratio as the noncentrality
+        # parameter of the noncentral hypergeometric distribution with
+        # parameters table.sum(), table[0].sum(), and table[:,0].sum() as
+        # total, ngood and nsample, respectively, the mean of the distribution
+        # should equal table[0, 0].
+        cor = result.statistic
+        table = np.array(parameters.table)
+        total = table.sum()
+        ngood = table[0].sum()
+        nsample = table[:, 0].sum()
+        # nchypergeom_fisher does not allow the edge cases where the
+        # noncentrality parameter is 0 or inf, so handle those values
+        # separately here.
+        if cor == 0:
+            nchg_mean = hypergeom.support(total, ngood, nsample)[0]
+        elif cor == np.inf:
+            nchg_mean = hypergeom.support(total, ngood, nsample)[1]
+        else:
+            nchg_mean = nchypergeom_fisher.mean(total, ngood, nsample, cor)
+        assert_allclose(nchg_mean, table[0, 0], rtol=1e-13)
+
+        # Check that the confidence interval is correct.
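+        # The CI endpoints come from inverting the test based on Fisher's
+        # noncentral hypergeometric distribution: with x = table[0, 0], the
+        # lower endpoint of a two-sided CI at level 1 - alpha satisfies
+        # sf(x - 1) == alpha / 2 and the upper endpoint satisfies
+        # cdf(x) == alpha / 2; one-sided intervals use alpha instead.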
+ alpha = 1 - parameters.confidence_level + if alternative == 'two-sided': + if ci.low > 0: + sf = nchypergeom_fisher.sf(table[0, 0] - 1, + total, ngood, nsample, ci.low) + assert_allclose(sf, alpha/2, rtol=1e-11) + if np.isfinite(ci.high): + cdf = nchypergeom_fisher.cdf(table[0, 0], + total, ngood, nsample, ci.high) + assert_allclose(cdf, alpha/2, rtol=1e-11) + elif alternative == 'less': + if np.isfinite(ci.high): + cdf = nchypergeom_fisher.cdf(table[0, 0], + total, ngood, nsample, ci.high) + assert_allclose(cdf, alpha, rtol=1e-11) + else: + # alternative == 'greater' + if ci.low > 0: + sf = nchypergeom_fisher.sf(table[0, 0] - 1, + total, ngood, nsample, ci.low) + assert_allclose(sf, alpha, rtol=1e-11) + + @pytest.mark.parametrize('table', [ + [[0, 0], [5, 10]], + [[5, 10], [0, 0]], + [[0, 5], [0, 10]], + [[5, 0], [10, 0]], + ]) + def test_row_or_col_zero(self, table): + result = odds_ratio(table) + assert_equal(result.statistic, np.nan) + ci = result.confidence_interval() + assert_equal((ci.low, ci.high), (0, np.inf)) + + @pytest.mark.parametrize("case", + [[0.95, 'two-sided', 0.4879913, 2.635883], + [0.90, 'two-sided', 0.5588516, 2.301663]]) + def test_sample_odds_ratio_ci(self, case): + # Compare the sample odds ratio confidence interval to the R function + # oddsratio.wald from the epitools package, e.g. + # > library(epitools) + # > table = matrix(c(10, 20, 41, 93), nrow=2, ncol=2, byrow=TRUE) + # > result = oddsratio.wald(table) + # > result$measure + # odds ratio with 95% C.I. + # Predictor estimate lower upper + # Exposed1 1.000000 NA NA + # Exposed2 1.134146 0.4879913 2.635883 + + confidence_level, alternative, ref_low, ref_high = case + table = [[10, 20], [41, 93]] + result = odds_ratio(table, kind='sample') + assert_allclose(result.statistic, 1.134146, rtol=1e-6) + ci = result.confidence_interval(confidence_level, alternative) + assert_allclose([ci.low, ci.high], [ref_low, ref_high], rtol=1e-6) + + @pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided']) + def test_sample_odds_ratio_one_sided_ci(self, alternative): + # can't find a good reference for one-sided CI, so bump up the sample + # size and compare against the conditional odds ratio CI + table = [[1000, 2000], [4100, 9300]] + res = odds_ratio(table, kind='sample') + ref = odds_ratio(table, kind='conditional') + assert_allclose(res.statistic, ref.statistic, atol=1e-5) + assert_allclose(res.confidence_interval(alternative=alternative), + ref.confidence_interval(alternative=alternative), + atol=2e-3) + + @pytest.mark.parametrize('kind', ['sample', 'conditional']) + @pytest.mark.parametrize('bad_table', [123, "foo", [10, 11, 12]]) + def test_invalid_table_shape(self, kind, bad_table): + with pytest.raises(ValueError, match="Invalid shape"): + odds_ratio(bad_table, kind=kind) + + def test_invalid_table_type(self): + with pytest.raises(ValueError, match='must be an array of integers'): + odds_ratio([[1.0, 3.4], [5.0, 9.9]]) + + def test_negative_table_values(self): + with pytest.raises(ValueError, match='must be nonnegative'): + odds_ratio([[1, 2], [3, -4]]) + + def test_invalid_kind(self): + with pytest.raises(ValueError, match='`kind` must be'): + odds_ratio([[10, 20], [30, 14]], kind='magnetoreluctance') + + def test_invalid_alternative(self): + result = odds_ratio([[5, 10], [2, 32]]) + with pytest.raises(ValueError, match='`alternative` must be'): + result.confidence_interval(alternative='depleneration') + + @pytest.mark.parametrize('level', [-0.5, 1.5]) + def test_invalid_confidence_level(self, level): 
+ result = odds_ratio([[5, 10], [2, 32]]) + with pytest.raises(ValueError, match='must be between 0 and 1'): + result.confidence_interval(confidence_level=level) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_qmc.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_qmc.py new file mode 100644 index 0000000000000000000000000000000000000000..968e45c8196671470dd69271cf6cbac206b24f40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_qmc.py @@ -0,0 +1,1410 @@ +import os +from collections import Counter +from itertools import combinations, product + +import pytest +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_array_equal + +from scipy.spatial import distance +from scipy.stats import shapiro +from scipy.stats._sobol import _test_find_index +from scipy.stats import qmc +from scipy.stats._qmc import ( + van_der_corput, n_primes, primes_from_2_to, + update_discrepancy, QMCEngine, _l1_norm, + _perturb_discrepancy, _lloyd_centroidal_voronoi_tessellation +) + + +class TestUtils: + def test_scale(self): + # 1d scalar + space = [[0], [1], [0.5]] + out = [[-2], [6], [2]] + scaled_space = qmc.scale(space, l_bounds=-2, u_bounds=6) + + assert_allclose(scaled_space, out) + + # 2d space + space = [[0, 0], [1, 1], [0.5, 0.5]] + bounds = np.array([[-2, 0], [6, 5]]) + out = [[-2, 0], [6, 5], [2, 2.5]] + + scaled_space = qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1]) + + assert_allclose(scaled_space, out) + + scaled_back_space = qmc.scale(scaled_space, l_bounds=bounds[0], + u_bounds=bounds[1], reverse=True) + assert_allclose(scaled_back_space, space) + + # broadcast + space = [[0, 0, 0], [1, 1, 1], [0.5, 0.5, 0.5]] + l_bounds, u_bounds = 0, [6, 5, 3] + out = [[0, 0, 0], [6, 5, 3], [3, 2.5, 1.5]] + + scaled_space = qmc.scale(space, l_bounds=l_bounds, u_bounds=u_bounds) + + assert_allclose(scaled_space, out) + + def test_scale_random(self): + rng = np.random.default_rng(317589836511269190194010915937762468165) + sample = rng.random((30, 10)) + a = -rng.random(10) * 10 + b = rng.random(10) * 10 + scaled = qmc.scale(sample, a, b, reverse=False) + unscaled = qmc.scale(scaled, a, b, reverse=True) + assert_allclose(unscaled, sample) + + def test_scale_errors(self): + with pytest.raises(ValueError, match=r"Sample is not a 2D array"): + space = [0, 1, 0.5] + qmc.scale(space, l_bounds=-2, u_bounds=6) + + with pytest.raises(ValueError, match=r"Bounds are not consistent"): + space = [[0, 0], [1, 1], [0.5, 0.5]] + bounds = np.array([[-2, 6], [6, 5]]) + qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1]) + + with pytest.raises(ValueError, match=r"'l_bounds' and 'u_bounds'" + r" must be broadcastable"): + space = [[0, 0], [1, 1], [0.5, 0.5]] + l_bounds, u_bounds = [-2, 0, 2], [6, 5] + qmc.scale(space, l_bounds=l_bounds, u_bounds=u_bounds) + + with pytest.raises(ValueError, match=r"'l_bounds' and 'u_bounds'" + r" must be broadcastable"): + space = [[0, 0], [1, 1], [0.5, 0.5]] + bounds = np.array([[-2, 0, 2], [6, 5, 5]]) + qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1]) + + with pytest.raises(ValueError, match=r"Sample is not in unit " + r"hypercube"): + space = [[0, 0], [1, 1.5], [0.5, 0.5]] + bounds = np.array([[-2, 0], [6, 5]]) + qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1]) + + with pytest.raises(ValueError, match=r"Sample is out of bounds"): + out = [[-2, 0], [6, 5], [8, 2.5]] + bounds = np.array([[-2, 0], [6, 5]]) + qmc.scale(out, l_bounds=bounds[0], u_bounds=bounds[1], + reverse=True) + + def 
test_discrepancy(self): + space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]]) + space_1 = (2.0 * space_1 - 1.0) / (2.0 * 6.0) + space_2 = np.array([[1, 5], [2, 4], [3, 3], [4, 2], [5, 1], [6, 6]]) + space_2 = (2.0 * space_2 - 1.0) / (2.0 * 6.0) + + # From Fang et al. Design and modeling for computer experiments, 2006 + assert_allclose(qmc.discrepancy(space_1), 0.0081, atol=1e-4) + assert_allclose(qmc.discrepancy(space_2), 0.0105, atol=1e-4) + + # From Zhou Y.-D. et al. Mixture discrepancy for quasi-random point + # sets. Journal of Complexity, 29 (3-4), pp. 283-301, 2013. + # Example 4 on Page 298 + sample = np.array([[2, 1, 1, 2, 2, 2], + [1, 2, 2, 2, 2, 2], + [2, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 2, 2], + [1, 2, 2, 2, 1, 1], + [2, 2, 2, 2, 1, 1], + [2, 2, 2, 1, 2, 2]]) + sample = (2.0 * sample - 1.0) / (2.0 * 2.0) + + assert_allclose(qmc.discrepancy(sample, method='MD'), 2.5000, + atol=1e-4) + assert_allclose(qmc.discrepancy(sample, method='WD'), 1.3680, + atol=1e-4) + assert_allclose(qmc.discrepancy(sample, method='CD'), 0.3172, + atol=1e-4) + + # From Tim P. et al. Minimizing the L2 and Linf star discrepancies + # of a single point in the unit hypercube. JCAM, 2005 + # Table 1 on Page 283 + for dim in [2, 4, 8, 16, 32, 64]: + ref = np.sqrt(3**(-dim)) + assert_allclose(qmc.discrepancy(np.array([[1]*dim]), + method='L2-star'), ref) + + def test_discrepancy_errors(self): + sample = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]]) + + with pytest.raises( + ValueError, match=r"Sample is not in unit hypercube" + ): + qmc.discrepancy(sample) + + with pytest.raises(ValueError, match=r"Sample is not a 2D array"): + qmc.discrepancy([1, 3]) + + sample = [[0, 0], [1, 1], [0.5, 0.5]] + with pytest.raises(ValueError, match=r"'toto' is not a valid ..."): + qmc.discrepancy(sample, method="toto") + + def test_discrepancy_parallel(self, monkeypatch): + sample = np.array([[2, 1, 1, 2, 2, 2], + [1, 2, 2, 2, 2, 2], + [2, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 2, 2], + [1, 2, 2, 2, 1, 1], + [2, 2, 2, 2, 1, 1], + [2, 2, 2, 1, 2, 2]]) + sample = (2.0 * sample - 1.0) / (2.0 * 2.0) + + assert_allclose(qmc.discrepancy(sample, method='MD', workers=8), + 2.5000, + atol=1e-4) + assert_allclose(qmc.discrepancy(sample, method='WD', workers=8), + 1.3680, + atol=1e-4) + assert_allclose(qmc.discrepancy(sample, method='CD', workers=8), + 0.3172, + atol=1e-4) + + # From Tim P. et al. Minimizing the L2 and Linf star discrepancies + # of a single point in the unit hypercube. 
JCAM, 2005 + # Table 1 on Page 283 + for dim in [2, 4, 8, 16, 32, 64]: + ref = np.sqrt(3 ** (-dim)) + assert_allclose(qmc.discrepancy(np.array([[1] * dim]), + method='L2-star', workers=-1), ref) + + monkeypatch.setattr(os, 'cpu_count', lambda: None) + with pytest.raises(NotImplementedError, match="Cannot determine the"): + qmc.discrepancy(sample, workers=-1) + + with pytest.raises(ValueError, match="Invalid number of workers..."): + qmc.discrepancy(sample, workers=-2) + + def test_geometric_discrepancy_errors(self): + sample = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]]) + + with pytest.raises(ValueError, match=r"Sample is not in unit hypercube"): + qmc.geometric_discrepancy(sample) + + with pytest.raises(ValueError, match=r"Sample is not a 2D array"): + qmc.geometric_discrepancy([1, 3]) + + sample = [[0, 0], [1, 1], [0.5, 0.5]] + with pytest.raises(ValueError, match=r"'toto' is not a valid ..."): + qmc.geometric_discrepancy(sample, method="toto") + + sample = np.array([[0, 0], [0, 0], [0, 1]]) + with pytest.warns(UserWarning, match="Sample contains duplicate points."): + qmc.geometric_discrepancy(sample) + + sample = np.array([[0.5, 0.5]]) + with pytest.raises(ValueError, match="Sample must contain at least two points"): + qmc.geometric_discrepancy(sample) + + def test_geometric_discrepancy(self): + sample = np.array([[0, 0], [1, 1]]) + assert_allclose(qmc.geometric_discrepancy(sample), np.sqrt(2)) + assert_allclose(qmc.geometric_discrepancy(sample, method="mst"), np.sqrt(2)) + + sample = np.array([[0, 0], [0, 1], [0.5, 1]]) + assert_allclose(qmc.geometric_discrepancy(sample), 0.5) + assert_allclose(qmc.geometric_discrepancy(sample, method="mst"), 0.75) + + sample = np.array([[0, 0], [0.25, 0.25], [1, 1]]) + assert_allclose(qmc.geometric_discrepancy(sample), np.sqrt(2) / 4) + assert_allclose(qmc.geometric_discrepancy(sample, method="mst"), np.sqrt(2) / 2) + assert_allclose(qmc.geometric_discrepancy(sample, metric="chebyshev"), 0.25) + assert_allclose( + qmc.geometric_discrepancy(sample, method="mst", metric="chebyshev"), 0.5 + ) + + rng = np.random.default_rng(191468432622931918890291693003068437394) + sample = qmc.LatinHypercube(d=3, seed=rng).random(50) + assert_allclose(qmc.geometric_discrepancy(sample), 0.05106012076093356) + assert_allclose( + qmc.geometric_discrepancy(sample, method='mst'), 0.19704396643366182 + ) + + @pytest.mark.xfail( + reason="minimum_spanning_tree ignores zero distances (#18892)", + strict=True, + ) + def test_geometric_discrepancy_mst_with_zero_distances(self): + sample = np.array([[0, 0], [0, 0], [0, 1]]) + assert_allclose(qmc.geometric_discrepancy(sample, method='mst'), 0.5) + + def test_update_discrepancy(self): + # From Fang et al. 
Design and modeling for computer experiments, 2006
+        space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
+        space_1 = (2.0 * space_1 - 1.0) / (2.0 * 6.0)
+
+        disc_init = qmc.discrepancy(space_1[:-1], iterative=True)
+        disc_iter = update_discrepancy(space_1[-1], space_1[:-1], disc_init)
+
+        assert_allclose(disc_iter, 0.0081, atol=1e-4)
+
+
+class QMCEngineTests:
+    """Generic tests for QMC engines."""
+    qmce = NotImplemented
+    can_scramble = NotImplemented
+    unscramble_nd = NotImplemented
+    scramble_nd = NotImplemented
+
+    scramble = [True, False]
+    ids = ["Scrambled", "Unscrambled"]
+
+    def engine(self, scramble: bool, seed=None, **kwargs) -> QMCEngine:
+        if self.can_scramble:
+            return self.qmce(scramble=scramble, seed=seed, **kwargs)
+        else:
+            if scramble:
+                pytest.skip()
+            else:
+                return self.qmce(seed=seed, **kwargs)
+
+    def reference(self, scramble: bool) -> np.ndarray:
+        return self.scramble_nd if scramble else self.unscramble_nd
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_0dim(self, scramble):
+        engine = self.engine(d=0, scramble=scramble)
+        sample = engine.random(4)
+        assert_array_equal(np.empty((4, 0)), sample)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_0sample(self, scramble):
+        engine = self.engine(d=2, scramble=scramble)
+        sample = engine.random(0)
+        assert_array_equal(np.empty((0, 2)), sample)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_1sample(self, scramble):
+        engine = self.engine(d=2, scramble=scramble)
+        sample = engine.random(1)
+        assert (1, 2) == sample.shape
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_bounds(self, scramble):
+        engine = self.engine(d=100, scramble=scramble)
+        sample = engine.random(512)
+        assert np.all(sample >= 0)
+        assert np.all(sample <= 1)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_sample(self, scramble):
+        ref_sample = self.reference(scramble=scramble)
+        engine = self.engine(d=2, scramble=scramble)
+        sample = engine.random(n=len(ref_sample))
+
+        assert_allclose(sample, ref_sample, atol=1e-1)
+        assert engine.num_generated == len(ref_sample)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_continuing(self, scramble):
+        engine = self.engine(d=2, scramble=scramble)
+        ref_sample = engine.random(n=8)
+
+        engine = self.engine(d=2, scramble=scramble)
+
+        n_half = len(ref_sample) // 2
+
+        _ = engine.random(n=n_half)
+        sample = engine.random(n=n_half)
+        assert_allclose(sample, ref_sample[n_half:], atol=1e-1)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    @pytest.mark.parametrize(
+        "seed",
+        (
+            170382760648021597650530316304495310428,
+            np.random.default_rng(170382760648021597650530316304495310428),
+            None,
+        ),
+    )
+    def test_reset(self, scramble, seed):
+        engine = self.engine(d=2, scramble=scramble, seed=seed)
+        ref_sample = engine.random(n=8)
+
+        engine.reset()
+        assert engine.num_generated == 0
+
+        sample = engine.random(n=8)
+        assert_allclose(sample, ref_sample)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_fast_forward(self, scramble):
+        engine = self.engine(d=2, scramble=scramble)
+        ref_sample = engine.random(n=8)
+
+        engine = self.engine(d=2, scramble=scramble)
+
+        engine.fast_forward(4)
+        sample = engine.random(n=4)
+
+        assert_allclose(sample, ref_sample[4:], atol=1e-1)
+
+        # alternate fast forwarding with sampling
+        engine.reset()
+        even_draws = []
+        for i in range(8):
+            if i % 2 == 0:
+                even_draws.append(engine.random())
+            else:
+                engine.fast_forward(1)
+        assert_allclose(
+            ref_sample[[i for i in range(8) if i % 2 == 0]],
+            np.concatenate(even_draws),
+            atol=1e-5
+        )
+
+    @pytest.mark.parametrize("scramble", [True])
+    def test_distribution(self, scramble):
+        d = 50
+        engine = self.engine(d=d, scramble=scramble)
+        sample = 
engine.random(1024) + assert_allclose( + np.mean(sample, axis=0), np.repeat(0.5, d), atol=1e-2 + ) + assert_allclose( + np.percentile(sample, 25, axis=0), np.repeat(0.25, d), atol=1e-2 + ) + assert_allclose( + np.percentile(sample, 75, axis=0), np.repeat(0.75, d), atol=1e-2 + ) + + def test_raises_optimizer(self): + message = r"'toto' is not a valid optimization method" + with pytest.raises(ValueError, match=message): + self.engine(d=1, scramble=False, optimization="toto") + + @pytest.mark.parametrize( + "optimization,metric", + [ + ("random-CD", qmc.discrepancy), + ("lloyd", lambda sample: -_l1_norm(sample))] + ) + def test_optimizers(self, optimization, metric): + engine = self.engine(d=2, scramble=False) + sample_ref = engine.random(n=64) + metric_ref = metric(sample_ref) + + optimal_ = self.engine(d=2, scramble=False, optimization=optimization) + sample_ = optimal_.random(n=64) + metric_ = metric(sample_) + + assert metric_ < metric_ref + + def test_consume_prng_state(self): + rng = np.random.default_rng(0xa29cabb11cfdf44ff6cac8bec254c2a0) + sample = [] + for i in range(3): + engine = self.engine(d=2, scramble=True, seed=rng) + sample.append(engine.random(4)) + + with pytest.raises(AssertionError, match="Arrays are not equal"): + assert_equal(sample[0], sample[1]) + with pytest.raises(AssertionError, match="Arrays are not equal"): + assert_equal(sample[0], sample[2]) + + +class TestHalton(QMCEngineTests): + qmce = qmc.Halton + can_scramble = True + # theoretical values known from Van der Corput + unscramble_nd = np.array([[0, 0], [1 / 2, 1 / 3], + [1 / 4, 2 / 3], [3 / 4, 1 / 9], + [1 / 8, 4 / 9], [5 / 8, 7 / 9], + [3 / 8, 2 / 9], [7 / 8, 5 / 9]]) + # theoretical values unknown: convergence properties checked + scramble_nd = np.array([[0.50246036, 0.93382481], + [0.00246036, 0.26715815], + [0.75246036, 0.60049148], + [0.25246036, 0.8227137 ], + [0.62746036, 0.15604704], + [0.12746036, 0.48938037], + [0.87746036, 0.71160259], + [0.37746036, 0.04493592]]) + + def test_workers(self): + ref_sample = self.reference(scramble=True) + engine = self.engine(d=2, scramble=True) + sample = engine.random(n=len(ref_sample), workers=8) + + assert_allclose(sample, ref_sample, atol=1e-3) + + # worker + integers + engine.reset() + ref_sample = engine.integers(10) + engine.reset() + sample = engine.integers(10, workers=8) + assert_equal(sample, ref_sample) + + +class TestLHS(QMCEngineTests): + qmce = qmc.LatinHypercube + can_scramble = True + + def test_continuing(self, *args): + pytest.skip("Not applicable: not a sequence.") + + def test_fast_forward(self, *args): + pytest.skip("Not applicable: not a sequence.") + + def test_sample(self, *args): + pytest.skip("Not applicable: the value of reference sample is" + " implementation dependent.") + + @pytest.mark.parametrize("strength", [1, 2]) + @pytest.mark.parametrize("scramble", [False, True]) + @pytest.mark.parametrize("optimization", [None, "random-CD"]) + def test_sample_stratified(self, optimization, scramble, strength): + seed = np.random.default_rng(37511836202578819870665127532742111260) + p = 5 + n = p**2 + d = 6 + + engine = qmc.LatinHypercube(d=d, scramble=scramble, + strength=strength, + optimization=optimization, + seed=seed) + sample = engine.random(n=n) + assert sample.shape == (n, d) + assert engine.num_generated == n + + # centering stratifies samples in the middle of equal segments: + # * inter-sample distance is constant in 1D sub-projections + # * after ordering, columns are equal + expected1d = (np.arange(n) + 0.5) / n + expected = 
np.broadcast_to(expected1d, (d, n)).T + assert np.any(sample != expected) + + sorted_sample = np.sort(sample, axis=0) + tol = 0.5 / n if scramble else 0 + + assert_allclose(sorted_sample, expected, atol=tol) + assert np.any(sample - expected > tol) + + if strength == 2 and optimization is None: + unique_elements = np.arange(p) + desired = set(product(unique_elements, unique_elements)) + + for i, j in combinations(range(engine.d), 2): + samples_2d = sample[:, [i, j]] + res = (samples_2d * p).astype(int) + res_set = {tuple(row) for row in res} + assert_equal(res_set, desired) + + def test_optimizer_1d(self): + # discrepancy measures are invariant under permuting factors and runs + engine = self.engine(d=1, scramble=False) + sample_ref = engine.random(n=64) + + optimal_ = self.engine(d=1, scramble=False, optimization="random-CD") + sample_ = optimal_.random(n=64) + + assert_array_equal(sample_ref, sample_) + + def test_raises(self): + message = r"not a valid strength" + with pytest.raises(ValueError, match=message): + qmc.LatinHypercube(1, strength=3) + + message = r"n is not the square of a prime number" + with pytest.raises(ValueError, match=message): + engine = qmc.LatinHypercube(d=2, strength=2) + engine.random(16) + + message = r"n is not the square of a prime number" + with pytest.raises(ValueError, match=message): + engine = qmc.LatinHypercube(d=2, strength=2) + engine.random(5) # because int(sqrt(5)) would result in 2 + + message = r"n is too small for d" + with pytest.raises(ValueError, match=message): + engine = qmc.LatinHypercube(d=5, strength=2) + engine.random(9) + + +class TestSobol(QMCEngineTests): + qmce = qmc.Sobol + can_scramble = True + # theoretical values from Joe Kuo2010 + unscramble_nd = np.array([[0., 0.], + [0.5, 0.5], + [0.75, 0.25], + [0.25, 0.75], + [0.375, 0.375], + [0.875, 0.875], + [0.625, 0.125], + [0.125, 0.625]]) + + # theoretical values unknown: convergence properties checked + scramble_nd = np.array([[0.25331921, 0.41371179], + [0.8654213, 0.9821167], + [0.70097554, 0.03664616], + [0.18027647, 0.60895735], + [0.10521339, 0.21897069], + [0.53019685, 0.66619033], + [0.91122276, 0.34580743], + [0.45337471, 0.78912079]]) + + def test_warning(self): + with pytest.warns(UserWarning, match=r"The balance properties of " + r"Sobol' points"): + engine = qmc.Sobol(1) + engine.random(10) + + def test_random_base2(self): + engine = qmc.Sobol(2, scramble=False) + sample = engine.random_base2(2) + assert_array_equal(self.unscramble_nd[:4], sample) + + # resampling still having N=2**n + sample = engine.random_base2(2) + assert_array_equal(self.unscramble_nd[4:8], sample) + + # resampling again but leading to N!=2**n + with pytest.raises(ValueError, match=r"The balance properties of " + r"Sobol' points"): + engine.random_base2(2) + + def test_raise(self): + with pytest.raises(ValueError, match=r"Maximum supported " + r"dimensionality"): + qmc.Sobol(qmc.Sobol.MAXDIM + 1) + + with pytest.raises(ValueError, match=r"Maximum supported " + r"'bits' is 64"): + qmc.Sobol(1, bits=65) + + def test_high_dim(self): + engine = qmc.Sobol(1111, scramble=False) + count1 = Counter(engine.random().flatten().tolist()) + count2 = Counter(engine.random().flatten().tolist()) + assert_equal(count1, Counter({0.0: 1111})) + assert_equal(count2, Counter({0.5: 1111})) + + @pytest.mark.parametrize("bits", [2, 3]) + def test_bits(self, bits): + engine = qmc.Sobol(2, scramble=False, bits=bits) + ns = 2**bits + sample = engine.random(ns) + assert_array_equal(self.unscramble_nd[:ns], sample) + + with 
pytest.raises(ValueError, match="increasing `bits`"): + engine.random() + + def test_64bits(self): + engine = qmc.Sobol(2, scramble=False, bits=64) + sample = engine.random(8) + assert_array_equal(self.unscramble_nd, sample) + + +class TestPoisson(QMCEngineTests): + qmce = qmc.PoissonDisk + can_scramble = False + + def test_bounds(self, *args): + pytest.skip("Too costly in memory.") + + def test_fast_forward(self, *args): + pytest.skip("Not applicable: recursive process.") + + def test_sample(self, *args): + pytest.skip("Not applicable: the value of reference sample is" + " implementation dependent.") + + def test_continuing(self, *args): + # can continue a sampling, but will not preserve the same order + # because candidates are lost, so we will not select the same center + radius = 0.05 + ns = 6 + engine = self.engine(d=2, radius=radius, scramble=False) + + sample_init = engine.random(n=ns) + assert len(sample_init) <= ns + assert l2_norm(sample_init) >= radius + + sample_continued = engine.random(n=ns) + assert len(sample_continued) <= ns + assert l2_norm(sample_continued) >= radius + + sample = np.concatenate([sample_init, sample_continued], axis=0) + assert len(sample) <= ns * 2 + assert l2_norm(sample) >= radius + + def test_mindist(self): + rng = np.random.default_rng(132074951149370773672162394161442690287) + ns = 50 + + low, high = 0.08, 0.2 + radii = (high - low) * rng.random(5) + low + + dimensions = [1, 3, 4] + hypersphere_methods = ["volume", "surface"] + + gen = product(dimensions, radii, hypersphere_methods) + + for d, radius, hypersphere in gen: + engine = self.qmce( + d=d, radius=radius, hypersphere=hypersphere, seed=rng + ) + sample = engine.random(ns) + + assert len(sample) <= ns + assert l2_norm(sample) >= radius + + def test_fill_space(self): + radius = 0.2 + engine = self.qmce(d=2, radius=radius) + + sample = engine.fill_space() + # circle packing problem is np complex + assert l2_norm(sample) >= radius + + def test_raises(self): + message = r"'toto' is not a valid hypersphere sampling" + with pytest.raises(ValueError, match=message): + qmc.PoissonDisk(1, hypersphere="toto") + + +class TestMultinomialQMC: + def test_validations(self): + # negative Ps + p = np.array([0.12, 0.26, -0.05, 0.35, 0.22]) + with pytest.raises(ValueError, match=r"Elements of pvals must " + r"be non-negative."): + qmc.MultinomialQMC(p, n_trials=10) + + # sum of P too large + p = np.array([0.12, 0.26, 0.1, 0.35, 0.22]) + message = r"Elements of pvals must sum to 1." + with pytest.raises(ValueError, match=message): + qmc.MultinomialQMC(p, n_trials=10) + + p = np.array([0.12, 0.26, 0.05, 0.35, 0.22]) + + message = r"Dimension of `engine` must be 1." + with pytest.raises(ValueError, match=message): + qmc.MultinomialQMC(p, n_trials=10, engine=qmc.Sobol(d=2)) + + message = r"`engine` must be an instance of..." 
+ with pytest.raises(ValueError, match=message): + qmc.MultinomialQMC(p, n_trials=10, engine=np.random.default_rng()) + + @pytest.mark.filterwarnings('ignore::UserWarning') + def test_MultinomialBasicDraw(self): + seed = np.random.default_rng(6955663962957011631562466584467607969) + p = np.array([0.12, 0.26, 0.05, 0.35, 0.22]) + n_trials = 100 + expected = np.atleast_2d(n_trials * p).astype(int) + engine = qmc.MultinomialQMC(p, n_trials=n_trials, seed=seed) + assert_allclose(engine.random(1), expected, atol=1) + + def test_MultinomialDistribution(self): + seed = np.random.default_rng(77797854505813727292048130876699859000) + p = np.array([0.12, 0.26, 0.05, 0.35, 0.22]) + engine = qmc.MultinomialQMC(p, n_trials=8192, seed=seed) + draws = engine.random(1) + assert_allclose(draws / np.sum(draws), np.atleast_2d(p), atol=1e-4) + + def test_FindIndex(self): + p_cumulative = np.array([0.1, 0.4, 0.45, 0.6, 0.75, 0.9, 0.99, 1.0]) + size = len(p_cumulative) + assert_equal(_test_find_index(p_cumulative, size, 0.0), 0) + assert_equal(_test_find_index(p_cumulative, size, 0.4), 2) + assert_equal(_test_find_index(p_cumulative, size, 0.44999), 2) + assert_equal(_test_find_index(p_cumulative, size, 0.45001), 3) + assert_equal(_test_find_index(p_cumulative, size, 1.0), size - 1) + + @pytest.mark.filterwarnings('ignore::UserWarning') + def test_other_engine(self): + # same as test_MultinomialBasicDraw with different engine + seed = np.random.default_rng(283753519042773243071753037669078065412) + p = np.array([0.12, 0.26, 0.05, 0.35, 0.22]) + n_trials = 100 + expected = np.atleast_2d(n_trials * p).astype(int) + base_engine = qmc.Sobol(1, scramble=True, seed=seed) + engine = qmc.MultinomialQMC(p, n_trials=n_trials, engine=base_engine, + seed=seed) + assert_allclose(engine.random(1), expected, atol=1) + + +class TestNormalQMC: + def test_NormalQMC(self): + # d = 1 + engine = qmc.MultivariateNormalQMC(mean=np.zeros(1)) + samples = engine.random() + assert_equal(samples.shape, (1, 1)) + samples = engine.random(n=5) + assert_equal(samples.shape, (5, 1)) + # d = 2 + engine = qmc.MultivariateNormalQMC(mean=np.zeros(2)) + samples = engine.random() + assert_equal(samples.shape, (1, 2)) + samples = engine.random(n=5) + assert_equal(samples.shape, (5, 2)) + + def test_NormalQMCInvTransform(self): + # d = 1 + engine = qmc.MultivariateNormalQMC( + mean=np.zeros(1), inv_transform=True) + samples = engine.random() + assert_equal(samples.shape, (1, 1)) + samples = engine.random(n=5) + assert_equal(samples.shape, (5, 1)) + # d = 2 + engine = qmc.MultivariateNormalQMC( + mean=np.zeros(2), inv_transform=True) + samples = engine.random() + assert_equal(samples.shape, (1, 2)) + samples = engine.random(n=5) + assert_equal(samples.shape, (5, 2)) + + def test_NormalQMCSeeded(self): + # test even dimension + seed = np.random.default_rng(274600237797326520096085022671371676017) + engine = qmc.MultivariateNormalQMC( + mean=np.zeros(2), inv_transform=False, seed=seed) + samples = engine.random(n=2) + samples_expected = np.array([[-0.932001, -0.522923], + [-1.477655, 0.846851]]) + assert_allclose(samples, samples_expected, atol=1e-4) + + # test odd dimension + seed = np.random.default_rng(274600237797326520096085022671371676017) + engine = qmc.MultivariateNormalQMC( + mean=np.zeros(3), inv_transform=False, seed=seed) + samples = engine.random(n=2) + samples_expected = np.array([[-0.932001, -0.522923, 0.036578], + [-1.778011, 0.912428, -0.065421]]) + assert_allclose(samples, samples_expected, atol=1e-4) + + # same test with another engine 
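+        # (With inv_transform=False the normal variates come from Box-Muller,
+        # which consumes two uniform dimensions per pair of normals, so a
+        # 3-dimensional normal needs a 4-dimensional base engine.)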
+ seed = np.random.default_rng(274600237797326520096085022671371676017) + base_engine = qmc.Sobol(4, scramble=True, seed=seed) + engine = qmc.MultivariateNormalQMC( + mean=np.zeros(3), inv_transform=False, + engine=base_engine, seed=seed + ) + samples = engine.random(n=2) + samples_expected = np.array([[-0.932001, -0.522923, 0.036578], + [-1.778011, 0.912428, -0.065421]]) + assert_allclose(samples, samples_expected, atol=1e-4) + + def test_NormalQMCSeededInvTransform(self): + # test even dimension + seed = np.random.default_rng(288527772707286126646493545351112463929) + engine = qmc.MultivariateNormalQMC( + mean=np.zeros(2), seed=seed, inv_transform=True) + samples = engine.random(n=2) + samples_expected = np.array([[-0.913237, -0.964026], + [0.255904, 0.003068]]) + assert_allclose(samples, samples_expected, atol=1e-4) + + # test odd dimension + seed = np.random.default_rng(288527772707286126646493545351112463929) + engine = qmc.MultivariateNormalQMC( + mean=np.zeros(3), seed=seed, inv_transform=True) + samples = engine.random(n=2) + samples_expected = np.array([[-0.913237, -0.964026, 0.355501], + [0.699261, 2.90213 , -0.6418]]) + assert_allclose(samples, samples_expected, atol=1e-4) + + def test_other_engine(self): + for d in (0, 1, 2): + base_engine = qmc.Sobol(d=d, scramble=False) + engine = qmc.MultivariateNormalQMC(mean=np.zeros(d), + engine=base_engine, + inv_transform=True) + samples = engine.random() + assert_equal(samples.shape, (1, d)) + + def test_NormalQMCShapiro(self): + rng = np.random.default_rng(13242) + engine = qmc.MultivariateNormalQMC(mean=np.zeros(2), seed=rng) + samples = engine.random(n=256) + assert all(np.abs(samples.mean(axis=0)) < 1e-2) + assert all(np.abs(samples.std(axis=0) - 1) < 1e-2) + # perform Shapiro-Wilk test for normality + for i in (0, 1): + _, pval = shapiro(samples[:, i]) + assert pval > 0.9 + # make sure samples are uncorrelated + cov = np.cov(samples.transpose()) + assert np.abs(cov[0, 1]) < 1e-2 + + def test_NormalQMCShapiroInvTransform(self): + rng = np.random.default_rng(32344554) + engine = qmc.MultivariateNormalQMC( + mean=np.zeros(2), inv_transform=True, seed=rng) + samples = engine.random(n=256) + assert all(np.abs(samples.mean(axis=0)) < 1e-2) + assert all(np.abs(samples.std(axis=0) - 1) < 1e-2) + # perform Shapiro-Wilk test for normality + for i in (0, 1): + _, pval = shapiro(samples[:, i]) + assert pval > 0.9 + # make sure samples are uncorrelated + cov = np.cov(samples.transpose()) + assert np.abs(cov[0, 1]) < 1e-2 + + +class TestMultivariateNormalQMC: + + def test_validations(self): + + message = r"Dimension of `engine` must be consistent" + with pytest.raises(ValueError, match=message): + qmc.MultivariateNormalQMC([0], engine=qmc.Sobol(d=2)) + + message = r"Dimension of `engine` must be consistent" + with pytest.raises(ValueError, match=message): + qmc.MultivariateNormalQMC([0, 0, 0], engine=qmc.Sobol(d=4)) + + message = r"`engine` must be an instance of..." + with pytest.raises(ValueError, match=message): + qmc.MultivariateNormalQMC([0, 0], engine=np.random.default_rng()) + + message = r"Covariance matrix not PSD." + with pytest.raises(ValueError, match=message): + qmc.MultivariateNormalQMC([0, 0], [[1, 2], [2, 1]]) + + message = r"Covariance matrix is not symmetric." + with pytest.raises(ValueError, match=message): + qmc.MultivariateNormalQMC([0, 0], [[1, 0], [2, 1]]) + + message = r"Dimension mismatch between mean and covariance." 
+ with pytest.raises(ValueError, match=message): + qmc.MultivariateNormalQMC([0], [[1, 0], [0, 1]]) + + def test_MultivariateNormalQMCNonPD(self): + # try with non-pd but psd cov; should work + engine = qmc.MultivariateNormalQMC( + [0, 0, 0], [[1, 0, 1], [0, 1, 1], [1, 1, 2]], + ) + assert engine._corr_matrix is not None + + def test_MultivariateNormalQMC(self): + # d = 1 scalar + engine = qmc.MultivariateNormalQMC(mean=0, cov=5) + samples = engine.random() + assert_equal(samples.shape, (1, 1)) + samples = engine.random(n=5) + assert_equal(samples.shape, (5, 1)) + + # d = 2 list + engine = qmc.MultivariateNormalQMC(mean=[0, 1], cov=[[1, 0], [0, 1]]) + samples = engine.random() + assert_equal(samples.shape, (1, 2)) + samples = engine.random(n=5) + assert_equal(samples.shape, (5, 2)) + + # d = 3 np.array + mean = np.array([0, 1, 2]) + cov = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + engine = qmc.MultivariateNormalQMC(mean, cov) + samples = engine.random() + assert_equal(samples.shape, (1, 3)) + samples = engine.random(n=5) + assert_equal(samples.shape, (5, 3)) + + def test_MultivariateNormalQMCInvTransform(self): + # d = 1 scalar + engine = qmc.MultivariateNormalQMC(mean=0, cov=5, inv_transform=True) + samples = engine.random() + assert_equal(samples.shape, (1, 1)) + samples = engine.random(n=5) + assert_equal(samples.shape, (5, 1)) + + # d = 2 list + engine = qmc.MultivariateNormalQMC( + mean=[0, 1], cov=[[1, 0], [0, 1]], inv_transform=True, + ) + samples = engine.random() + assert_equal(samples.shape, (1, 2)) + samples = engine.random(n=5) + assert_equal(samples.shape, (5, 2)) + + # d = 3 np.array + mean = np.array([0, 1, 2]) + cov = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + engine = qmc.MultivariateNormalQMC(mean, cov, inv_transform=True) + samples = engine.random() + assert_equal(samples.shape, (1, 3)) + samples = engine.random(n=5) + assert_equal(samples.shape, (5, 3)) + + def test_MultivariateNormalQMCSeeded(self): + # test even dimension + rng = np.random.default_rng(180182791534511062935571481899241825000) + a = rng.standard_normal((2, 2)) + A = a @ a.transpose() + np.diag(rng.random(2)) + engine = qmc.MultivariateNormalQMC(np.array([0, 0]), A, + inv_transform=False, seed=rng) + samples = engine.random(n=2) + samples_expected = np.array([[-0.64419, -0.882413], + [0.837199, 2.045301]]) + assert_allclose(samples, samples_expected, atol=1e-4) + + # test odd dimension + rng = np.random.default_rng(180182791534511062935571481899241825000) + a = rng.standard_normal((3, 3)) + A = a @ a.transpose() + np.diag(rng.random(3)) + engine = qmc.MultivariateNormalQMC(np.array([0, 0, 0]), A, + inv_transform=False, seed=rng) + samples = engine.random(n=2) + samples_expected = np.array([[-0.693853, -1.265338, -0.088024], + [1.620193, 2.679222, 0.457343]]) + assert_allclose(samples, samples_expected, atol=1e-4) + + def test_MultivariateNormalQMCSeededInvTransform(self): + # test even dimension + rng = np.random.default_rng(224125808928297329711992996940871155974) + a = rng.standard_normal((2, 2)) + A = a @ a.transpose() + np.diag(rng.random(2)) + engine = qmc.MultivariateNormalQMC( + np.array([0, 0]), A, seed=rng, inv_transform=True + ) + samples = engine.random(n=2) + samples_expected = np.array([[0.682171, -3.114233], + [-0.098463, 0.668069]]) + assert_allclose(samples, samples_expected, atol=1e-4) + + # test odd dimension + rng = np.random.default_rng(224125808928297329711992996940871155974) + a = rng.standard_normal((3, 3)) + A = a @ a.transpose() + np.diag(rng.random(3)) + engine = 
qmc.MultivariateNormalQMC( + np.array([0, 0, 0]), A, seed=rng, inv_transform=True + ) + samples = engine.random(n=2) + samples_expected = np.array([[0.988061, -1.644089, -0.877035], + [-1.771731, 1.096988, 2.024744]]) + assert_allclose(samples, samples_expected, atol=1e-4) + + def test_MultivariateNormalQMCShapiro(self): + # test the standard case + seed = np.random.default_rng(188960007281846377164494575845971640) + engine = qmc.MultivariateNormalQMC( + mean=[0, 0], cov=[[1, 0], [0, 1]], seed=seed + ) + samples = engine.random(n=256) + assert all(np.abs(samples.mean(axis=0)) < 1e-2) + assert all(np.abs(samples.std(axis=0) - 1) < 1e-2) + # perform Shapiro-Wilk test for normality + for i in (0, 1): + _, pval = shapiro(samples[:, i]) + assert pval > 0.9 + # make sure samples are uncorrelated + cov = np.cov(samples.transpose()) + assert np.abs(cov[0, 1]) < 1e-2 + + # test the correlated, non-zero mean case + engine = qmc.MultivariateNormalQMC( + mean=[1.0, 2.0], cov=[[1.5, 0.5], [0.5, 1.5]], seed=seed + ) + samples = engine.random(n=256) + assert all(np.abs(samples.mean(axis=0) - [1, 2]) < 1e-2) + assert all(np.abs(samples.std(axis=0) - np.sqrt(1.5)) < 1e-2) + # perform Shapiro-Wilk test for normality + for i in (0, 1): + _, pval = shapiro(samples[:, i]) + assert pval > 0.9 + # check covariance + cov = np.cov(samples.transpose()) + assert np.abs(cov[0, 1] - 0.5) < 1e-2 + + def test_MultivariateNormalQMCShapiroInvTransform(self): + # test the standard case + seed = np.random.default_rng(200089821034563288698994840831440331329) + engine = qmc.MultivariateNormalQMC( + mean=[0, 0], cov=[[1, 0], [0, 1]], seed=seed, inv_transform=True + ) + samples = engine.random(n=256) + assert all(np.abs(samples.mean(axis=0)) < 1e-2) + assert all(np.abs(samples.std(axis=0) - 1) < 1e-2) + # perform Shapiro-Wilk test for normality + for i in (0, 1): + _, pval = shapiro(samples[:, i]) + assert pval > 0.9 + # make sure samples are uncorrelated + cov = np.cov(samples.transpose()) + assert np.abs(cov[0, 1]) < 1e-2 + + # test the correlated, non-zero mean case + engine = qmc.MultivariateNormalQMC( + mean=[1.0, 2.0], + cov=[[1.5, 0.5], [0.5, 1.5]], + seed=seed, + inv_transform=True, + ) + samples = engine.random(n=256) + assert all(np.abs(samples.mean(axis=0) - [1, 2]) < 1e-2) + assert all(np.abs(samples.std(axis=0) - np.sqrt(1.5)) < 1e-2) + # perform Shapiro-Wilk test for normality + for i in (0, 1): + _, pval = shapiro(samples[:, i]) + assert pval > 0.9 + # check covariance + cov = np.cov(samples.transpose()) + assert np.abs(cov[0, 1] - 0.5) < 1e-2 + + def test_MultivariateNormalQMCDegenerate(self): + # X, Y iid standard Normal and Z = X + Y, random vector (X, Y, Z) + seed = np.random.default_rng(16320637417581448357869821654290448620) + engine = qmc.MultivariateNormalQMC( + mean=[0.0, 0.0, 0.0], + cov=[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 1.0, 2.0]], + seed=seed, + ) + samples = engine.random(n=512) + assert all(np.abs(samples.mean(axis=0)) < 1e-2) + assert np.abs(np.std(samples[:, 0]) - 1) < 1e-2 + assert np.abs(np.std(samples[:, 1]) - 1) < 1e-2 + assert np.abs(np.std(samples[:, 2]) - np.sqrt(2)) < 1e-2 + for i in (0, 1, 2): + _, pval = shapiro(samples[:, i]) + assert pval > 0.8 + cov = np.cov(samples.transpose()) + assert np.abs(cov[0, 1]) < 1e-2 + assert np.abs(cov[0, 2] - 1) < 1e-2 + # check to see if X + Y = Z almost exactly + assert all(np.abs(samples[:, 0] + samples[:, 1] - samples[:, 2]) + < 1e-5) + + +class TestLloyd: + def test_lloyd(self): + # quite sensible seed as it can go up before going further 
down + rng = np.random.RandomState(1809831) + sample = rng.uniform(0, 1, size=(128, 2)) + base_l1 = _l1_norm(sample) + base_l2 = l2_norm(sample) + + for _ in range(4): + sample_lloyd = _lloyd_centroidal_voronoi_tessellation( + sample, maxiter=1, + ) + curr_l1 = _l1_norm(sample_lloyd) + curr_l2 = l2_norm(sample_lloyd) + + # higher is better for the distance measures + assert base_l1 < curr_l1 + assert base_l2 < curr_l2 + + base_l1 = curr_l1 + base_l2 = curr_l2 + + sample = sample_lloyd + + def test_lloyd_non_mutating(self): + """ + Verify that the input samples are not mutated in place and that they do + not share memory with the output. + """ + sample_orig = np.array([[0.1, 0.1], + [0.1, 0.2], + [0.2, 0.1], + [0.2, 0.2]]) + sample_copy = sample_orig.copy() + new_sample = _lloyd_centroidal_voronoi_tessellation( + sample=sample_orig + ) + assert_allclose(sample_orig, sample_copy) + assert not np.may_share_memory(sample_orig, new_sample) + + def test_lloyd_errors(self): + with pytest.raises(ValueError, match=r"`sample` is not a 2D array"): + sample = [0, 1, 0.5] + _lloyd_centroidal_voronoi_tessellation(sample) + + msg = r"`sample` dimension is not >= 2" + with pytest.raises(ValueError, match=msg): + sample = [[0], [0.4], [1]] + _lloyd_centroidal_voronoi_tessellation(sample) + + msg = r"`sample` is not in unit hypercube" + with pytest.raises(ValueError, match=msg): + sample = [[-1.1, 0], [0.1, 0.4], [1, 2]] + _lloyd_centroidal_voronoi_tessellation(sample) + + +# mindist +def l2_norm(sample): + return distance.pdist(sample).min() diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_rank.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_rank.py new file mode 100644 index 0000000000000000000000000000000000000000..2d65902425fd2ac808299fe3ef4bbb8c05b260c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_rank.py @@ -0,0 +1,336 @@ +import numpy as np +from numpy.testing import assert_equal, assert_array_equal +import pytest + +from scipy.stats import rankdata, tiecorrect +from scipy._lib._util import np_long + + +class TestTieCorrect: + + def test_empty(self): + """An empty array requires no correction, should return 1.0.""" + ranks = np.array([], dtype=np.float64) + c = tiecorrect(ranks) + assert_equal(c, 1.0) + + def test_one(self): + """A single element requires no correction, should return 1.0.""" + ranks = np.array([1.0], dtype=np.float64) + c = tiecorrect(ranks) + assert_equal(c, 1.0) + + def test_no_correction(self): + """Arrays with no ties require no correction.""" + ranks = np.arange(2.0) + c = tiecorrect(ranks) + assert_equal(c, 1.0) + ranks = np.arange(3.0) + c = tiecorrect(ranks) + assert_equal(c, 1.0) + + def test_basic(self): + """Check a few basic examples of the tie correction factor.""" + # One tie of two elements + ranks = np.array([1.0, 2.5, 2.5]) + c = tiecorrect(ranks) + T = 2.0 + N = ranks.size + expected = 1.0 - (T**3 - T) / (N**3 - N) + assert_equal(c, expected) + + # One tie of two elements (same as above, but tie is not at the end) + ranks = np.array([1.5, 1.5, 3.0]) + c = tiecorrect(ranks) + T = 2.0 + N = ranks.size + expected = 1.0 - (T**3 - T) / (N**3 - N) + assert_equal(c, expected) + + # One tie of three elements + ranks = np.array([1.0, 3.0, 3.0, 3.0]) + c = tiecorrect(ranks) + T = 3.0 + N = ranks.size + expected = 1.0 - (T**3 - T) / (N**3 - N) + assert_equal(c, expected) + + # Two ties, lengths 2 and 3. 
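+        # (In general the correction is 1 - sum_k (T_k**3 - T_k) / (N**3 - N)
+        # over tie groups of size T_k; the cases above instantiate it.)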
+ ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0]) + c = tiecorrect(ranks) + T1 = 2.0 + T2 = 3.0 + N = ranks.size + expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N) + assert_equal(c, expected) + + def test_overflow(self): + ntie, k = 2000, 5 + a = np.repeat(np.arange(k), ntie) + n = a.size # ntie * k + out = tiecorrect(rankdata(a)) + assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n)) + + +class TestRankData: + + def test_empty(self): + """stats.rankdata([]) should return an empty array.""" + a = np.array([], dtype=int) + r = rankdata(a) + assert_array_equal(r, np.array([], dtype=np.float64)) + r = rankdata([]) + assert_array_equal(r, np.array([], dtype=np.float64)) + + @pytest.mark.parametrize("shape", [(0, 1, 2)]) + @pytest.mark.parametrize("axis", [None, *range(3)]) + def test_empty_multidim(self, shape, axis): + a = np.empty(shape, dtype=int) + r = rankdata(a, axis=axis) + expected_shape = (0,) if axis is None else shape + assert_equal(r.shape, expected_shape) + assert_equal(r.dtype, np.float64) + + def test_one(self): + """Check stats.rankdata with an array of length 1.""" + data = [100] + a = np.array(data, dtype=int) + r = rankdata(a) + assert_array_equal(r, np.array([1.0], dtype=np.float64)) + r = rankdata(data) + assert_array_equal(r, np.array([1.0], dtype=np.float64)) + + def test_basic(self): + """Basic tests of stats.rankdata.""" + data = [100, 10, 50] + expected = np.array([3.0, 1.0, 2.0], dtype=np.float64) + a = np.array(data, dtype=int) + r = rankdata(a) + assert_array_equal(r, expected) + r = rankdata(data) + assert_array_equal(r, expected) + + data = [40, 10, 30, 10, 50] + expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64) + a = np.array(data, dtype=int) + r = rankdata(a) + assert_array_equal(r, expected) + r = rankdata(data) + assert_array_equal(r, expected) + + data = [20, 20, 20, 10, 10, 10] + expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64) + a = np.array(data, dtype=int) + r = rankdata(a) + assert_array_equal(r, expected) + r = rankdata(data) + assert_array_equal(r, expected) + # The docstring states explicitly that the argument is flattened. 
+        a2d = a.reshape(2, 3)
+        r = rankdata(a2d)
+        assert_array_equal(r, expected)
+
+    def test_rankdata_object_string(self):
+
+        def min_rank(a):
+            return [1 + sum(i < j for i in a) for j in a]
+
+        def max_rank(a):
+            return [sum(i <= j for i in a) for j in a]
+
+        def ordinal_rank(a):
+            return min_rank([(x, i) for i, x in enumerate(a)])
+
+        def average_rank(a):
+            return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]
+
+        def dense_rank(a):
+            b = np.unique(a)
+            return [1 + sum(i < j for i in b) for j in a]
+
+        rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
+                     average=average_rank, dense=dense_rank)
+
+        def check_ranks(a):
+            for method in 'min', 'max', 'dense', 'ordinal', 'average':
+                out = rankdata(a, method=method)
+                assert_array_equal(out, rankf[method](a))
+
+        val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
+        check_ranks(np.random.choice(val, 200))
+        check_ranks(np.random.choice(val, 200).astype('object'))
+
+        val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
+        check_ranks(np.random.choice(val, 200).astype('object'))
+
+    def test_large_int(self):
+        data = np.array([2**60, 2**60+1], dtype=np.uint64)
+        r = rankdata(data)
+        assert_array_equal(r, [1.0, 2.0])
+
+        data = np.array([2**60, 2**60+1], dtype=np.int64)
+        r = rankdata(data)
+        assert_array_equal(r, [1.0, 2.0])
+
+        data = np.array([2**60, -2**60+1], dtype=np.int64)
+        r = rankdata(data)
+        assert_array_equal(r, [2.0, 1.0])
+
+    def test_big_tie(self):
+        for n in [10000, 100000, 1000000]:
+            data = np.ones(n, dtype=int)
+            r = rankdata(data)
+            expected_rank = 0.5 * (n + 1)
+            assert_array_equal(r, expected_rank * data,
+                               "test failed with n=%d" % n)
+
+    def test_axis(self):
+        data = [[0, 2, 1],
+                [4, 2, 2]]
+        expected0 = [[1., 1.5, 1.],
+                     [2., 1.5, 2.]]
+        r0 = rankdata(data, axis=0)
+        assert_array_equal(r0, expected0)
+        expected1 = [[1., 3., 2.],
+                     [3., 1.5, 1.5]]
+        r1 = rankdata(data, axis=1)
+        assert_array_equal(r1, expected1)
+
+    methods = ["average", "min", "max", "dense", "ordinal"]
+    dtypes = [np.float64] + [np_long]*4
+
+    @pytest.mark.parametrize("axis", [0, 1])
+    @pytest.mark.parametrize("method, dtype", zip(methods, dtypes))
+    def test_size_0_axis(self, axis, method, dtype):
+        shape = (3, 0)
+        data = np.zeros(shape)
+        r = rankdata(data, method=method, axis=axis)
+        assert_equal(r.shape, shape)
+        assert_equal(r.dtype, dtype)
+
+    @pytest.mark.parametrize('axis', range(3))
+    @pytest.mark.parametrize('method', methods)
+    def test_nan_policy_omit_3d(self, axis, method):
+        shape = (20, 21, 22)
+        rng = np.random.RandomState(23983242)
+
+        a = rng.random(size=shape)
+        i = rng.random(size=shape) < 0.4
+        j = rng.random(size=shape) < 0.1
+        k = rng.random(size=shape) < 0.1
+        a[i] = np.nan
+        a[j] = -np.inf
+        a[k] = np.inf
+
+        def rank_1d_omit(a, method):
+            out = np.zeros_like(a)
+            i = np.isnan(a)
+            a_compressed = a[~i]
+            res = rankdata(a_compressed, method)
+            out[~i] = res
+            out[i] = np.nan
+            return out
+
+        def rank_omit(a, method, axis):
+            return np.apply_along_axis(lambda a: rank_1d_omit(a, method),
+                                       axis, a)
+
+        res = rankdata(a, method, axis=axis, nan_policy='omit')
+        res0 = rank_omit(a, method, axis=axis)
+
+        assert_array_equal(res, res0)
+
+    def test_nan_policy_2d_axis_none(self):
+        # 2 2d-array test with axis=None
+        data = [[0, np.nan, 3],
+                [4, 2, np.nan],
+                [1, 2, 2]]
+        assert_array_equal(rankdata(data, axis=None, nan_policy='omit'),
+                           [1., np.nan, 6., 7., 4., np.nan, 2., 4., 4.])
+        assert_array_equal(rankdata(data, axis=None, nan_policy='propagate'),
+                           [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, + np.nan, np.nan, np.nan]) + + def test_nan_policy_raise(self): + # 1 1d-array test + data = [0, 2, 3, -2, np.nan, np.nan] + with pytest.raises(ValueError, match="The input contains nan"): + rankdata(data, nan_policy='raise') + + # 2 2d-array test + data = [[0, np.nan, 3], + [4, 2, np.nan], + [np.nan, 2, 2]] + + with pytest.raises(ValueError, match="The input contains nan"): + rankdata(data, axis=0, nan_policy="raise") + + with pytest.raises(ValueError, match="The input contains nan"): + rankdata(data, axis=1, nan_policy="raise") + + def test_nan_policy_propagate(self): + # 1 1d-array test + data = [0, 2, 3, -2, np.nan, np.nan] + assert_array_equal(rankdata(data, nan_policy='propagate'), + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]) + + # 2 2d-array test + data = [[0, np.nan, 3], + [4, 2, np.nan], + [1, 2, 2]] + assert_array_equal(rankdata(data, axis=0, nan_policy='propagate'), + [[1, np.nan, np.nan], + [3, np.nan, np.nan], + [2, np.nan, np.nan]]) + assert_array_equal(rankdata(data, axis=1, nan_policy='propagate'), + [[np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan], + [1, 2.5, 2.5]]) + + +_cases = ( + # values, method, expected + ([], 'average', []), + ([], 'min', []), + ([], 'max', []), + ([], 'dense', []), + ([], 'ordinal', []), + # + ([100], 'average', [1.0]), + ([100], 'min', [1.0]), + ([100], 'max', [1.0]), + ([100], 'dense', [1.0]), + ([100], 'ordinal', [1.0]), + # + ([100, 100, 100], 'average', [2.0, 2.0, 2.0]), + ([100, 100, 100], 'min', [1.0, 1.0, 1.0]), + ([100, 100, 100], 'max', [3.0, 3.0, 3.0]), + ([100, 100, 100], 'dense', [1.0, 1.0, 1.0]), + ([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]), + # + ([100, 300, 200], 'average', [1.0, 3.0, 2.0]), + ([100, 300, 200], 'min', [1.0, 3.0, 2.0]), + ([100, 300, 200], 'max', [1.0, 3.0, 2.0]), + ([100, 300, 200], 'dense', [1.0, 3.0, 2.0]), + ([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]), + # + ([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]), + ([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]), + ([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]), + ([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]), + ([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]), + # + ([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]), + ([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]), + ([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]), + ([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]), + ([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]), + # + ([10] * 30, 'ordinal', np.arange(1.0, 31.0)), +) + + +def test_cases(): + for values, method, expected in _cases: + r = rankdata(values, method=method) + assert_array_equal(r, expected) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_relative_risk.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_relative_risk.py new file mode 100644 index 0000000000000000000000000000000000000000..b75e64d929f319465b1f1d62af4fb2096c2ab2ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_relative_risk.py @@ -0,0 +1,95 @@ +import pytest +import numpy as np +from numpy.testing import assert_allclose, assert_equal +from scipy.stats.contingency import relative_risk + + +# Test just the calculation of the relative risk, including edge +# cases that result in a relative risk of 0, inf or nan. 
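+# (The relative risk is the ratio of incidence proportions,
+# RR = (exposed_cases / exposed_total) / (control_cases / control_total);
+# e.g. the first case below is (1/4) / (3/8) = 0.25 / 0.375.)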
+@pytest.mark.parametrize( + 'exposed_cases, exposed_total, control_cases, control_total, expected_rr', + [(1, 4, 3, 8, 0.25 / 0.375), + (0, 10, 5, 20, 0), + (0, 10, 0, 20, np.nan), + (5, 15, 0, 20, np.inf)] +) +def test_relative_risk(exposed_cases, exposed_total, + control_cases, control_total, expected_rr): + result = relative_risk(exposed_cases, exposed_total, + control_cases, control_total) + assert_allclose(result.relative_risk, expected_rr, rtol=1e-13) + + +def test_relative_risk_confidence_interval(): + result = relative_risk(exposed_cases=16, exposed_total=128, + control_cases=24, control_total=256) + rr = result.relative_risk + ci = result.confidence_interval(confidence_level=0.95) + # The corresponding calculation in R using the epitools package. + # + # > library(epitools) + # > c <- matrix(c(232, 112, 24, 16), nrow=2) + # > result <- riskratio(c) + # > result$measure + # risk ratio with 95% C.I. + # Predictor estimate lower upper + # Exposed1 1.000000 NA NA + # Exposed2 1.333333 0.7347317 2.419628 + # + # The last line is the result that we want. + assert_allclose(rr, 4/3) + assert_allclose((ci.low, ci.high), (0.7347317, 2.419628), rtol=5e-7) + + +def test_relative_risk_ci_conflevel0(): + result = relative_risk(exposed_cases=4, exposed_total=12, + control_cases=5, control_total=30) + rr = result.relative_risk + assert_allclose(rr, 2.0, rtol=1e-14) + ci = result.confidence_interval(0) + assert_allclose((ci.low, ci.high), (2.0, 2.0), rtol=1e-12) + + +def test_relative_risk_ci_conflevel1(): + result = relative_risk(exposed_cases=4, exposed_total=12, + control_cases=5, control_total=30) + ci = result.confidence_interval(1) + assert_equal((ci.low, ci.high), (0, np.inf)) + + +def test_relative_risk_ci_edge_cases_00(): + result = relative_risk(exposed_cases=0, exposed_total=12, + control_cases=0, control_total=30) + assert_equal(result.relative_risk, np.nan) + ci = result.confidence_interval() + assert_equal((ci.low, ci.high), (np.nan, np.nan)) + + +def test_relative_risk_ci_edge_cases_01(): + result = relative_risk(exposed_cases=0, exposed_total=12, + control_cases=1, control_total=30) + assert_equal(result.relative_risk, 0) + ci = result.confidence_interval() + assert_equal((ci.low, ci.high), (0.0, np.nan)) + + +def test_relative_risk_ci_edge_cases_10(): + result = relative_risk(exposed_cases=1, exposed_total=12, + control_cases=0, control_total=30) + assert_equal(result.relative_risk, np.inf) + ci = result.confidence_interval() + assert_equal((ci.low, ci.high), (np.nan, np.inf)) + + +@pytest.mark.parametrize('ec, et, cc, ct', [(0, 0, 10, 20), + (-1, 10, 1, 5), + (1, 10, 0, 0), + (1, 10, -1, 4)]) +def test_relative_risk_bad_value(ec, et, cc, ct): + with pytest.raises(ValueError, match="must be an integer not less than"): + relative_risk(ec, et, cc, ct) + + +def test_relative_risk_bad_type(): + with pytest.raises(TypeError, match="must be an integer"): + relative_risk(1, 10, 2.0, 40) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_resampling.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_resampling.py new file mode 100644 index 0000000000000000000000000000000000000000..cdb216b61495272bdceca31ea1b3b804a26437ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_resampling.py @@ -0,0 +1,1748 @@ +import numpy as np +import pytest +from scipy.stats import bootstrap, monte_carlo_test, permutation_test +from numpy.testing import assert_allclose, assert_equal, suppress_warnings +from scipy import stats +from scipy import special 
+from .. import _resampling as _resampling +from scipy._lib._util import rng_integers +from scipy.optimize import root + + +def test_bootstrap_iv(): + + message = "`data` must be a sequence of samples." + with pytest.raises(ValueError, match=message): + bootstrap(1, np.mean) + + message = "`data` must contain at least one sample." + with pytest.raises(ValueError, match=message): + bootstrap(tuple(), np.mean) + + message = "each sample in `data` must contain two or more observations..." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3], [1]), np.mean) + + message = ("When `paired is True`, all samples must have the same length ") + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3], [1, 2, 3, 4]), np.mean, paired=True) + + message = "`vectorized` must be `True`, `False`, or `None`." + with pytest.raises(ValueError, match=message): + bootstrap(1, np.mean, vectorized='ekki') + + message = "`axis` must be an integer." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, axis=1.5) + + message = "could not convert string to float" + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, confidence_level='ni') + + message = "`n_resamples` must be a non-negative integer." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, n_resamples=-1000) + + message = "`n_resamples` must be a non-negative integer." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, n_resamples=1000.5) + + message = "`batch` must be a positive integer or None." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, batch=-1000) + + message = "`batch` must be a positive integer or None." + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, batch=1000.5) + + message = "`method` must be in" + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, method='ekki') + + message = "`bootstrap_result` must have attribute `bootstrap_distribution'" + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, bootstrap_result=10) + + message = "Either `bootstrap_result.bootstrap_distribution.size`" + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, n_resamples=0) + + message = "'herring' cannot be used to seed a" + with pytest.raises(ValueError, match=message): + bootstrap(([1, 2, 3],), np.mean, random_state='herring') + + +@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa']) +@pytest.mark.parametrize("axis", [0, 1, 2]) +def test_bootstrap_batch(method, axis): + # for one-sample statistics, batch size shouldn't affect the result + np.random.seed(0) + + x = np.random.rand(10, 11, 12) + res1 = bootstrap((x,), np.mean, batch=None, method=method, + random_state=0, axis=axis, n_resamples=100) + res2 = bootstrap((x,), np.mean, batch=10, method=method, + random_state=0, axis=axis, n_resamples=100) + + assert_equal(res2.confidence_interval.low, res1.confidence_interval.low) + assert_equal(res2.confidence_interval.high, res1.confidence_interval.high) + assert_equal(res2.standard_error, res1.standard_error) + + +@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa']) +def test_bootstrap_paired(method): + # test that `paired` works as expected + np.random.seed(0) + n = 100 + x = np.random.rand(n) + y = np.random.rand(n) + + def my_statistic(x, y, axis=-1): + return ((x-y)**2).mean(axis=axis) + + def my_paired_statistic(i, 
axis=-1): + a = x[i] + b = y[i] + res = my_statistic(a, b) + return res + + i = np.arange(len(x)) + + res1 = bootstrap((i,), my_paired_statistic, random_state=0) + res2 = bootstrap((x, y), my_statistic, paired=True, random_state=0) + + assert_allclose(res1.confidence_interval, res2.confidence_interval) + assert_allclose(res1.standard_error, res2.standard_error) + + +@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa']) +@pytest.mark.parametrize("axis", [0, 1, 2]) +@pytest.mark.parametrize("paired", [True, False]) +def test_bootstrap_vectorized(method, axis, paired): + # test that paired is vectorized as expected: when samples are tiled, + # CI and standard_error of each axis-slice is the same as those of the + # original 1d sample + + np.random.seed(0) + + def my_statistic(x, y, z, axis=-1): + return x.mean(axis=axis) + y.mean(axis=axis) + z.mean(axis=axis) + + shape = 10, 11, 12 + n_samples = shape[axis] + + x = np.random.rand(n_samples) + y = np.random.rand(n_samples) + z = np.random.rand(n_samples) + res1 = bootstrap((x, y, z), my_statistic, paired=paired, method=method, + random_state=0, axis=0, n_resamples=100) + assert (res1.bootstrap_distribution.shape + == res1.standard_error.shape + (100,)) + + reshape = [1, 1, 1] + reshape[axis] = n_samples + x = np.broadcast_to(x.reshape(reshape), shape) + y = np.broadcast_to(y.reshape(reshape), shape) + z = np.broadcast_to(z.reshape(reshape), shape) + res2 = bootstrap((x, y, z), my_statistic, paired=paired, method=method, + random_state=0, axis=axis, n_resamples=100) + + assert_allclose(res2.confidence_interval.low, + res1.confidence_interval.low) + assert_allclose(res2.confidence_interval.high, + res1.confidence_interval.high) + assert_allclose(res2.standard_error, res1.standard_error) + + result_shape = list(shape) + result_shape.pop(axis) + + assert_equal(res2.confidence_interval.low.shape, result_shape) + assert_equal(res2.confidence_interval.high.shape, result_shape) + assert_equal(res2.standard_error.shape, result_shape) + + +@pytest.mark.xfail_on_32bit("MemoryError with BCa observed in CI") +@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa']) +def test_bootstrap_against_theory(method): + # based on https://www.statology.org/confidence-intervals-python/ + rng = np.random.default_rng(2442101192988600726) + data = stats.norm.rvs(loc=5, scale=2, size=5000, random_state=rng) + alpha = 0.95 + dist = stats.t(df=len(data)-1, loc=np.mean(data), scale=stats.sem(data)) + expected_interval = dist.interval(confidence=alpha) + expected_se = dist.std() + + config = dict(data=(data,), statistic=np.mean, n_resamples=5000, + method=method, random_state=rng) + res = bootstrap(**config, confidence_level=alpha) + assert_allclose(res.confidence_interval, expected_interval, rtol=5e-4) + assert_allclose(res.standard_error, expected_se, atol=3e-4) + + config.update(dict(n_resamples=0, bootstrap_result=res)) + res = bootstrap(**config, confidence_level=alpha, alternative='less') + assert_allclose(res.confidence_interval.high, dist.ppf(alpha), rtol=5e-4) + + config.update(dict(n_resamples=0, bootstrap_result=res)) + res = bootstrap(**config, confidence_level=alpha, alternative='greater') + assert_allclose(res.confidence_interval.low, dist.ppf(1-alpha), rtol=5e-4) + + +tests_R = {"basic": (23.77, 79.12), + "percentile": (28.86, 84.21), + "BCa": (32.31, 91.43)} + + +@pytest.mark.parametrize("method, expected", tests_R.items()) +def test_bootstrap_against_R(method, expected): + # Compare against R's "boot" library + # library(boot) + + # 
stat <- function (x, a) { + # mean(x[a]) + # } + + # x <- c(10, 12, 12.5, 12.5, 13.9, 15, 21, 22, + # 23, 34, 50, 81, 89, 121, 134, 213) + + # # Use a large value so we get a few significant digits for the CI. + # n = 1000000 + # bootresult = boot(x, stat, n) + # result <- boot.ci(bootresult) + # print(result) + x = np.array([10, 12, 12.5, 12.5, 13.9, 15, 21, 22, + 23, 34, 50, 81, 89, 121, 134, 213]) + res = bootstrap((x,), np.mean, n_resamples=1000000, method=method, + random_state=0) + assert_allclose(res.confidence_interval, expected, rtol=0.005) + + +tests_against_itself_1samp = {"basic": 1780, + "percentile": 1784, + "BCa": 1784} + + +def test_multisample_BCa_against_R(): + # Because bootstrap is stochastic, it's tricky to test against reference + # behavior. Here, we show that SciPy's BCa CI matches R wboot's BCa CI + # much more closely than the other SciPy CIs do. + + # arbitrary skewed data + x = [0.75859206, 0.5910282, -0.4419409, -0.36654601, + 0.34955357, -1.38835871, 0.76735821] + y = [1.41186073, 0.49775975, 0.08275588, 0.24086388, + 0.03567057, 0.52024419, 0.31966611, 1.32067634] + + # a multi-sample statistic for which the BCa CI tends to be different + # from the other CIs + def statistic(x, y, axis): + s1 = stats.skew(x, axis=axis) + s2 = stats.skew(y, axis=axis) + return s1 - s2 + + # compute confidence intervals using each method + rng = np.random.default_rng(468865032284792692) + + res_basic = stats.bootstrap((x, y), statistic, method='basic', + batch=100, random_state=rng) + res_percent = stats.bootstrap((x, y), statistic, method='percentile', + batch=100, random_state=rng) + res_bca = stats.bootstrap((x, y), statistic, method='bca', + batch=100, random_state=rng) + + # compute midpoints so we can compare just one number for each + mid_basic = np.mean(res_basic.confidence_interval) + mid_percent = np.mean(res_percent.confidence_interval) + mid_bca = np.mean(res_bca.confidence_interval) + + # reference for BCA CI computed using R wboot package: + # library(wBoot) + # library(moments) + + # x = c(0.75859206, 0.5910282, -0.4419409, -0.36654601, + # 0.34955357, -1.38835871, 0.76735821) + # y = c(1.41186073, 0.49775975, 0.08275588, 0.24086388, + # 0.03567057, 0.52024419, 0.31966611, 1.32067634) + + # twoskew <- function(x1, y1) {skewness(x1) - skewness(y1)} + # boot.two.bca(x, y, skewness, conf.level = 0.95, + # R = 9999, stacked = FALSE) + mid_wboot = -1.5519 + + # compute percent difference relative to wboot BCA method + diff_basic = (mid_basic - mid_wboot)/abs(mid_wboot) + diff_percent = (mid_percent - mid_wboot)/abs(mid_wboot) + diff_bca = (mid_bca - mid_wboot)/abs(mid_wboot) + + # SciPy's BCa CI midpoint is much closer than that of the other methods + assert diff_basic < -0.15 + assert diff_percent > 0.15 + assert abs(diff_bca) < 0.03 + + +def test_BCa_acceleration_against_reference(): + # Compare the (deterministic) acceleration parameter for a multi-sample + # problem against a reference value. The example is from [1], but Efron's + # value seems inaccurate. 
Straightforward code for computing the
+    # reference acceleration (0.011008228344026734) is available at:
+    # https://github.com/scipy/scipy/pull/16455#issuecomment-1193400981
+
+    y = np.array([10, 27, 31, 40, 46, 50, 52, 104, 146])
+    z = np.array([16, 23, 38, 94, 99, 141, 197])
+
+    def statistic(z, y, axis=0):
+        return np.mean(z, axis=axis) - np.mean(y, axis=axis)
+
+    data = [z, y]
+    res = stats.bootstrap(data, statistic)
+
+    axis = -1
+    alpha = 0.95
+    theta_hat_b = res.bootstrap_distribution
+    batch = 100
+    _, _, a_hat = _resampling._bca_interval(data, statistic, axis, alpha,
+                                            theta_hat_b, batch)
+    assert_allclose(a_hat, 0.011008228344026734)
+
+
+@pytest.mark.parametrize("method, expected",
+                         tests_against_itself_1samp.items())
+def test_bootstrap_against_itself_1samp(method, expected):
+    # The expected values in this test were generated using bootstrap
+    # to check for unintended changes in behavior. The test also makes sure
+    # that bootstrap works with multi-sample statistics and that the
+    # `axis` argument works as expected / function is vectorized.
+    np.random.seed(0)
+
+    n = 100  # size of sample
+    n_resamples = 999  # number of bootstrap resamples used to form each CI
+    confidence_level = 0.9
+
+    # The true mean is 5
+    dist = stats.norm(loc=5, scale=1)
+    stat_true = dist.mean()
+
+    # Do the same thing 2000 times. (The code is fully vectorized.)
+    n_replications = 2000
+    data = dist.rvs(size=(n_replications, n))
+    res = bootstrap((data,),
+                    statistic=np.mean,
+                    confidence_level=confidence_level,
+                    n_resamples=n_resamples,
+                    batch=50,
+                    method=method,
+                    axis=-1)
+    ci = res.confidence_interval
+
+    # ci contains vectors of lower and upper confidence interval bounds
+    ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
+    assert ci_contains_true == expected
+
+    # ci_contains_true is not inconsistent with confidence_level
+    pvalue = stats.binomtest(ci_contains_true, n_replications,
+                             confidence_level).pvalue
+    assert pvalue > 0.1
+
+
+tests_against_itself_2samp = {"basic": 892,
+                              "percentile": 890}
+
+
+@pytest.mark.parametrize("method, expected",
+                         tests_against_itself_2samp.items())
+def test_bootstrap_against_itself_2samp(method, expected):
+    # The expected values in this test were generated using bootstrap
+    # to check for unintended changes in behavior. The test also makes sure
+    # that bootstrap works with multi-sample statistics and that the
+    # `axis` argument works as expected / function is vectorized.
+    np.random.seed(0)
+
+    n1 = 100  # size of sample 1
+    n2 = 120  # size of sample 2
+    n_resamples = 999  # number of bootstrap resamples used to form each CI
+    confidence_level = 0.9
+
+    # The statistic we're interested in is the difference in means
+    def my_stat(data1, data2, axis=-1):
+        mean1 = np.mean(data1, axis=axis)
+        mean2 = np.mean(data2, axis=axis)
+        return mean1 - mean2
+
+    # The true difference in the means is -0.1
+    dist1 = stats.norm(loc=0, scale=1)
+    dist2 = stats.norm(loc=0.1, scale=1)
+    stat_true = dist1.mean() - dist2.mean()
+
+    # Do the same thing 1000 times. (The code is fully vectorized.)
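+    # Under nominal coverage, ci_contains_true ~ Binomial(n_replications,
+    # confidence_level), so roughly 900 of the 1000 intervals should cover
+    # stat_true; the binomtest below checks that the observed count is
+    # consistent with that rate.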
+ n_replications = 1000 + data1 = dist1.rvs(size=(n_replications, n1)) + data2 = dist2.rvs(size=(n_replications, n2)) + res = bootstrap((data1, data2), + statistic=my_stat, + confidence_level=confidence_level, + n_resamples=n_resamples, + batch=50, + method=method, + axis=-1) + ci = res.confidence_interval + + # ci contains vectors of lower and upper confidence interval bounds + ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1])) + assert ci_contains_true == expected + + # ci_contains_true is not inconsistent with confidence_level + pvalue = stats.binomtest(ci_contains_true, n_replications, + confidence_level).pvalue + assert pvalue > 0.1 + + +@pytest.mark.parametrize("method", ["basic", "percentile"]) +@pytest.mark.parametrize("axis", [0, 1]) +def test_bootstrap_vectorized_3samp(method, axis): + def statistic(*data, axis=0): + # an arbitrary, vectorized statistic + return sum(sample.mean(axis) for sample in data) + + def statistic_1d(*data): + # the same statistic, not vectorized + for sample in data: + assert sample.ndim == 1 + return statistic(*data, axis=0) + + np.random.seed(0) + x = np.random.rand(4, 5) + y = np.random.rand(4, 5) + z = np.random.rand(4, 5) + res1 = bootstrap((x, y, z), statistic, vectorized=True, + axis=axis, n_resamples=100, method=method, random_state=0) + res2 = bootstrap((x, y, z), statistic_1d, vectorized=False, + axis=axis, n_resamples=100, method=method, random_state=0) + assert_allclose(res1.confidence_interval, res2.confidence_interval) + assert_allclose(res1.standard_error, res2.standard_error) + + +@pytest.mark.xfail_on_32bit("Failure is not concerning; see gh-14107") +@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"]) +@pytest.mark.parametrize("axis", [0, 1]) +def test_bootstrap_vectorized_1samp(method, axis): + def statistic(x, axis=0): + # an arbitrary, vectorized statistic + return x.mean(axis=axis) + + def statistic_1d(x): + # the same statistic, not vectorized + assert x.ndim == 1 + return statistic(x, axis=0) + + np.random.seed(0) + x = np.random.rand(4, 5) + res1 = bootstrap((x,), statistic, vectorized=True, axis=axis, + n_resamples=100, batch=None, method=method, + random_state=0) + res2 = bootstrap((x,), statistic_1d, vectorized=False, axis=axis, + n_resamples=100, batch=10, method=method, + random_state=0) + assert_allclose(res1.confidence_interval, res2.confidence_interval) + assert_allclose(res1.standard_error, res2.standard_error) + + +@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"]) +def test_bootstrap_degenerate(method): + data = 35 * [10000.] 
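+    # Every observation is identical, so each jackknife leave-one-out value
+    # equals the full-sample statistic; the centered jackknife sums in the
+    # BCa acceleration are then 0/0 (nan) and the interval is undefined.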
+ if method == "BCa": + with np.errstate(invalid='ignore'): + msg = "The BCa confidence interval cannot be calculated" + with pytest.warns(stats.DegenerateDataWarning, match=msg): + res = bootstrap([data, ], np.mean, method=method) + assert_equal(res.confidence_interval, (np.nan, np.nan)) + else: + res = bootstrap([data, ], np.mean, method=method) + assert_equal(res.confidence_interval, (10000., 10000.)) + assert_equal(res.standard_error, 0) + + +@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"]) +def test_bootstrap_gh15678(method): + # Check that gh-15678 is fixed: when statistic function returned a Python + # float, method="BCa" failed when trying to add a dimension to the float + rng = np.random.default_rng(354645618886684) + dist = stats.norm(loc=2, scale=4) + data = dist.rvs(size=100, random_state=rng) + data = (data,) + res = bootstrap(data, stats.skew, method=method, n_resamples=100, + random_state=np.random.default_rng(9563)) + # this always worked because np.apply_along_axis returns NumPy data type + ref = bootstrap(data, stats.skew, method=method, n_resamples=100, + random_state=np.random.default_rng(9563), vectorized=False) + assert_allclose(res.confidence_interval, ref.confidence_interval) + assert_allclose(res.standard_error, ref.standard_error) + assert isinstance(res.standard_error, np.float64) + + +def test_bootstrap_min(): + # Check that gh-15883 is fixed: percentileofscore should + # behave according to the 'mean' behavior and not trigger nan for BCa + rng = np.random.default_rng(1891289180021102) + dist = stats.norm(loc=2, scale=4) + data = dist.rvs(size=100, random_state=rng) + true_min = np.min(data) + data = (data,) + res = bootstrap(data, np.min, method="BCa", n_resamples=100, + random_state=np.random.default_rng(3942)) + assert true_min == res.confidence_interval.low + res2 = bootstrap(-np.array(data), np.max, method="BCa", n_resamples=100, + random_state=np.random.default_rng(3942)) + assert_allclose(-res.confidence_interval.low, + res2.confidence_interval.high) + assert_allclose(-res.confidence_interval.high, + res2.confidence_interval.low) + + +@pytest.mark.parametrize("additional_resamples", [0, 1000]) +def test_re_bootstrap(additional_resamples): + # Test behavior of parameter `bootstrap_result` + rng = np.random.default_rng(8958153316228384) + x = rng.random(size=100) + + n1 = 1000 + n2 = additional_resamples + n3 = n1 + additional_resamples + + rng = np.random.default_rng(296689032789913033) + res = stats.bootstrap((x,), np.mean, n_resamples=n1, random_state=rng, + confidence_level=0.95, method='percentile') + res = stats.bootstrap((x,), np.mean, n_resamples=n2, random_state=rng, + confidence_level=0.90, method='BCa', + bootstrap_result=res) + + rng = np.random.default_rng(296689032789913033) + ref = stats.bootstrap((x,), np.mean, n_resamples=n3, random_state=rng, + confidence_level=0.90, method='BCa') + + assert_allclose(res.standard_error, ref.standard_error, rtol=1e-14) + assert_allclose(res.confidence_interval, ref.confidence_interval, + rtol=1e-14) + + +@pytest.mark.xfail_on_32bit("Sensible to machine precision") +@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa']) +def test_bootstrap_alternative(method): + rng = np.random.default_rng(5894822712842015040) + dist = stats.norm(loc=2, scale=4) + data = (dist.rvs(size=(100), random_state=rng),) + + config = dict(data=data, statistic=np.std, random_state=rng, axis=-1) + t = stats.bootstrap(**config, confidence_level=0.9) + + config.update(dict(n_resamples=0, bootstrap_result=t)) 
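+    # n_resamples=0 adds no new resamples, so the one-sided intervals below
+    # are computed from the bootstrap distribution already stored in `t`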
+ l = stats.bootstrap(**config, confidence_level=0.95, alternative='less') + g = stats.bootstrap(**config, confidence_level=0.95, alternative='greater') + + assert_allclose(l.confidence_interval.high, t.confidence_interval.high, + rtol=1e-14) + assert_allclose(g.confidence_interval.low, t.confidence_interval.low, + rtol=1e-14) + assert np.isneginf(l.confidence_interval.low) + assert np.isposinf(g.confidence_interval.high) + + with pytest.raises(ValueError, match='`alternative` must be one of'): + stats.bootstrap(**config, alternative='ekki-ekki') + + +def test_jackknife_resample(): + shape = 3, 4, 5, 6 + np.random.seed(0) + x = np.random.rand(*shape) + y = next(_resampling._jackknife_resample(x)) + + for i in range(shape[-1]): + # each resample is indexed along second to last axis + # (last axis is the one the statistic will be taken over / consumed) + slc = y[..., i, :] + expected = np.delete(x, i, axis=-1) + + assert np.array_equal(slc, expected) + + y2 = np.concatenate(list(_resampling._jackknife_resample(x, batch=2)), + axis=-2) + assert np.array_equal(y2, y) + + +@pytest.mark.parametrize("rng_name", ["RandomState", "default_rng"]) +def test_bootstrap_resample(rng_name): + rng = getattr(np.random, rng_name, None) + if rng is None: + pytest.skip(f"{rng_name} not available.") + rng1 = rng(0) + rng2 = rng(0) + + n_resamples = 10 + shape = 3, 4, 5, 6 + + np.random.seed(0) + x = np.random.rand(*shape) + y = _resampling._bootstrap_resample(x, n_resamples, random_state=rng1) + + for i in range(n_resamples): + # each resample is indexed along second to last axis + # (last axis is the one the statistic will be taken over / consumed) + slc = y[..., i, :] + + js = rng_integers(rng2, 0, shape[-1], shape[-1]) + expected = x[..., js] + + assert np.array_equal(slc, expected) + + +@pytest.mark.parametrize("score", [0, 0.5, 1]) +@pytest.mark.parametrize("axis", [0, 1, 2]) +def test_percentile_of_score(score, axis): + shape = 10, 20, 30 + np.random.seed(0) + x = np.random.rand(*shape) + p = _resampling._percentile_of_score(x, score, axis=-1) + + def vectorized_pos(a, score, axis): + return np.apply_along_axis(stats.percentileofscore, axis, a, score) + + p2 = vectorized_pos(x, score, axis=-1)/100 + + assert_allclose(p, p2, 1e-15) + + +def test_percentile_along_axis(): + # the difference between _percentile_along_axis and np.percentile is that + # np.percentile gets _all_ the qs for each axis slice, whereas + # _percentile_along_axis gets the q corresponding with each axis slice + + shape = 10, 20 + np.random.seed(0) + x = np.random.rand(*shape) + q = np.random.rand(*shape[:-1]) * 100 + y = _resampling._percentile_along_axis(x, q) + + for i in range(shape[0]): + res = y[i] + expected = np.percentile(x[i], q[i], axis=-1) + assert_allclose(res, expected, 1e-15) + + +@pytest.mark.parametrize("axis", [0, 1, 2]) +def test_vectorize_statistic(axis): + # test that _vectorize_statistic vectorizes a statistic along `axis` + + def statistic(*data, axis): + # an arbitrary, vectorized statistic + return sum(sample.mean(axis) for sample in data) + + def statistic_1d(*data): + # the same statistic, not vectorized + for sample in data: + assert sample.ndim == 1 + return statistic(*data, axis=0) + + # vectorize the non-vectorized statistic + statistic2 = _resampling._vectorize_statistic(statistic_1d) + + np.random.seed(0) + x = np.random.rand(4, 5, 6) + y = np.random.rand(4, 1, 6) + z = np.random.rand(1, 5, 6) + + res1 = statistic(x, y, z, axis=axis) + res2 = statistic2(x, y, z, axis=axis) + assert_allclose(res1, res2) + 
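+# For orientation, a minimal sketch of what a vectorizing wrapper must do in
+# the single-sample case (hypothetical helper, not SciPy's implementation,
+# which also broadcasts multiple samples against one another):
+def _vectorize_statistic_sketch(statistic_1d):
+    def statistic(x, axis):
+        # apply the 1-d callable to every axis-slice of the n-d array
+        return np.apply_along_axis(statistic_1d, axis, x)
+    return statistic
+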
+
+@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
+def test_vector_valued_statistic(method):
+    # Generate 95% confidence interval around MLE of normal distribution
+    # parameters. Repeat 100 times, each time on sample of size 100.
+    # Check that confidence interval contains true parameters ~95 times.
+    # Confidence intervals are estimated and stochastic; a test failure
+    # does not necessarily indicate that something is wrong. More important
+    # than values of `counts` below is that the shapes of the outputs are
+    # correct.
+
+    rng = np.random.default_rng(2196847219)
+    params = 1, 0.5
+    sample = stats.norm.rvs(*params, size=(100, 100), random_state=rng)
+
+    def statistic(data, axis):
+        return np.asarray([np.mean(data, axis),
+                           np.std(data, axis, ddof=1)])
+
+    res = bootstrap((sample,), statistic, method=method, axis=-1,
+                    n_resamples=9999, batch=200)
+
+    counts = np.sum((res.confidence_interval.low.T < params)
+                    & (res.confidence_interval.high.T > params),
+                    axis=0)
+    assert np.all(counts >= 90)
+    assert np.all(counts <= 100)
+    assert res.confidence_interval.low.shape == (2, 100)
+    assert res.confidence_interval.high.shape == (2, 100)
+    assert res.standard_error.shape == (2, 100)
+    assert res.bootstrap_distribution.shape == (2, 100, 9999)
+
+
+@pytest.mark.slow
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+def test_vector_valued_statistic_gh17715():
+    # gh-17715 reported a mistake introduced in the extension of BCa to
+    # multi-sample statistics; a `len` should have been `.shape[-1]`. Check
+    # that this is resolved.
+
+    rng = np.random.default_rng(141921000979291141)
+
+    def concordance(x, y, axis):
+        xm = x.mean(axis)
+        ym = y.mean(axis)
+        cov = ((x - xm[..., None]) * (y - ym[..., None])).mean(axis)
+        return (2 * cov) / (x.var(axis) + y.var(axis) + (xm - ym) ** 2)
+
+    def statistic(tp, tn, fp, fn, axis):
+        actual = tp + fp
+        expected = tp + fn
+        return np.nan_to_num(concordance(actual, expected, axis))
+
+    def statistic_extradim(*args, axis):
+        return statistic(*args, axis)[np.newaxis, ...]
+
+    data = [[4, 0, 0, 2],  # (tp, tn, fp, fn)
+            [2, 1, 2, 1],
+            [0, 6, 0, 0],
+            [0, 6, 3, 0],
+            [0, 8, 1, 0]]
+    data = np.array(data).T
+
+    res = bootstrap(data, statistic_extradim, random_state=rng, paired=True)
+    ref = bootstrap(data, statistic, random_state=rng, paired=True)
+    assert_allclose(res.confidence_interval.low[0],
+                    ref.confidence_interval.low, atol=1e-15)
+    assert_allclose(res.confidence_interval.high[0],
+                    ref.confidence_interval.high, atol=1e-15)
+
+
+# --- Test Monte Carlo Hypothesis Test --- #
+
+class TestMonteCarloHypothesisTest:
+    atol = 2.5e-2  # for comparing p-value
+
+    def rvs(self, rvs_in, rs):
+        return lambda *args, **kwds: rvs_in(*args, random_state=rs, **kwds)
+
+    def test_input_validation(self):
+        # test that the appropriate error messages are raised for invalid input
+
+        def stat(x):
+            return stats.skewtest(x).statistic
+
+        message = "Array shapes are incompatible for broadcasting."
+        data = (np.zeros((2, 5)), np.zeros((3, 5)))
+        rvs = (stats.norm.rvs, stats.norm.rvs)
+        with pytest.raises(ValueError, match=message):
+            monte_carlo_test(data, rvs, lambda x, y: 1, axis=-1)
+
+        message = "`axis` must be an integer."
+        with pytest.raises(ValueError, match=message):
+            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, axis=1.5)
+
+        message = "`vectorized` must be `True`, `False`, or `None`."
+ with pytest.raises(ValueError, match=message): + monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, vectorized=1.5) + + message = "`rvs` must be callable or sequence of callables." + with pytest.raises(TypeError, match=message): + monte_carlo_test([1, 2, 3], None, stat) + with pytest.raises(TypeError, match=message): + monte_carlo_test([[1, 2], [3, 4]], [lambda x: x, None], stat) + + message = "If `rvs` is a sequence..." + with pytest.raises(ValueError, match=message): + monte_carlo_test([[1, 2, 3]], [lambda x: x, lambda x: x], stat) + + message = "`statistic` must be callable." + with pytest.raises(TypeError, match=message): + monte_carlo_test([1, 2, 3], stats.norm.rvs, None) + + message = "`n_resamples` must be a positive integer." + with pytest.raises(ValueError, match=message): + monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, + n_resamples=-1000) + + message = "`n_resamples` must be a positive integer." + with pytest.raises(ValueError, match=message): + monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, + n_resamples=1000.5) + + message = "`batch` must be a positive integer or None." + with pytest.raises(ValueError, match=message): + monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, batch=-1000) + + message = "`batch` must be a positive integer or None." + with pytest.raises(ValueError, match=message): + monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, batch=1000.5) + + message = "`alternative` must be in..." + with pytest.raises(ValueError, match=message): + monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, + alternative='ekki') + + + def test_batch(self): + # make sure that the `batch` parameter is respected by checking the + # maximum batch size provided in calls to `statistic` + rng = np.random.default_rng(23492340193) + x = rng.random(10) + + def statistic(x, axis): + batch_size = 1 if x.ndim == 1 else len(x) + statistic.batch_size = max(batch_size, statistic.batch_size) + statistic.counter += 1 + return stats.skewtest(x, axis=axis).statistic + statistic.counter = 0 + statistic.batch_size = 0 + + kwds = {'sample': x, 'statistic': statistic, + 'n_resamples': 1000, 'vectorized': True} + + kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398)) + res1 = monte_carlo_test(batch=1, **kwds) + assert_equal(statistic.counter, 1001) + assert_equal(statistic.batch_size, 1) + + kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398)) + statistic.counter = 0 + res2 = monte_carlo_test(batch=50, **kwds) + assert_equal(statistic.counter, 21) + assert_equal(statistic.batch_size, 50) + + kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398)) + statistic.counter = 0 + res3 = monte_carlo_test(**kwds) + assert_equal(statistic.counter, 2) + assert_equal(statistic.batch_size, 1000) + + assert_equal(res1.pvalue, res3.pvalue) + assert_equal(res2.pvalue, res3.pvalue) + + @pytest.mark.parametrize('axis', range(-3, 3)) + def test_axis(self, axis): + # test that Nd-array samples are handled correctly for valid values + # of the `axis` parameter + rng = np.random.default_rng(2389234) + norm_rvs = self.rvs(stats.norm.rvs, rng) + + size = [2, 3, 4] + size[axis] = 100 + x = norm_rvs(size=size) + expected = stats.skewtest(x, axis=axis) + + def statistic(x, axis): + return stats.skewtest(x, axis=axis).statistic + + res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True, + n_resamples=20000, axis=axis) + + assert_allclose(res.statistic, expected.statistic) + assert_allclose(res.pvalue, expected.pvalue, atol=self.atol) + + 
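+    # The tests below compare monte_carlo_test against asymptotic or exact
+    # references. For orientation: with alternative='greater' the returned
+    # p-value is the biased estimate
+    # (#{null >= observed} + 1) / (n_resamples + 1), the quantity asserted
+    # directly in test_p_never_zero and test_finite_precision_statistic
+    # further down.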
@pytest.mark.parametrize('alternative', ("less", "greater")) + @pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5)) # skewness + def test_against_ks_1samp(self, alternative, a): + # test that monte_carlo_test can reproduce pvalue of ks_1samp + rng = np.random.default_rng(65723433) + + x = stats.skewnorm.rvs(a=a, size=30, random_state=rng) + expected = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative) + + def statistic1d(x): + return stats.ks_1samp(x, stats.norm.cdf, mode='asymp', + alternative=alternative).statistic + + norm_rvs = self.rvs(stats.norm.rvs, rng) + res = monte_carlo_test(x, norm_rvs, statistic1d, + n_resamples=1000, vectorized=False, + alternative=alternative) + + assert_allclose(res.statistic, expected.statistic) + if alternative == 'greater': + assert_allclose(res.pvalue, expected.pvalue, atol=self.atol) + elif alternative == 'less': + assert_allclose(1-res.pvalue, expected.pvalue, atol=self.atol) + + @pytest.mark.parametrize('hypotest', (stats.skewtest, stats.kurtosistest)) + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + @pytest.mark.parametrize('a', np.linspace(-2, 2, 5)) # skewness + def test_against_normality_tests(self, hypotest, alternative, a): + # test that monte_carlo_test can reproduce pvalue of normality tests + rng = np.random.default_rng(85723405) + + x = stats.skewnorm.rvs(a=a, size=150, random_state=rng) + expected = hypotest(x, alternative=alternative) + + def statistic(x, axis): + return hypotest(x, axis=axis).statistic + + norm_rvs = self.rvs(stats.norm.rvs, rng) + res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True, + alternative=alternative) + + assert_allclose(res.statistic, expected.statistic) + assert_allclose(res.pvalue, expected.pvalue, atol=self.atol) + + @pytest.mark.parametrize('a', np.arange(-2, 3)) # skewness parameter + def test_against_normaltest(self, a): + # test that monte_carlo_test can reproduce pvalue of normaltest + rng = np.random.default_rng(12340513) + + x = stats.skewnorm.rvs(a=a, size=150, random_state=rng) + expected = stats.normaltest(x) + + def statistic(x, axis): + return stats.normaltest(x, axis=axis).statistic + + norm_rvs = self.rvs(stats.norm.rvs, rng) + res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True, + alternative='greater') + + assert_allclose(res.statistic, expected.statistic) + assert_allclose(res.pvalue, expected.pvalue, atol=self.atol) + + @pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5)) # skewness + def test_against_cramervonmises(self, a): + # test that monte_carlo_test can reproduce pvalue of cramervonmises + rng = np.random.default_rng(234874135) + + x = stats.skewnorm.rvs(a=a, size=30, random_state=rng) + expected = stats.cramervonmises(x, stats.norm.cdf) + + def statistic1d(x): + return stats.cramervonmises(x, stats.norm.cdf).statistic + + norm_rvs = self.rvs(stats.norm.rvs, rng) + res = monte_carlo_test(x, norm_rvs, statistic1d, + n_resamples=1000, vectorized=False, + alternative='greater') + + assert_allclose(res.statistic, expected.statistic) + assert_allclose(res.pvalue, expected.pvalue, atol=self.atol) + + @pytest.mark.parametrize('dist_name', ('norm', 'logistic')) + @pytest.mark.parametrize('i', range(5)) + def test_against_anderson(self, dist_name, i): + # test that monte_carlo_test can reproduce results of `anderson`. Note: + # `anderson` does not provide a p-value; it provides a list of + # significance levels and the associated critical value of the test + # statistic. `i` used to index this list. 
+ + # find the skewness for which the sample statistic matches one of the + # critical values provided by `stats.anderson` + + def fun(a): + rng = np.random.default_rng(394295467) + x = stats.tukeylambda.rvs(a, size=100, random_state=rng) + expected = stats.anderson(x, dist_name) + return expected.statistic - expected.critical_values[i] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + sol = root(fun, x0=0) + assert sol.success + + # get the significance level (p-value) associated with that critical + # value + a = sol.x[0] + rng = np.random.default_rng(394295467) + x = stats.tukeylambda.rvs(a, size=100, random_state=rng) + expected = stats.anderson(x, dist_name) + expected_stat = expected.statistic + expected_p = expected.significance_level[i]/100 + + # perform equivalent Monte Carlo test and compare results + def statistic1d(x): + return stats.anderson(x, dist_name).statistic + + dist_rvs = self.rvs(getattr(stats, dist_name).rvs, rng) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = monte_carlo_test(x, dist_rvs, + statistic1d, n_resamples=1000, + vectorized=False, alternative='greater') + + assert_allclose(res.statistic, expected_stat) + assert_allclose(res.pvalue, expected_p, atol=2*self.atol) + + def test_p_never_zero(self): + # Use biased estimate of p-value to ensure that p-value is never zero + # per monte_carlo_test reference [1] + rng = np.random.default_rng(2190176673029737545) + x = np.zeros(100) + res = monte_carlo_test(x, rng.random, np.mean, + vectorized=True, alternative='less') + assert res.pvalue == 0.0001 + + def test_against_ttest_ind(self): + # test that `monte_carlo_test` can reproduce results of `ttest_ind`. + rng = np.random.default_rng(219017667302737545) + data = rng.random(size=(2, 5)), rng.random(size=7) # broadcastable + rvs = rng.normal, rng.normal + def statistic(x, y, axis): + return stats.ttest_ind(x, y, axis).statistic + + res = stats.monte_carlo_test(data, rvs, statistic, axis=-1) + ref = stats.ttest_ind(data[0], [data[1]], axis=-1) + assert_allclose(res.statistic, ref.statistic) + assert_allclose(res.pvalue, ref.pvalue, rtol=2e-2) + + def test_against_f_oneway(self): + # test that `monte_carlo_test` can reproduce results of `f_oneway`. + rng = np.random.default_rng(219017667302737545) + data = (rng.random(size=(2, 100)), rng.random(size=(2, 101)), + rng.random(size=(2, 102)), rng.random(size=(2, 103))) + rvs = rng.normal, rng.normal, rng.normal, rng.normal + + def statistic(*args, axis): + return stats.f_oneway(*args, axis=axis).statistic + + res = stats.monte_carlo_test(data, rvs, statistic, axis=-1, + alternative='greater') + ref = stats.f_oneway(*data, axis=-1) + + assert_allclose(res.statistic, ref.statistic) + assert_allclose(res.pvalue, ref.pvalue, atol=1e-2) + + @pytest.mark.xfail_on_32bit("Statistic may not depend on sample order on 32-bit") + def test_finite_precision_statistic(self): + # Some statistics return numerically distinct values when the values + # should be equal in theory. Test that `monte_carlo_test` accounts + # for this in some way. + rng = np.random.default_rng(2549824598234528) + n_resamples = 9999 + def rvs(size): + return 1. 
* stats.bernoulli(p=0.333).rvs(size=size, random_state=rng)
+
+        x = rvs(100)
+        res = stats.monte_carlo_test(x, rvs, np.var, alternative='less',
+                                     n_resamples=n_resamples)
+        # show that having a tolerance matters
+        c0 = np.sum(res.null_distribution <= res.statistic)
+        c1 = np.sum(res.null_distribution <= res.statistic*(1+1e-15))
+        assert c0 != c1
+        assert res.pvalue == (c1 + 1)/(n_resamples + 1)
+
+
+class TestPermutationTest:
+
+    rtol = 1e-14
+
+    def setup_method(self):
+        self.rng = np.random.default_rng(7170559330470561044)
+
+    # -- Input validation -- #
+
+    def test_permutation_test_iv(self):
+
+        def stat(x, y, axis):
+            return stats.ttest_ind(x, y, axis=axis).statistic
+
+        message = "each sample in `data` must contain two or more ..."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1]), stat)
+
+        message = "`data` must be a tuple containing at least two samples"
+        with pytest.raises(ValueError, match=message):
+            permutation_test((1,), stat)
+        with pytest.raises(TypeError, match=message):
+            permutation_test(1, stat)
+
+        message = "`axis` must be an integer."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, axis=1.5)
+
+        message = "`permutation_type` must be in..."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat,
+                             permutation_type="ekki")
+
+        message = "`vectorized` must be `True`, `False`, or `None`."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, vectorized=1.5)
+
+        message = "`n_resamples` must be a positive integer."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=-1000)
+
+        message = "`n_resamples` must be a positive integer."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=1000.5)
+
+        message = "`batch` must be a positive integer or None."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=-1000)
+
+        message = "`batch` must be a positive integer or None."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=1000.5)
+
+        message = "`alternative` must be in..."
+ with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, alternative='ekki') + + message = "'herring' cannot be used to seed a" + with pytest.raises(ValueError, match=message): + permutation_test(([1, 2, 3], [1, 2, 3]), stat, + random_state='herring') + + # -- Test Parameters -- # + @pytest.mark.parametrize('random_state', [np.random.RandomState, + np.random.default_rng]) + @pytest.mark.parametrize('permutation_type', + ['pairings', 'samples', 'independent']) + def test_batch(self, permutation_type, random_state): + # make sure that the `batch` parameter is respected by checking the + # maximum batch size provided in calls to `statistic` + x = self.rng.random(10) + y = self.rng.random(10) + + def statistic(x, y, axis): + batch_size = 1 if x.ndim == 1 else len(x) + statistic.batch_size = max(batch_size, statistic.batch_size) + statistic.counter += 1 + return np.mean(x, axis=axis) - np.mean(y, axis=axis) + statistic.counter = 0 + statistic.batch_size = 0 + + kwds = {'n_resamples': 1000, 'permutation_type': permutation_type, + 'vectorized': True} + res1 = stats.permutation_test((x, y), statistic, batch=1, + random_state=random_state(0), **kwds) + assert_equal(statistic.counter, 1001) + assert_equal(statistic.batch_size, 1) + + statistic.counter = 0 + res2 = stats.permutation_test((x, y), statistic, batch=50, + random_state=random_state(0), **kwds) + assert_equal(statistic.counter, 21) + assert_equal(statistic.batch_size, 50) + + statistic.counter = 0 + res3 = stats.permutation_test((x, y), statistic, batch=1000, + random_state=random_state(0), **kwds) + assert_equal(statistic.counter, 2) + assert_equal(statistic.batch_size, 1000) + + assert_equal(res1.pvalue, res3.pvalue) + assert_equal(res2.pvalue, res3.pvalue) + + @pytest.mark.parametrize('random_state', [np.random.RandomState, + np.random.default_rng]) + @pytest.mark.parametrize('permutation_type, exact_size', + [('pairings', special.factorial(3)**2), + ('samples', 2**3), + ('independent', special.binom(6, 3))]) + def test_permutations(self, permutation_type, exact_size, random_state): + # make sure that the `permutations` parameter is respected by checking + # the size of the null distribution + x = self.rng.random(3) + y = self.rng.random(3) + + def statistic(x, y, axis): + return np.mean(x, axis=axis) - np.mean(y, axis=axis) + + kwds = {'permutation_type': permutation_type, + 'vectorized': True} + res = stats.permutation_test((x, y), statistic, n_resamples=3, + random_state=random_state(0), **kwds) + assert_equal(res.null_distribution.size, 3) + + res = stats.permutation_test((x, y), statistic, **kwds) + assert_equal(res.null_distribution.size, exact_size) + + # -- Randomized Permutation Tests -- # + + # To get reasonable accuracy, these next three tests are somewhat slow. + # Originally, I had them passing for all combinations of permutation type, + # alternative, and RNG, but that takes too long for CI. Instead, split + # into three tests, each testing a particular combination of the three + # parameters. 
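+    # (Each test first asserts that the exact null distribution is larger
+    # than `permutations` -- binom(nx + ny, nx) partitions for 'independent',
+    # 2**nx exchanges of paired observations for 'samples', and
+    # factorial(nx) orderings for 'pairings' -- so the randomized code path
+    # is really exercised.)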
+
+    def test_randomized_test_against_exact_both(self):
+        # check that the randomized and exact tests agree to reasonable
+        # precision for permutation_type='independent'
+
+        alternative, rng = 'less', 0
+
+        nx, ny, permutations = 8, 9, 24000
+        assert special.binom(nx + ny, nx) > permutations
+
+        x = stats.norm.rvs(size=nx)
+        y = stats.norm.rvs(size=ny)
+        data = x, y
+
+        def statistic(x, y, axis):
+            return np.mean(x, axis=axis) - np.mean(y, axis=axis)
+
+        kwds = {'vectorized': True, 'permutation_type': 'independent',
+                'batch': 100, 'alternative': alternative, 'random_state': rng}
+        res = permutation_test(data, statistic, n_resamples=permutations,
+                               **kwds)
+        res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)
+
+        assert res.statistic == res2.statistic
+        assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
+
+    @pytest.mark.slow()
+    def test_randomized_test_against_exact_samples(self):
+        # check that the randomized and exact tests agree to reasonable
+        # precision for permutation_type='samples'
+
+        alternative, rng = 'greater', None
+
+        nx, ny, permutations = 15, 15, 32000
+        assert 2**nx > permutations
+
+        x = stats.norm.rvs(size=nx)
+        y = stats.norm.rvs(size=ny)
+        data = x, y
+
+        def statistic(x, y, axis):
+            return np.mean(x - y, axis=axis)
+
+        kwds = {'vectorized': True, 'permutation_type': 'samples',
+                'batch': 100, 'alternative': alternative, 'random_state': rng}
+        res = permutation_test(data, statistic, n_resamples=permutations,
+                               **kwds)
+        res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)
+
+        assert res.statistic == res2.statistic
+        assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
+
+    def test_randomized_test_against_exact_pairings(self):
+        # check that the randomized and exact tests agree to reasonable
+        # precision for permutation_type='pairings'
+
+        alternative, rng = 'two-sided', self.rng
+
+        nx, ny, permutations = 8, 8, 40000
+        assert special.factorial(nx) > permutations
+
+        x = stats.norm.rvs(size=nx)
+        y = stats.norm.rvs(size=ny)
+        data = [x]
+
+        def statistic1d(x):
+            return stats.pearsonr(x, y)[0]
+
+        statistic = _resampling._vectorize_statistic(statistic1d)
+
+        kwds = {'vectorized': True, 'permutation_type': 'pairings',
+                'batch': 100, 'alternative': alternative, 'random_state': rng}
+        res = permutation_test(data, statistic, n_resamples=permutations,
+                               **kwds)
+        res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)
+
+        assert res.statistic == res2.statistic
+        assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
+
+    @pytest.mark.parametrize('alternative', ('less', 'greater'))
+    # Different conventions for two-sided p-value here vs. ttest_ind.
+    # Eventually, we can add multiple options for the two-sided alternative
+    # here in permutation_test.
+    @pytest.mark.parametrize('permutations', (30, 1e9))
+    @pytest.mark.parametrize('axis', (0, 1, 2))
+    def test_against_permutation_ttest(self, alternative, permutations, axis):
+        # check that this function and ttest_ind with permutations give
+        # essentially identical results.
+ + x = np.arange(3*4*5).reshape(3, 4, 5) + y = np.moveaxis(np.arange(4)[:, None, None], 0, axis) + + rng1 = np.random.default_rng(4337234444626115331) + res1 = stats.ttest_ind(x, y, permutations=permutations, axis=axis, + random_state=rng1, alternative=alternative) + + def statistic(x, y, axis): + return stats.ttest_ind(x, y, axis=axis).statistic + + rng2 = np.random.default_rng(4337234444626115331) + res2 = permutation_test((x, y), statistic, vectorized=True, + n_resamples=permutations, + alternative=alternative, axis=axis, + random_state=rng2) + + assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol) + assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol) + + # -- Independent (Unpaired) Sample Tests -- # + + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + def test_against_ks_2samp(self, alternative): + + x = self.rng.normal(size=4, scale=1) + y = self.rng.normal(size=5, loc=3, scale=3) + + expected = stats.ks_2samp(x, y, alternative=alternative, mode='exact') + + def statistic1d(x, y): + return stats.ks_2samp(x, y, mode='asymp', + alternative=alternative).statistic + + # ks_2samp is always a one-tailed 'greater' test + # it's the statistic that changes (D+ vs D- vs max(D+, D-)) + res = permutation_test((x, y), statistic1d, n_resamples=np.inf, + alternative='greater', random_state=self.rng) + + assert_allclose(res.statistic, expected.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol) + + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + def test_against_ansari(self, alternative): + + x = self.rng.normal(size=4, scale=1) + y = self.rng.normal(size=5, scale=3) + + # ansari has a different convention for 'alternative' + alternative_correspondence = {"less": "greater", + "greater": "less", + "two-sided": "two-sided"} + alternative_scipy = alternative_correspondence[alternative] + expected = stats.ansari(x, y, alternative=alternative_scipy) + + def statistic1d(x, y): + return stats.ansari(x, y).statistic + + res = permutation_test((x, y), statistic1d, n_resamples=np.inf, + alternative=alternative, random_state=self.rng) + + assert_allclose(res.statistic, expected.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol) + + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + def test_against_mannwhitneyu(self, alternative): + + x = stats.uniform.rvs(size=(3, 5, 2), loc=0, random_state=self.rng) + y = stats.uniform.rvs(size=(3, 5, 2), loc=0.05, random_state=self.rng) + + expected = stats.mannwhitneyu(x, y, axis=1, alternative=alternative) + + def statistic(x, y, axis): + return stats.mannwhitneyu(x, y, axis=axis).statistic + + res = permutation_test((x, y), statistic, vectorized=True, + n_resamples=np.inf, alternative=alternative, + axis=1, random_state=self.rng) + + assert_allclose(res.statistic, expected.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol) + + def test_against_cvm(self): + + x = stats.norm.rvs(size=4, scale=1, random_state=self.rng) + y = stats.norm.rvs(size=5, loc=3, scale=3, random_state=self.rng) + + expected = stats.cramervonmises_2samp(x, y, method='exact') + + def statistic1d(x, y): + return stats.cramervonmises_2samp(x, y, + method='asymptotic').statistic + + # cramervonmises_2samp has only one alternative, greater + res = permutation_test((x, y), statistic1d, n_resamples=np.inf, + alternative='greater', random_state=self.rng) + + assert_allclose(res.statistic, 
expected.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol) + + @pytest.mark.xslow() + @pytest.mark.parametrize('axis', (-1, 2)) + def test_vectorized_nsamp_ptype_both(self, axis): + # Test that permutation_test with permutation_type='independent' works + # properly for a 3-sample statistic with nd array samples of different + # (but compatible) shapes and ndims. Show that exact permutation test + # and random permutation tests approximate SciPy's asymptotic pvalues + # and that exact and random permutation test results are even closer + # to one another (than they are to the asymptotic results). + + # Three samples, different (but compatible) shapes with different ndims + rng = np.random.default_rng(6709265303529651545) + x = rng.random(size=(3)) + y = rng.random(size=(1, 3, 2)) + z = rng.random(size=(2, 1, 4)) + data = (x, y, z) + + # Define the statistic (and pvalue for comparison) + def statistic1d(*data): + return stats.kruskal(*data).statistic + + def pvalue1d(*data): + return stats.kruskal(*data).pvalue + + statistic = _resampling._vectorize_statistic(statistic1d) + pvalue = _resampling._vectorize_statistic(pvalue1d) + + # Calculate the expected results + x2 = np.broadcast_to(x, (2, 3, 3)) # broadcast manually because + y2 = np.broadcast_to(y, (2, 3, 2)) # _vectorize_statistic doesn't + z2 = np.broadcast_to(z, (2, 3, 4)) + expected_statistic = statistic(x2, y2, z2, axis=axis) + expected_pvalue = pvalue(x2, y2, z2, axis=axis) + + # Calculate exact and randomized permutation results + kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater', + 'permutation_type': 'independent', 'random_state': self.rng} + res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds) + res2 = permutation_test(data, statistic1d, n_resamples=1000, **kwds) + + # Check results + assert_allclose(res.statistic, expected_statistic, rtol=self.rtol) + assert_allclose(res.statistic, res2.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected_pvalue, atol=6e-2) + assert_allclose(res.pvalue, res2.pvalue, atol=3e-2) + + # -- Paired-Sample Tests -- # + + @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided")) + def test_against_wilcoxon(self, alternative): + + x = stats.uniform.rvs(size=(3, 6, 2), loc=0, random_state=self.rng) + y = stats.uniform.rvs(size=(3, 6, 2), loc=0.05, random_state=self.rng) + + # We'll check both 1- and 2-sample versions of the same test; + # we expect identical results to wilcoxon in all cases. 
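+        # With permutation_type='samples', permuting the one-sample data
+        # (x - y,) flips the signs of the paired differences, which is
+        # equivalent to swapping observations between `x` and `y` in the
+        # two-sample version, so both formulations share a null distribution.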
+        def statistic_1samp_1d(z):
+            # 'less' ensures we always get the same one of the two possible
+            # statistics
+            return stats.wilcoxon(z, alternative='less').statistic
+
+        def statistic_2samp_1d(x, y):
+            return stats.wilcoxon(x, y, alternative='less').statistic
+
+        def test_1d(x, y):
+            return stats.wilcoxon(x, y, alternative=alternative)
+
+        test = _resampling._vectorize_statistic(test_1d)
+
+        expected = test(x, y, axis=1)
+        expected_stat = expected[0]
+        expected_p = expected[1]
+
+        kwds = {'vectorized': False, 'axis': 1, 'alternative': alternative,
+                'permutation_type': 'samples', 'random_state': self.rng,
+                'n_resamples': np.inf}
+        res1 = permutation_test((x-y,), statistic_1samp_1d, **kwds)
+        res2 = permutation_test((x, y), statistic_2samp_1d, **kwds)
+
+        # `wilcoxon` returns a different statistic with 'two-sided'
+        assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol)
+        if alternative != 'two-sided':
+            assert_allclose(res2.statistic, expected_stat, rtol=self.rtol)
+
+        assert_allclose(res2.pvalue, expected_p, rtol=self.rtol)
+        assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol)
+
+    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
+    def test_against_binomtest(self, alternative):
+
+        x = self.rng.integers(0, 2, size=10)
+        x[x == 0] = -1
+        # More naturally, the test would flip elements between 0 and 1.
+        # However, permutation_test will flip the _signs_ of the elements.
+        # So we have to work with +1/-1 instead of 1/0.
+
+        def statistic(x, axis=0):
+            return np.sum(x > 0, axis=axis)
+
+        k, n, p = statistic(x), 10, 0.5
+        expected = stats.binomtest(k, n, p, alternative=alternative)
+
+        res = stats.permutation_test((x,), statistic, vectorized=True,
+                                     permutation_type='samples',
+                                     n_resamples=np.inf, random_state=self.rng,
+                                     alternative=alternative)
+        assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
+
+    # -- Exact Association Tests -- #
+
+    def test_against_kendalltau(self):
+
+        x = self.rng.normal(size=6)
+        y = x + self.rng.normal(size=6)
+
+        expected = stats.kendalltau(x, y, method='exact')
+
+        def statistic1d(x):
+            return stats.kendalltau(x, y, method='asymptotic').statistic
+
+        # kendalltau currently has only one alternative, two-sided
+        res = permutation_test((x,), statistic1d, permutation_type='pairings',
+                               n_resamples=np.inf, random_state=self.rng)
+
+        assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
+        assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
+
+    @pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided'))
+    def test_against_fisher_exact(self, alternative):
+
+        def statistic(x,):
+            return np.sum((x == 1) & (y == 1))
+
+        # x and y are binary random variables with some dependence
+        rng = np.random.default_rng(6235696159000529929)
+        x = (rng.random(7) > 0.6).astype(float)
+        y = (rng.random(7) + 0.25*x > 0.6).astype(float)
+        tab = stats.contingency.crosstab(x, y)[1]
+
+        res = permutation_test((x,), statistic, permutation_type='pairings',
+                               n_resamples=np.inf, alternative=alternative,
+                               random_state=rng)
+        res2 = stats.fisher_exact(tab, alternative=alternative)
+
+        assert_allclose(res.pvalue, res2[1])
+
+    @pytest.mark.xslow()
+    @pytest.mark.parametrize('axis', (-2, 1))
+    def test_vectorized_nsamp_ptype_samples(self, axis):
+        # Test that permutation_test with permutation_type='samples' works
+        # properly for a 3-sample statistic with nd array samples of different
+        # (but compatible) shapes and ndims.
Show that exact permutation test + # reproduces SciPy's exact pvalue and that random permutation test + # approximates it. + + x = self.rng.random(size=(2, 4, 3)) + y = self.rng.random(size=(1, 4, 3)) + z = self.rng.random(size=(2, 4, 1)) + x = stats.rankdata(x, axis=axis) + y = stats.rankdata(y, axis=axis) + z = stats.rankdata(z, axis=axis) + y = y[0] # to check broadcast with different ndim + data = (x, y, z) + + def statistic1d(*data): + return stats.page_trend_test(data, ranked=True, + method='asymptotic').statistic + + def pvalue1d(*data): + return stats.page_trend_test(data, ranked=True, + method='exact').pvalue + + statistic = _resampling._vectorize_statistic(statistic1d) + pvalue = _resampling._vectorize_statistic(pvalue1d) + + expected_statistic = statistic(*np.broadcast_arrays(*data), axis=axis) + expected_pvalue = pvalue(*np.broadcast_arrays(*data), axis=axis) + + # Let's forgive this use of an integer seed, please. + kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater', + 'permutation_type': 'pairings', 'random_state': 0} + res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds) + res2 = permutation_test(data, statistic1d, n_resamples=5000, **kwds) + + assert_allclose(res.statistic, expected_statistic, rtol=self.rtol) + assert_allclose(res.statistic, res2.statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected_pvalue, rtol=self.rtol) + assert_allclose(res.pvalue, res2.pvalue, atol=3e-2) + + # -- Test Against External References -- # + + tie_case_1 = {'x': [1, 2, 3, 4], 'y': [1.5, 2, 2.5], + 'expected_less': 0.2000000000, + 'expected_2sided': 0.4, # 2*expected_less + 'expected_Pr_gte_S_mean': 0.3428571429, # see note below + 'expected_statistic': 7.5, + 'expected_avg': 9.142857, 'expected_std': 1.40698} + tie_case_2 = {'x': [111, 107, 100, 99, 102, 106, 109, 108], + 'y': [107, 108, 106, 98, 105, 103, 110, 105, 104], + 'expected_less': 0.1555738379, + 'expected_2sided': 0.3111476758, + 'expected_Pr_gte_S_mean': 0.2969971205, # see note below + 'expected_statistic': 32.5, + 'expected_avg': 38.117647, 'expected_std': 5.172124} + + @pytest.mark.xslow() # only the second case is slow, really + @pytest.mark.parametrize('case', (tie_case_1, tie_case_2)) + def test_with_ties(self, case): + """ + Results above from SAS PROC NPAR1WAY, e.g. + + DATA myData; + INPUT X Y; + CARDS; + 1 1 + 1 2 + 1 3 + 1 4 + 2 1.5 + 2 2 + 2 2.5 + ods graphics on; + proc npar1way AB data=myData; + class X; + EXACT; + run; + ods graphics off; + + Note: SAS provides Pr >= |S-Mean|, which is different from our + definition of a two-sided p-value. 
+ + """ + + x = case['x'] + y = case['y'] + + expected_statistic = case['expected_statistic'] + expected_less = case['expected_less'] + expected_2sided = case['expected_2sided'] + expected_Pr_gte_S_mean = case['expected_Pr_gte_S_mean'] + expected_avg = case['expected_avg'] + expected_std = case['expected_std'] + + def statistic1d(x, y): + return stats.ansari(x, y).statistic + + with np.testing.suppress_warnings() as sup: + sup.filter(UserWarning, "Ties preclude use of exact statistic") + res = permutation_test((x, y), statistic1d, n_resamples=np.inf, + alternative='less') + res2 = permutation_test((x, y), statistic1d, n_resamples=np.inf, + alternative='two-sided') + + assert_allclose(res.statistic, expected_statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected_less, atol=1e-10) + assert_allclose(res2.pvalue, expected_2sided, atol=1e-10) + assert_allclose(res2.null_distribution.mean(), expected_avg, rtol=1e-6) + assert_allclose(res2.null_distribution.std(), expected_std, rtol=1e-6) + + # SAS provides Pr >= |S-Mean|; might as well check against that, too + S = res.statistic + mean = res.null_distribution.mean() + n = len(res.null_distribution) + Pr_gte_S_mean = np.sum(np.abs(res.null_distribution-mean) + >= np.abs(S-mean))/n + assert_allclose(expected_Pr_gte_S_mean, Pr_gte_S_mean) + + @pytest.mark.parametrize('alternative, expected_pvalue', + (('less', 0.9708333333333), + ('greater', 0.05138888888889), + ('two-sided', 0.1027777777778))) + def test_against_spearmanr_in_R(self, alternative, expected_pvalue): + """ + Results above from R cor.test, e.g. + + options(digits=16) + x <- c(1.76405235, 0.40015721, 0.97873798, + 2.2408932, 1.86755799, -0.97727788) + y <- c(2.71414076, 0.2488, 0.87551913, + 2.6514917, 2.01160156, 0.47699563) + cor.test(x, y, method = "spearm", alternative = "t") + """ + # data comes from + # np.random.seed(0) + # x = stats.norm.rvs(size=6) + # y = x + stats.norm.rvs(size=6) + x = [1.76405235, 0.40015721, 0.97873798, + 2.2408932, 1.86755799, -0.97727788] + y = [2.71414076, 0.2488, 0.87551913, + 2.6514917, 2.01160156, 0.47699563] + expected_statistic = 0.7714285714285715 + + def statistic1d(x): + return stats.spearmanr(x, y).statistic + + res = permutation_test((x,), statistic1d, permutation_type='pairings', + n_resamples=np.inf, alternative=alternative) + + assert_allclose(res.statistic, expected_statistic, rtol=self.rtol) + assert_allclose(res.pvalue, expected_pvalue, atol=1e-13) + + @pytest.mark.parametrize("batch", (-1, 0)) + def test_batch_generator_iv(self, batch): + with pytest.raises(ValueError, match="`batch` must be positive."): + list(_resampling._batch_generator([1, 2, 3], batch)) + + batch_generator_cases = [(range(0), 3, []), + (range(6), 3, [[0, 1, 2], [3, 4, 5]]), + (range(8), 3, [[0, 1, 2], [3, 4, 5], [6, 7]])] + + @pytest.mark.parametrize("iterable, batch, expected", + batch_generator_cases) + def test_batch_generator(self, iterable, batch, expected): + got = list(_resampling._batch_generator(iterable, batch)) + assert got == expected + + def test_finite_precision_statistic(self): + # Some statistics return numerically distinct values when the values + # should be equal in theory. Test that `permutation_test` accounts + # for this in some way. 
+        x = [1, 2, 4, 3]
+        y = [2, 4, 6, 8]
+
+        def statistic(x, y):
+            return stats.pearsonr(x, y)[0]
+
+        res = stats.permutation_test((x, y), statistic, vectorized=False,
+                                     permutation_type='pairings')
+        r, pvalue, null = res.statistic, res.pvalue, res.null_distribution
+
+        correct_p = 2 * np.sum(null >= r - 1e-14) / len(null)
+        assert pvalue == correct_p == 1/3
+        # Compare against other exact correlation tests using R cor.test
+        # options(digits=16)
+        # x = c(1, 2, 4, 3)
+        # y = c(2, 4, 6, 8)
+        # cor.test(x, y, alternative = "t", method = "spearman")  # 0.333333333
+        # cor.test(x, y, alternative = "t", method = "kendall")   # 0.333333333
+
+
+def test_all_partitions_concatenated():
+    # make sure that _all_partitions_concatenated produces the correct number
+    # of partitions of the data into samples of the given sizes and that
+    # all are unique
+    n = np.array([3, 2, 4], dtype=int)
+    nc = np.cumsum(n)
+
+    all_partitions = set()
+    counter = 0
+    for partition_concatenated in _resampling._all_partitions_concatenated(n):
+        counter += 1
+        partitioning = np.split(partition_concatenated, nc[:-1])
+        all_partitions.add(tuple([frozenset(i) for i in partitioning]))
+
+    expected = np.prod([special.binom(sum(n[i:]), sum(n[i+1:]))
+                        for i in range(len(n)-1)])
+
+    assert_equal(counter, expected)
+    assert_equal(len(all_partitions), expected)
+
+
+@pytest.mark.parametrize('fun_name',
+                         ['bootstrap', 'permutation_test', 'monte_carlo_test'])
+def test_parameter_vectorized(fun_name):
+    # Check that parameter `vectorized` is working as desired for all
+    # resampling functions. Results don't matter; just don't fail asserts.
+    rng = np.random.default_rng(75245098234592)
+    sample = rng.random(size=10)
+
+    def rvs(size):  # needed by `monte_carlo_test`
+        return stats.norm.rvs(size=size, random_state=rng)
+
+    fun_options = {'bootstrap': {'data': (sample,), 'random_state': rng,
+                                 'method': 'percentile'},
+                   'permutation_test': {'data': (sample,), 'random_state': rng,
+                                        'permutation_type': 'samples'},
+                   'monte_carlo_test': {'sample': sample, 'rvs': rvs}}
+    common_options = {'n_resamples': 100}
+
+    fun = getattr(stats, fun_name)
+    options = fun_options[fun_name]
+    options.update(common_options)
+
+    def statistic(x, axis):
+        assert x.ndim > 1 or np.array_equal(x, sample)
+        return np.mean(x, axis=axis)
+    fun(statistic=statistic, vectorized=None, **options)
+    fun(statistic=statistic, vectorized=True, **options)
+
+    def statistic(x):
+        assert x.ndim == 1
+        return np.mean(x)
+    fun(statistic=statistic, vectorized=None, **options)
+    fun(statistic=statistic, vectorized=False, **options)
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_sampling.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_sampling.py
new file mode 100644
index 0000000000000000000000000000000000000000..10b4e5af96ec070ef10ccd8e5d3e7db78f3a03e1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_sampling.py
@@ -0,0 +1,1445 @@
+import threading
+import pickle
+import pytest
+from copy import deepcopy
+import platform
+import sys
+import math
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal, suppress_warnings
+from scipy.stats.sampling import (
+    TransformedDensityRejection,
+    DiscreteAliasUrn,
+    DiscreteGuideTable,
+    NumericalInversePolynomial,
+    NumericalInverseHermite,
+    RatioUniforms,
+    SimpleRatioUniforms,
+    UNURANError
+)
+from pytest import raises as assert_raises
+from scipy import stats
+from scipy import special
+from scipy.stats import chisquare, cramervonmises
+from
scipy.stats._distr_params import distdiscrete, distcont +from scipy._lib._util import check_random_state + + +# common test data: this data can be shared between all the tests. + + +# Normal distribution shared between all the continuous methods +class StandardNormal: + def pdf(self, x): + # normalization constant needed for NumericalInverseHermite + return 1./np.sqrt(2.*np.pi) * np.exp(-0.5 * x*x) + + def dpdf(self, x): + return 1./np.sqrt(2.*np.pi) * -x * np.exp(-0.5 * x*x) + + def cdf(self, x): + return special.ndtr(x) + + +all_methods = [ + ("TransformedDensityRejection", {"dist": StandardNormal()}), + ("DiscreteAliasUrn", {"dist": [0.02, 0.18, 0.8]}), + ("DiscreteGuideTable", {"dist": [0.02, 0.18, 0.8]}), + ("NumericalInversePolynomial", {"dist": StandardNormal()}), + ("NumericalInverseHermite", {"dist": StandardNormal()}), + ("SimpleRatioUniforms", {"dist": StandardNormal(), "mode": 0}) +] + +if (sys.implementation.name == 'pypy' + and sys.implementation.version < (7, 3, 10)): + # changed in PyPy for v7.3.10 + floaterr = r"unsupported operand type for float\(\): 'list'" +else: + floaterr = r"must be real number, not list" +# Make sure an internal error occurs in UNU.RAN when invalid callbacks are +# passed. Moreover, different generators throw different error messages. +# So, in case of an `UNURANError`, we do not validate the error message. +bad_pdfs_common = [ + # Negative PDF + (lambda x: -x, UNURANError, r"..."), + # Returning wrong type + (lambda x: [], TypeError, floaterr), + # Undefined name inside the function + (lambda x: foo, NameError, r"name 'foo' is not defined"), # type: ignore[name-defined] # noqa: F821, E501 + # Infinite value returned => Overflow error. + (lambda x: np.inf, UNURANError, r"..."), + # NaN value => internal error in UNU.RAN + (lambda x: np.nan, UNURANError, r"..."), + # signature of PDF wrong + (lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given") +] + + +# same approach for dpdf +bad_dpdf_common = [ + # Infinite value returned. + (lambda x: np.inf, UNURANError, r"..."), + # NaN value => internal error in UNU.RAN + (lambda x: np.nan, UNURANError, r"..."), + # Returning wrong type + (lambda x: [], TypeError, floaterr), + # Undefined name inside the function + (lambda x: foo, NameError, r"name 'foo' is not defined"), # type: ignore[name-defined] # noqa: F821, E501 + # signature of dPDF wrong + (lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given") +] + + +# same approach for logpdf +bad_logpdfs_common = [ + # Returning wrong type + (lambda x: [], TypeError, floaterr), + # Undefined name inside the function + (lambda x: foo, NameError, r"name 'foo' is not defined"), # type: ignore[name-defined] # noqa: F821, E501 + # Infinite value returned => Overflow error. 
+ (lambda x: np.inf, UNURANError, r"..."), + # NaN value => internal error in UNU.RAN + (lambda x: np.nan, UNURANError, r"..."), + # signature of logpdf wrong + (lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given") +] + + +bad_pv_common = [ + ([], r"must contain at least one element"), + ([[1.0, 0.0]], r"wrong number of dimensions \(expected 1, got 2\)"), + ([0.2, 0.4, np.nan, 0.8], r"must contain only finite / non-nan values"), + ([0.2, 0.4, np.inf, 0.8], r"must contain only finite / non-nan values"), + ([0.0, 0.0], r"must contain at least one non-zero value"), +] + + +# size of the domains is incorrect +bad_sized_domains = [ + # > 2 elements in the domain + ((1, 2, 3), ValueError, r"must be a length 2 tuple"), + # empty domain + ((), ValueError, r"must be a length 2 tuple") +] + +# domain values are incorrect +bad_domains = [ + ((2, 1), UNURANError, r"left >= right"), + ((1, 1), UNURANError, r"left >= right"), +] + +# infinite and nan values present in domain. +inf_nan_domains = [ + # left >= right + ((10, 10), UNURANError, r"left >= right"), + ((np.inf, np.inf), UNURANError, r"left >= right"), + ((-np.inf, -np.inf), UNURANError, r"left >= right"), + ((np.inf, -np.inf), UNURANError, r"left >= right"), + # Also include nans in some of the domains. + ((-np.inf, np.nan), ValueError, r"only non-nan values"), + ((np.nan, np.inf), ValueError, r"only non-nan values") +] + +# `nan` values present in domain. Some distributions don't support +# infinite tails, so don't mix the nan values with infinities. +nan_domains = [ + ((0, np.nan), ValueError, r"only non-nan values"), + ((np.nan, np.nan), ValueError, r"only non-nan values") +] + + +# all the methods should throw errors for nan, bad sized, and bad valued +# domains. +@pytest.mark.parametrize("domain, err, msg", + bad_domains + bad_sized_domains + + nan_domains) # type: ignore[operator] +@pytest.mark.parametrize("method, kwargs", all_methods) +def test_bad_domain(domain, err, msg, method, kwargs): + Method = getattr(stats.sampling, method) + with pytest.raises(err, match=msg): + Method(**kwargs, domain=domain) + + +@pytest.mark.parametrize("method, kwargs", all_methods) +def test_random_state(method, kwargs): + Method = getattr(stats.sampling, method) + + # simple seed that works for any version of NumPy + seed = 123 + rng1 = Method(**kwargs, random_state=seed) + rng2 = Method(**kwargs, random_state=seed) + assert_equal(rng1.rvs(100), rng2.rvs(100)) + + # global seed + np.random.seed(123) + rng1 = Method(**kwargs) + rvs1 = rng1.rvs(100) + np.random.seed(None) + rng2 = Method(**kwargs, random_state=123) + rvs2 = rng2.rvs(100) + assert_equal(rvs1, rvs2) + + # Generator seed for new NumPy + # when a RandomState is given, it should take the bitgen_t + # member of the class and create a Generator instance. + seed1 = np.random.RandomState(np.random.MT19937(123)) + seed2 = np.random.Generator(np.random.MT19937(123)) + rng1 = Method(**kwargs, random_state=seed1) + rng2 = Method(**kwargs, random_state=seed2) + assert_equal(rng1.rvs(100), rng2.rvs(100)) + + +def test_set_random_state(): + rng1 = TransformedDensityRejection(StandardNormal(), random_state=123) + rng2 = TransformedDensityRejection(StandardNormal()) + rng2.set_random_state(123) + assert_equal(rng1.rvs(100), rng2.rvs(100)) + rng = TransformedDensityRejection(StandardNormal(), random_state=123) + rvs1 = rng.rvs(100) + rng.set_random_state(123) + rvs2 = rng.rvs(100) + assert_equal(rvs1, rvs2) + + +def test_threading_behaviour(): + # Test if the API is thread-safe. 
+    # This verifies that the locking mechanism and the use of
+    # `PyErr_Occurred` are correct.
+    errors = {"err1": None, "err2": None}
+
+    class Distribution:
+        def __init__(self, pdf_msg):
+            self.pdf_msg = pdf_msg
+
+        def pdf(self, x):
+            if 49.9 < x < 50.0:
+                raise ValueError(self.pdf_msg)
+            return x
+
+        def dpdf(self, x):
+            return 1
+
+    def func1():
+        dist = Distribution('foo')
+        rng = TransformedDensityRejection(dist, domain=(10, 100),
+                                          random_state=12)
+        try:
+            rng.rvs(100000)
+        except ValueError as e:
+            errors['err1'] = e.args[0]
+
+    def func2():
+        dist = Distribution('bar')
+        rng = TransformedDensityRejection(dist, domain=(10, 100),
+                                          random_state=2)
+        try:
+            rng.rvs(100000)
+        except ValueError as e:
+            errors['err2'] = e.args[0]
+
+    t1 = threading.Thread(target=func1)
+    t2 = threading.Thread(target=func2)
+
+    t1.start()
+    t2.start()
+
+    t1.join()
+    t2.join()
+
+    assert errors['err1'] == 'foo'
+    assert errors['err2'] == 'bar'
+
+
+@pytest.mark.parametrize("method, kwargs", all_methods)
+def test_pickle(method, kwargs):
+    Method = getattr(stats.sampling, method)
+    rng1 = Method(**kwargs, random_state=123)
+    obj = pickle.dumps(rng1)
+    rng2 = pickle.loads(obj)
+    assert_equal(rng1.rvs(100), rng2.rvs(100))
+
+
+@pytest.mark.parametrize("size", [None, 0, (0, ), 1, (10, 3), (2, 3, 4, 5),
+                                  (0, 0), (0, 1)])
+def test_rvs_size(size):
+    # As the `rvs` method is present in the base class and shared between
+    # all the classes, we can just test with one of the methods.
+    rng = TransformedDensityRejection(StandardNormal())
+    if size is None:
+        assert np.isscalar(rng.rvs(size))
+    else:
+        if np.isscalar(size):
+            size = (size, )
+        assert rng.rvs(size).shape == size
+
+
+def test_with_scipy_distribution():
+    # test if the setup works with SciPy's rv_frozen distributions
+    dist = stats.norm()
+    urng = np.random.default_rng(0)
+    rng = NumericalInverseHermite(dist, random_state=urng)
+    u = np.linspace(0, 1, num=100)
+    check_cont_samples(rng, dist, dist.stats())
+    assert_allclose(dist.ppf(u), rng.ppf(u))
+    # test if it works with `loc` and `scale`
+    dist = stats.norm(loc=10., scale=5.)
+ rng = NumericalInverseHermite(dist, random_state=urng) + check_cont_samples(rng, dist, dist.stats()) + assert_allclose(dist.ppf(u), rng.ppf(u)) + # check for discrete distributions + dist = stats.binom(10, 0.2) + rng = DiscreteAliasUrn(dist, random_state=urng) + domain = dist.support() + pv = dist.pmf(np.arange(domain[0], domain[1]+1)) + check_discr_samples(rng, pv, dist.stats()) + + +def check_cont_samples(rng, dist, mv_ex, rtol=1e-7, atol=1e-1): + rvs = rng.rvs(100000) + mv = rvs.mean(), rvs.var() + # test the moments only if the variance is finite + if np.isfinite(mv_ex[1]): + assert_allclose(mv, mv_ex, rtol=rtol, atol=atol) + # Cramer Von Mises test for goodness-of-fit + rvs = rng.rvs(500) + dist.cdf = np.vectorize(dist.cdf) + pval = cramervonmises(rvs, dist.cdf).pvalue + assert pval > 0.1 + + +def check_discr_samples(rng, pv, mv_ex, rtol=1e-3, atol=1e-1): + rvs = rng.rvs(100000) + # test if the first few moments match + mv = rvs.mean(), rvs.var() + assert_allclose(mv, mv_ex, rtol=rtol, atol=atol) + # normalize + pv = pv / pv.sum() + # chi-squared test for goodness-of-fit + obs_freqs = np.zeros_like(pv) + _, freqs = np.unique(rvs, return_counts=True) + freqs = freqs / freqs.sum() + obs_freqs[:freqs.size] = freqs + pval = chisquare(obs_freqs, pv).pvalue + assert pval > 0.1 + + +def test_warning_center_not_in_domain(): + # UNURAN will warn if the center provided or the one computed w/o the + # domain is outside of the domain + msg = "102 : center moved into domain of distribution" + with pytest.warns(RuntimeWarning, match=msg): + NumericalInversePolynomial(StandardNormal(), center=0, domain=(3, 5)) + with pytest.warns(RuntimeWarning, match=msg): + NumericalInversePolynomial(StandardNormal(), domain=(3, 5)) + + +@pytest.mark.parametrize('method', ["SimpleRatioUniforms", + "NumericalInversePolynomial", + "TransformedDensityRejection"]) +def test_error_mode_not_in_domain(method): + # UNURAN raises an error if the mode is not in the domain + # the behavior is different compared to the case that center is not in the + # domain. mode is supposed to be the exact value, center can be an + # approximate value + Method = getattr(stats.sampling, method) + msg = "17 : mode not in domain" + with pytest.raises(UNURANError, match=msg): + Method(StandardNormal(), mode=0, domain=(3, 5)) + + +@pytest.mark.parametrize('method', ["NumericalInverseHermite", + "NumericalInversePolynomial"]) +class TestQRVS: + def test_input_validation(self, method): + match = "`qmc_engine` must be an instance of..." + with pytest.raises(ValueError, match=match): + Method = getattr(stats.sampling, method) + gen = Method(StandardNormal()) + gen.qrvs(qmc_engine=0) + + # issues with QMCEngines and old NumPy + Method = getattr(stats.sampling, method) + gen = Method(StandardNormal()) + + match = "`d` must be consistent with dimension of `qmc_engine`." 
+ with pytest.raises(ValueError, match=match): + gen.qrvs(d=3, qmc_engine=stats.qmc.Halton(2)) + + qrngs = [None, stats.qmc.Sobol(1, seed=0), stats.qmc.Halton(3, seed=0)] + # `size=None` should not add anything to the shape, `size=1` should + sizes = [(None, tuple()), (1, (1,)), (4, (4,)), + ((4,), (4,)), ((2, 4), (2, 4))] # type: ignore + # Neither `d=None` nor `d=1` should add anything to the shape + ds = [(None, tuple()), (1, tuple()), (3, (3,))] + + @pytest.mark.parametrize('qrng', qrngs) + @pytest.mark.parametrize('size_in, size_out', sizes) + @pytest.mark.parametrize('d_in, d_out', ds) + def test_QRVS_shape_consistency(self, qrng, size_in, size_out, + d_in, d_out, method): + w32 = sys.platform == "win32" and platform.architecture()[0] == "32bit" + if w32 and method == "NumericalInversePolynomial": + pytest.xfail("NumericalInversePolynomial.qrvs fails for Win " + "32-bit") + + dist = StandardNormal() + Method = getattr(stats.sampling, method) + gen = Method(dist) + + # If d and qrng.d are inconsistent, an error is raised + if d_in is not None and qrng is not None and qrng.d != d_in: + match = "`d` must be consistent with dimension of `qmc_engine`." + with pytest.raises(ValueError, match=match): + gen.qrvs(size_in, d=d_in, qmc_engine=qrng) + return + + # Sometimes d is really determined by qrng + if d_in is None and qrng is not None and qrng.d != 1: + d_out = (qrng.d,) + + shape_expected = size_out + d_out + + qrng2 = deepcopy(qrng) + qrvs = gen.qrvs(size=size_in, d=d_in, qmc_engine=qrng) + if size_in is not None: + assert qrvs.shape == shape_expected + + if qrng2 is not None: + uniform = qrng2.random(np.prod(size_in) or 1) + qrvs2 = stats.norm.ppf(uniform).reshape(shape_expected) + assert_allclose(qrvs, qrvs2, atol=1e-12) + + def test_QRVS_size_tuple(self, method): + # QMCEngine samples are always of shape (n, d). When `size` is a tuple, + # we set `n = prod(size)` in the call to qmc_engine.random, transform + # the sample, and reshape it to the final dimensions. When we reshape, + # we need to be careful, because the _columns_ of the sample returned + # by a QMCEngine are "independent"-ish, but the elements within the + # columns are not. We need to make sure that this doesn't get mixed up + # by reshaping: qrvs[..., i] should remain "independent"-ish of + # qrvs[..., i+1], but the elements within qrvs[..., i] should be + # transformed from the same low-discrepancy sequence. + + dist = StandardNormal() + Method = getattr(stats.sampling, method) + gen = Method(dist) + + size = (3, 4) + d = 5 + qrng = stats.qmc.Halton(d, seed=0) + qrng2 = stats.qmc.Halton(d, seed=0) + + uniform = qrng2.random(np.prod(size)) + + qrvs = gen.qrvs(size=size, d=d, qmc_engine=qrng) + qrvs2 = stats.norm.ppf(uniform) + + for i in range(d): + sample = qrvs[..., i] + sample2 = qrvs2[:, i].reshape(size) + assert_allclose(sample, sample2, atol=1e-12) + + +class TestTransformedDensityRejection: + # Simple Custom Distribution + class dist0: + def pdf(self, x): + return 3/4 * (1-x*x) + + def dpdf(self, x): + return 3/4 * (-2*x) + + def cdf(self, x): + return 3/4 * (x - x**3/3 + 2/3) + + def support(self): + return -1, 1 + + # Standard Normal Distribution + class dist1: + def pdf(self, x): + return stats.norm._pdf(x / 0.1) + + def dpdf(self, x): + return -x / 0.01 * stats.norm._pdf(x / 0.1) + + def cdf(self, x): + return stats.norm._cdf(x / 0.1) + + # pdf with piecewise linear function as transformed density + # with T = -1/sqrt with shift. 
Taken from UNU.RAN test suite + # (from file t_tdr_ps.c) + class dist2: + def __init__(self, shift): + self.shift = shift + + def pdf(self, x): + x -= self.shift + y = 1. / (abs(x) + 1.) + return 0.5 * y * y + + def dpdf(self, x): + x -= self.shift + y = 1. / (abs(x) + 1.) + y = y * y * y + return y if (x < 0.) else -y + + def cdf(self, x): + x -= self.shift + if x <= 0.: + return 0.5 / (1. - x) + else: + return 1. - 0.5 / (1. + x) + + dists = [dist0(), dist1(), dist2(0.), dist2(10000.)] + + # exact mean and variance of the distributions in the list dists + mv0 = [0., 4./15.] + mv1 = [0., 0.01] + mv2 = [0., np.inf] + mv3 = [10000., np.inf] + mvs = [mv0, mv1, mv2, mv3] + + @pytest.mark.parametrize("dist, mv_ex", + zip(dists, mvs)) + def test_basic(self, dist, mv_ex): + with suppress_warnings() as sup: + # filter the warnings thrown by UNU.RAN + sup.filter(RuntimeWarning) + rng = TransformedDensityRejection(dist, random_state=42) + check_cont_samples(rng, dist, mv_ex) + + # PDF 0 everywhere => bad construction points + bad_pdfs = [(lambda x: 0, UNURANError, r"50 : bad construction points.")] + bad_pdfs += bad_pdfs_common # type: ignore[arg-type] + + @pytest.mark.parametrize("pdf, err, msg", bad_pdfs) + def test_bad_pdf(self, pdf, err, msg): + class dist: + pass + dist.pdf = pdf + dist.dpdf = lambda x: 1 # an arbitrary dPDF + with pytest.raises(err, match=msg): + TransformedDensityRejection(dist) + + @pytest.mark.parametrize("dpdf, err, msg", bad_dpdf_common) + def test_bad_dpdf(self, dpdf, err, msg): + class dist: + pass + dist.pdf = lambda x: x + dist.dpdf = dpdf + with pytest.raises(err, match=msg): + TransformedDensityRejection(dist, domain=(1, 10)) + + # test domains with inf + nan in them. need to write a custom test for + # this because not all methods support infinite tails. 
+
+    @pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
+    def test_inf_nan_domains(self, domain, err, msg):
+        with pytest.raises(err, match=msg):
+            TransformedDensityRejection(StandardNormal(), domain=domain)
+
+    @pytest.mark.parametrize("construction_points", [-1, 0, 0.1])
+    def test_bad_construction_points_scalar(self, construction_points):
+        with pytest.raises(ValueError, match=r"`construction_points` must be "
+                                             r"a positive integer."):
+            TransformedDensityRejection(
+                StandardNormal(), construction_points=construction_points
+            )
+
+    def test_bad_construction_points_array(self):
+        # empty array
+        construction_points = []
+        with pytest.raises(ValueError, match=r"`construction_points` must "
+                                             r"either be a "
+                                             r"scalar or a non-empty array."):
+            TransformedDensityRejection(
+                StandardNormal(), construction_points=construction_points
+            )
+
+        # construction_points not monotonically increasing
+        construction_points = [1, 1, 1, 1, 1, 1]
+        with pytest.warns(RuntimeWarning, match=r"33 : starting points not "
+                                                r"strictly monotonically "
+                                                r"increasing"):
+            TransformedDensityRejection(
+                StandardNormal(), construction_points=construction_points
+            )
+
+        # construction_points containing nans
+        construction_points = [np.nan, np.nan, np.nan]
+        with pytest.raises(UNURANError, match=r"50 : bad construction "
+                                              r"points."):
+            TransformedDensityRejection(
+                StandardNormal(), construction_points=construction_points
+            )
+
+        # construction_points out of domain
+        construction_points = [-10, 10]
+        with pytest.warns(RuntimeWarning, match=r"50 : starting point out of "
+                                                r"domain"):
+            TransformedDensityRejection(
+                StandardNormal(), domain=(-3, 3),
+                construction_points=construction_points
+            )
+
+    @pytest.mark.parametrize("c", [-1., np.nan, np.inf, 0.1, 1.])
+    def test_bad_c(self, c):
+        msg = r"`c` must either be -0.5 or 0."
+        with pytest.raises(ValueError, match=msg):
+            TransformedDensityRejection(StandardNormal(), c=c)
+
+    u = [np.linspace(0, 1, num=1000), [], [[]], [np.nan],
+         [-np.inf, np.nan, np.inf], 0,
+         [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]]
+
+    @pytest.mark.parametrize("u", u)
+    def test_ppf_hat(self, u):
+        # Increase the `max_squeeze_hat_ratio` so the ppf_hat is more
+        # accurate.
+        rng = TransformedDensityRejection(StandardNormal(),
+                                          max_squeeze_hat_ratio=0.9999)
+        # Older versions of NumPy throw RuntimeWarnings for comparisons
+        # with nan.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in greater")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "greater_equal")
+            sup.filter(RuntimeWarning, "invalid value encountered in less")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "less_equal")
+            res = rng.ppf_hat(u)
+            expected = stats.norm.ppf(u)
+        assert_allclose(res, expected, rtol=1e-3, atol=1e-5)
+        assert res.shape == expected.shape
+
+    def test_bad_dist(self):
+        # Empty distribution
+        class dist:
+            ...
+
+        msg = r"`pdf` required but not found."
+        with pytest.raises(ValueError, match=msg):
+            TransformedDensityRejection(dist)
+
+        # dPDF not present in dist
+        class dist:
+            pdf = lambda x: 1-x*x  # noqa: E731
+
+        msg = r"`dpdf` required but not found."
+        with pytest.raises(ValueError, match=msg):
+            TransformedDensityRejection(dist)
+
+
+class TestDiscreteAliasUrn:
+    # DAU fails on these probably because of large domains and small
+    # computation errors in PMF. Mean/SD match but chi-squared test fails.
+ basic_fail_dists = { + 'nchypergeom_fisher', # numerical errors on tails + 'nchypergeom_wallenius', # numerical errors on tails + 'randint' # fails on 32-bit ubuntu + } + + @pytest.mark.parametrize("distname, params", distdiscrete) + def test_basic(self, distname, params): + if distname in self.basic_fail_dists: + msg = ("DAU fails on these probably because of large domains " + "and small computation errors in PMF.") + pytest.skip(msg) + if not isinstance(distname, str): + dist = distname + else: + dist = getattr(stats, distname) + dist = dist(*params) + domain = dist.support() + if not np.isfinite(domain[1] - domain[0]): + # DAU only works with finite domain. So, skip the distributions + # with infinite tails. + pytest.skip("DAU only works with a finite domain.") + k = np.arange(domain[0], domain[1]+1) + pv = dist.pmf(k) + mv_ex = dist.stats('mv') + rng = DiscreteAliasUrn(dist, random_state=42) + check_discr_samples(rng, pv, mv_ex) + + # Can't use bad_pmf_common here as we evaluate PMF early on to avoid + # unhelpful errors from UNU.RAN. + bad_pmf = [ + # inf returned + (lambda x: np.inf, ValueError, + r"must contain only finite / non-nan values"), + # nan returned + (lambda x: np.nan, ValueError, + r"must contain only finite / non-nan values"), + # all zeros + (lambda x: 0.0, ValueError, + r"must contain at least one non-zero value"), + # Undefined name inside the function + (lambda x: foo, NameError, # type: ignore[name-defined] # noqa: F821 + r"name 'foo' is not defined"), + # Returning wrong type. + (lambda x: [], ValueError, + r"setting an array element with a sequence."), + # probabilities < 0 + (lambda x: -x, UNURANError, + r"50 : probability < 0"), + # signature of PMF wrong + (lambda: 1.0, TypeError, + r"takes 0 positional arguments but 1 was given") + ] + + @pytest.mark.parametrize("pmf, err, msg", bad_pmf) + def test_bad_pmf(self, pmf, err, msg): + class dist: + pass + dist.pmf = pmf + with pytest.raises(err, match=msg): + DiscreteAliasUrn(dist, domain=(1, 10)) + + @pytest.mark.parametrize("pv", [[0.18, 0.02, 0.8], + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]]) + def test_sampling_with_pv(self, pv): + pv = np.asarray(pv, dtype=np.float64) + rng = DiscreteAliasUrn(pv, random_state=123) + rng.rvs(100_000) + pv = pv / pv.sum() + variates = np.arange(0, len(pv)) + # test if the first few moments match + m_expected = np.average(variates, weights=pv) + v_expected = np.average((variates - m_expected) ** 2, weights=pv) + mv_expected = m_expected, v_expected + check_discr_samples(rng, pv, mv_expected) + + @pytest.mark.parametrize("pv, msg", bad_pv_common) + def test_bad_pv(self, pv, msg): + with pytest.raises(ValueError, match=msg): + DiscreteAliasUrn(pv) + + # DAU doesn't support infinite tails. So, it should throw an error when + # inf is present in the domain. 
+    inf_domain = [(-np.inf, np.inf), (np.inf, np.inf), (-np.inf, -np.inf),
+                  (0, np.inf), (-np.inf, 0)]
+
+    @pytest.mark.parametrize("domain", inf_domain)
+    def test_inf_domain(self, domain):
+        with pytest.raises(ValueError, match=r"must be finite"):
+            DiscreteAliasUrn(stats.binom(10, 0.2), domain=domain)
+
+    def test_bad_urn_factor(self):
+        with pytest.warns(RuntimeWarning, match=r"relative urn size < 1."):
+            DiscreteAliasUrn([0.5, 0.5], urn_factor=-1)
+
+    def test_bad_args(self):
+        msg = (r"`domain` must be provided when the "
+               r"probability vector is not available.")
+
+        class dist:
+            def pmf(self, x):
+                return x
+
+        with pytest.raises(ValueError, match=msg):
+            DiscreteAliasUrn(dist)
+
+    def test_gh19359(self):
+        pv = special.softmax(np.ones((1533,)))
+        rng = DiscreteAliasUrn(pv, random_state=42)
+        # check the correctness
+        check_discr_samples(rng, pv, (1532 / 2, (1532**2 - 1) / 12),
+                            rtol=5e-3)
+
+
+class TestNumericalInversePolynomial:
+    # Simple Custom Distribution
+    class dist0:
+        def pdf(self, x):
+            return 3/4 * (1-x*x)
+
+        def cdf(self, x):
+            return 3/4 * (x - x**3/3 + 2/3)
+
+        def support(self):
+            return -1, 1
+
+    # Standard Normal Distribution
+    class dist1:
+        def pdf(self, x):
+            return stats.norm._pdf(x / 0.1)
+
+        def cdf(self, x):
+            return stats.norm._cdf(x / 0.1)
+
+    # Sin 2 distribution
+    #          /  0.05 + 0.45*(1 + sin(2 Pi x))  if |x| <= 1
+    #  f(x) = <
+    #          \  0                              otherwise
+    # Taken from UNU.RAN test suite (from file t_pinv.c)
+    class dist2:
+        def pdf(self, x):
+            return 0.05 + 0.45 * (1 + np.sin(2*np.pi*x))
+
+        def cdf(self, x):
+            return (0.05*(x + 1) +
+                    0.9*(1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) /
+                    (4.*np.pi))
+
+        def support(self):
+            return -1, 1
+
+    # Sin 10 distribution
+    #          /  0.2 * (0.05 + 0.45*(1 + sin(2 Pi x)))  if |x| <= 5
+    #  f(x) = <
+    #          \  0                                      otherwise
+    # Taken from UNU.RAN test suite (from file t_pinv.c)
+    class dist3:
+        def pdf(self, x):
+            return 0.2 * (0.05 + 0.45 * (1 + np.sin(2*np.pi*x)))
+
+        def cdf(self, x):
+            return x/10. + 0.5 + 0.09/(2*np.pi) * (np.cos(10*np.pi) -
+                                                   np.cos(2*np.pi*x))
+
+        def support(self):
+            return -5, 5
+
+    dists = [dist0(), dist1(), dist2(), dist3()]
+
+    # exact mean and variance of the distributions in the list dists
+    mv0 = [0., 4./15.]
+    mv1 = [0., 0.01]
+    mv2 = [-0.45/np.pi, 2/3*0.5 - 0.45**2/np.pi**2]
+    mv3 = [-0.45/np.pi, 0.2 * 250/3 * 0.5 - 0.45**2/np.pi**2]
+    mvs = [mv0, mv1, mv2, mv3]
+
+    @pytest.mark.parametrize("dist, mv_ex",
+                             zip(dists, mvs))
+    def test_basic(self, dist, mv_ex):
+        rng = NumericalInversePolynomial(dist, random_state=42)
+        check_cont_samples(rng, dist, mv_ex)
+
+    @pytest.mark.xslow
+    @pytest.mark.parametrize("distname, params", distcont)
+    def test_basic_all_scipy_dists(self, distname, params):
+
+        very_slow_dists = ['anglit', 'gausshyper', 'kappa4',
+                           'ksone', 'kstwo', 'levy_l',
+                           'levy_stable', 'studentized_range',
+                           'trapezoid', 'triang', 'vonmises']
+        # for these distributions, some assertions fail due to minor
+        # numerical differences. They can be avoided either by changing
+        # the seed or by increasing the u_resolution.
+        fail_dists = ['chi2', 'fatiguelife', 'gibrat',
+                      'halfgennorm', 'lognorm', 'ncf',
+                      'ncx2', 'pareto', 't']
+        # for these distributions, skip the check for agreement between sample
+        # moments and true moments. We cannot expect them to pass due to the
+        # high variance of sample moments.
+ skip_sample_moment_check = ['rel_breitwigner'] + + if distname in very_slow_dists: + pytest.skip(f"PINV too slow for {distname}") + if distname in fail_dists: + pytest.skip(f"PINV fails for {distname}") + dist = (getattr(stats, distname) + if isinstance(distname, str) + else distname) + dist = dist(*params) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + rng = NumericalInversePolynomial(dist, random_state=42) + if distname in skip_sample_moment_check: + return + check_cont_samples(rng, dist, [dist.mean(), dist.var()]) + + @pytest.mark.parametrize("pdf, err, msg", bad_pdfs_common) + def test_bad_pdf(self, pdf, err, msg): + class dist: + pass + dist.pdf = pdf + with pytest.raises(err, match=msg): + NumericalInversePolynomial(dist, domain=[0, 5]) + + @pytest.mark.parametrize("logpdf, err, msg", bad_logpdfs_common) + def test_bad_logpdf(self, logpdf, err, msg): + class dist: + pass + dist.logpdf = logpdf + with pytest.raises(err, match=msg): + NumericalInversePolynomial(dist, domain=[0, 5]) + + # test domains with inf + nan in them. need to write a custom test for + # this because not all methods support infinite tails. + @pytest.mark.parametrize("domain, err, msg", inf_nan_domains) + def test_inf_nan_domains(self, domain, err, msg): + with pytest.raises(err, match=msg): + NumericalInversePolynomial(StandardNormal(), domain=domain) + + u = [ + # test if quantile 0 and 1 return -inf and inf respectively and check + # the correctness of the PPF for equidistant points between 0 and 1. + np.linspace(0, 1, num=10000), + # test the PPF method for empty arrays + [], [[]], + # test if nans and infs return nan result. + [np.nan], [-np.inf, np.nan, np.inf], + # test if a scalar is returned for a scalar input. + 0, + # test for arrays with nans, values greater than 1 and less than 0, + # and some valid values. + [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]] + ] + + @pytest.mark.parametrize("u", u) + def test_ppf(self, u): + dist = StandardNormal() + rng = NumericalInversePolynomial(dist, u_resolution=1e-14) + # Older versions of NumPy throw RuntimeWarnings for comparisons + # with nan. + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in greater") + sup.filter(RuntimeWarning, "invalid value encountered in " + "greater_equal") + sup.filter(RuntimeWarning, "invalid value encountered in less") + sup.filter(RuntimeWarning, "invalid value encountered in " + "less_equal") + res = rng.ppf(u) + expected = stats.norm.ppf(u) + assert_allclose(res, expected, rtol=1e-11, atol=1e-11) + assert res.shape == expected.shape + + x = [np.linspace(-10, 10, num=10000), [], [[]], [np.nan], + [-np.inf, np.nan, np.inf], 0, + [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-np.inf, 3, 4]]] + + @pytest.mark.parametrize("x", x) + def test_cdf(self, x): + dist = StandardNormal() + rng = NumericalInversePolynomial(dist, u_resolution=1e-14) + # Older versions of NumPy throw RuntimeWarnings for comparisons + # with nan. 
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in greater")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "greater_equal")
+            sup.filter(RuntimeWarning, "invalid value encountered in less")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "less_equal")
+            res = rng.cdf(x)
+            expected = stats.norm.cdf(x)
+        assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
+        assert res.shape == expected.shape
+
+    def test_u_error(self):
+        dist = StandardNormal()
+        rng = NumericalInversePolynomial(dist, u_resolution=1e-10)
+        max_error, mae = rng.u_error()
+        assert max_error < 1e-10
+        assert mae <= max_error
+        rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
+        max_error, mae = rng.u_error()
+        assert max_error < 1e-14
+        assert mae <= max_error
+
+    bad_orders = [1, 4.5, 20, np.inf, np.nan]
+    bad_u_resolution = [1e-20, 1e-1, np.inf, np.nan]
+
+    @pytest.mark.parametrize("order", bad_orders)
+    def test_bad_orders(self, order):
+        dist = StandardNormal()
+
+        msg = r"`order` must be an integer in the range \[3, 17\]."
+        with pytest.raises(ValueError, match=msg):
+            NumericalInversePolynomial(dist, order=order)
+
+    @pytest.mark.parametrize("u_resolution", bad_u_resolution)
+    def test_bad_u_resolution(self, u_resolution):
+        msg = r"`u_resolution` must be between 1e-15 and 1e-5."
+        with pytest.raises(ValueError, match=msg):
+            NumericalInversePolynomial(StandardNormal(),
+                                       u_resolution=u_resolution)
+
+    def test_bad_args(self):
+
+        class BadDist:
+            def cdf(self, x):
+                return stats.norm._cdf(x)
+
+        dist = BadDist()
+        msg = r"Either of the methods `pdf` or `logpdf` must be specified"
+        with pytest.raises(ValueError, match=msg):
+            rng = NumericalInversePolynomial(dist)
+
+        dist = StandardNormal()
+        rng = NumericalInversePolynomial(dist)
+        msg = r"`sample_size` must be greater than or equal to 1000."
+        with pytest.raises(ValueError, match=msg):
+            rng.u_error(10)
+
+        class Distribution:
+            def pdf(self, x):
+                return np.exp(-0.5 * x*x)
+
+        dist = Distribution()
+        rng = NumericalInversePolynomial(dist)
+        msg = r"Exact CDF required but not found."
+        with pytest.raises(ValueError, match=msg):
+            rng.u_error()
+
+    def test_logpdf_pdf_consistency(self):
+        # 1. check that PINV works with pdf and logpdf only
+        # 2. check that generated ppf is the same (up to a small tolerance)
+
+        class MyDist:
+            pass
+
+        # create generator from dist with only pdf
+        dist_pdf = MyDist()
+        dist_pdf.pdf = lambda x: math.exp(-x*x/2)
+        rng1 = NumericalInversePolynomial(dist_pdf)
+
+        # create dist with only logpdf
+        dist_logpdf = MyDist()
+        dist_logpdf.logpdf = lambda x: -x*x/2
+        rng2 = NumericalInversePolynomial(dist_logpdf)
+
+        q = np.linspace(1e-5, 1-1e-5, num=100)
+        assert_allclose(rng1.ppf(q), rng2.ppf(q))
+
+
+class TestNumericalInverseHermite:
+    #          /  (1 + sin(2 Pi x)) / 2  if |x| <= 1
+    #  f(x) = <
+    #          \  0                      otherwise
+    # Taken from UNU.RAN test suite (from file t_hinv.c)
+    class dist0:
+        def pdf(self, x):
+            return 0.5*(1. + np.sin(2.*np.pi*x))
+
+        def dpdf(self, x):
+            return np.pi*np.cos(2.*np.pi*x)
+
+        def cdf(self, x):
+            return (1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) / (4.*np.pi)
+
+        def support(self):
+            return -1, 1
+
+    #          /  max(sin(2 Pi x), 0) * Pi/2  if -1 < x < 0.5
+    #  f(x) = <
+    #          \  0                           otherwise
+    # Taken from UNU.RAN test suite (from file t_hinv.c)
+    class dist1:
+        def pdf(self, x):
+            if (x <= -0.5):
+                return np.sin((2.
* np.pi) * x) * 0.5 * np.pi + + def dpdf(self, x): + if (x <= -0.5): + return np.cos((2. * np.pi) * x) * np.pi * np.pi + if (x < 0.): + return 0. + if (x <= 0.5): + return np.cos((2. * np.pi) * x) * np.pi * np.pi + + def cdf(self, x): + if (x <= -0.5): + return 0.25 * (1 - np.cos((2. * np.pi) * x)) + if (x < 0.): + return 0.5 + if (x <= 0.5): + return 0.75 - 0.25 * np.cos((2. * np.pi) * x) + + def support(self): + return -1, 0.5 + + dists = [dist0(), dist1()] + + # exact mean and variance of the distributions in the list dists + mv0 = [-1/(2*np.pi), 1/3 - 1/(4*np.pi*np.pi)] + mv1 = [-1/4, 3/8-1/(2*np.pi*np.pi) - 1/16] + mvs = [mv0, mv1] + + @pytest.mark.parametrize("dist, mv_ex", + zip(dists, mvs)) + @pytest.mark.parametrize("order", [3, 5]) + def test_basic(self, dist, mv_ex, order): + rng = NumericalInverseHermite(dist, order=order, random_state=42) + check_cont_samples(rng, dist, mv_ex) + + # test domains with inf + nan in them. need to write a custom test for + # this because not all methods support infinite tails. + @pytest.mark.parametrize("domain, err, msg", inf_nan_domains) + def test_inf_nan_domains(self, domain, err, msg): + with pytest.raises(err, match=msg): + NumericalInverseHermite(StandardNormal(), domain=domain) + + def basic_test_all_scipy_dists(self, distname, shapes): + slow_dists = {'ksone', 'kstwo', 'levy_stable', 'skewnorm'} + fail_dists = {'beta', 'gausshyper', 'geninvgauss', 'ncf', 'nct', + 'norminvgauss', 'genhyperbolic', 'studentized_range', + 'vonmises', 'kappa4', 'invgauss', 'wald'} + + if distname in slow_dists: + pytest.skip("Distribution is too slow") + if distname in fail_dists: + # specific reasons documented in gh-13319 + # https://github.com/scipy/scipy/pull/13319#discussion_r626188955 + pytest.xfail("Fails - usually due to inaccurate CDF/PDF") + + np.random.seed(0) + + dist = getattr(stats, distname)(*shapes) + fni = NumericalInverseHermite(dist) + + x = np.random.rand(10) + p_tol = np.max(np.abs(dist.ppf(x)-fni.ppf(x))/np.abs(dist.ppf(x))) + u_tol = np.max(np.abs(dist.cdf(fni.ppf(x)) - x)) + + assert p_tol < 1e-8 + assert u_tol < 1e-12 + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + @pytest.mark.xslow + @pytest.mark.parametrize(("distname", "shapes"), distcont) + def test_basic_all_scipy_dists(self, distname, shapes): + # if distname == "truncnorm": + # pytest.skip("Tested separately") + self.basic_test_all_scipy_dists(distname, shapes) + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + def test_basic_truncnorm_gh17155(self): + self.basic_test_all_scipy_dists("truncnorm", (0.1, 2)) + + def test_input_validation(self): + match = r"`order` must be either 1, 3, or 5." 
+        with pytest.raises(ValueError, match=match):
+            NumericalInverseHermite(StandardNormal(), order=2)
+
+        match = "`cdf` required but not found"
+        with pytest.raises(ValueError, match=match):
+            NumericalInverseHermite("norm")
+
+        match = "could not convert string to float"
+        with pytest.raises(ValueError, match=match):
+            NumericalInverseHermite(StandardNormal(),
+                                    u_resolution='ekki')
+
+    rngs = [None, 0, np.random.RandomState(0)]
+    rngs.append(np.random.default_rng(0))  # type: ignore
+    sizes = [(None, tuple()), (8, (8,)), ((4, 5, 6), (4, 5, 6))]
+
+    @pytest.mark.parametrize('rng', rngs)
+    @pytest.mark.parametrize('size_in, size_out', sizes)
+    def test_RVS(self, rng, size_in, size_out):
+        dist = StandardNormal()
+        fni = NumericalInverseHermite(dist)
+
+        rng2 = deepcopy(rng)
+        rvs = fni.rvs(size=size_in, random_state=rng)
+        if size_in is not None:
+            assert rvs.shape == size_out
+
+        if rng2 is not None:
+            rng2 = check_random_state(rng2)
+            uniform = rng2.uniform(size=size_in)
+            rvs2 = stats.norm.ppf(uniform)
+            assert_allclose(rvs, rvs2)
+
+    def test_inaccurate_CDF(self):
+        # CDF function with inaccurate tail cannot be inverted; see gh-13319
+        # https://github.com/scipy/scipy/pull/13319#discussion_r626188955
+        shapes = (2.3098496451481823, 0.6268795430096368)
+        match = ("98 : one or more intervals very short; possibly due to "
+                 "numerical problems with a pole or very flat tail")
+
+        # fails with default tol
+        with pytest.warns(RuntimeWarning, match=match):
+            NumericalInverseHermite(stats.beta(*shapes))
+
+        # no error with coarser tol
+        NumericalInverseHermite(stats.beta(*shapes), u_resolution=1e-8)
+
+    def test_custom_distribution(self):
+        dist1 = StandardNormal()
+        fni1 = NumericalInverseHermite(dist1)
+
+        dist2 = stats.norm()
+        fni2 = NumericalInverseHermite(dist2)
+
+        assert_allclose(fni1.rvs(random_state=0), fni2.rvs(random_state=0))
+
+    u = [
+        # check the correctness of the PPF for equidistant points between
+        # 0 and 1.
+        np.linspace(0., 1., num=10000),
+        # test the PPF method for empty arrays
+        [], [[]],
+        # test if nans and infs return nan result.
+        [np.nan], [-np.inf, np.nan, np.inf],
+        # test if a scalar is returned for a scalar input.
+        0,
+        # test for arrays with nans, values greater than 1 and less than 0,
+        # and some valid values.
+        [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
+    ]
+
+    @pytest.mark.parametrize("u", u)
+    def test_ppf(self, u):
+        dist = StandardNormal()
+        rng = NumericalInverseHermite(dist, u_resolution=1e-12)
+        # Older versions of NumPy throw RuntimeWarnings for comparisons
+        # with nan.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in greater")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "greater_equal")
+            sup.filter(RuntimeWarning, "invalid value encountered in less")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "less_equal")
+            res = rng.ppf(u)
+            expected = stats.norm.ppf(u)
+        assert_allclose(res, expected, rtol=1e-9, atol=3e-10)
+        assert res.shape == expected.shape
+
+    def test_u_error(self):
+        dist = StandardNormal()
+        rng = NumericalInverseHermite(dist, u_resolution=1e-10)
+        max_error, mae = rng.u_error()
+        assert max_error < 1e-10
+        assert mae <= max_error
+        with suppress_warnings() as sup:
+            # ignore warning about u-resolution being too small.
+            sup.filter(RuntimeWarning)
+            rng = NumericalInverseHermite(dist, u_resolution=1e-14)
+        max_error, mae = rng.u_error()
+        assert max_error < 1e-14
+        assert mae <= max_error
+
+
+class TestDiscreteGuideTable:
+    basic_fail_dists = {
+        'nchypergeom_fisher',  # numerical errors on tails
+        'nchypergeom_wallenius',  # numerical errors on tails
+        'randint'  # fails on 32-bit ubuntu
+    }
+
+    def test_guide_factor_gt3_raises_warning(self):
+        pv = [0.1, 0.3, 0.6]
+        urng = np.random.default_rng()
+        with pytest.warns(RuntimeWarning):
+            DiscreteGuideTable(pv, random_state=urng, guide_factor=7)
+
+    def test_guide_factor_zero_raises_warning(self):
+        pv = [0.1, 0.3, 0.6]
+        urng = np.random.default_rng()
+        with pytest.warns(RuntimeWarning):
+            DiscreteGuideTable(pv, random_state=urng, guide_factor=0)
+
+    def test_negative_guide_factor_raises_warning(self):
+        # The UNU.RAN wrapper handles a negative guide factor automatically
+        # and already emits a useful warning; here we just check that a
+        # warning is raised.
+        pv = [0.1, 0.3, 0.6]
+        urng = np.random.default_rng()
+        with pytest.warns(RuntimeWarning):
+            DiscreteGuideTable(pv, random_state=urng, guide_factor=-1)
+
+    @pytest.mark.parametrize("distname, params", distdiscrete)
+    def test_basic(self, distname, params):
+        if distname in self.basic_fail_dists:
+            msg = ("DGT fails on these, likely because of large domains "
+                   "and small numerical errors in the PMF.")
+            pytest.skip(msg)
+
+        if not isinstance(distname, str):
+            dist = distname
+        else:
+            dist = getattr(stats, distname)
+
+        dist = dist(*params)
+        domain = dist.support()
+
+        if not np.isfinite(domain[1] - domain[0]):
+            # DGT only works with a finite domain, so skip distributions
+            # with infinite tails.
+            pytest.skip("DGT only works with a finite domain.")
+
+        k = np.arange(domain[0], domain[1]+1)
+        pv = dist.pmf(k)
+        mv_ex = dist.stats('mv')
+        rng = DiscreteGuideTable(dist, random_state=42)
+        check_discr_samples(rng, pv, mv_ex)
+
+    u = [
+        # check the correctness of the PPF for equidistant points
+        # between 0 and 1.
+        np.linspace(0, 1, num=10000),
+        # test the PPF method for empty arrays
+        [], [[]],
+        # test if nans and infs return nan result.
+        [np.nan], [-np.inf, np.nan, np.inf],
+        # test if a scalar is returned for a scalar input.
+        0,
+        # test for arrays with nans, values greater than 1 and less than 0,
+        # and some valid values.
+        [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
+    ]
+
+    @pytest.mark.parametrize('u', u)
+    def test_ppf(self, u):
+        n, p = 4, 0.1
+        dist = stats.binom(n, p)
+        rng = DiscreteGuideTable(dist, random_state=42)
+
+        # Older versions of NumPy throw RuntimeWarnings for comparisons
+        # with nan.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in greater")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "greater_equal")
+            sup.filter(RuntimeWarning, "invalid value encountered in less")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "less_equal")
+
+            res = rng.ppf(u)
+            expected = stats.binom.ppf(u, n, p)
+        assert_equal(res.shape, expected.shape)
+        assert_equal(res, expected)
+
+    @pytest.mark.parametrize("pv, msg", bad_pv_common)
+    def test_bad_pv(self, pv, msg):
+        with pytest.raises(ValueError, match=msg):
+            DiscreteGuideTable(pv)
+
+    # DGT doesn't support infinite tails, so it should raise an error when
+    # inf is present in the domain.
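+    # (Illustrative: a finite domain such as domain=(0, 10) for
+    # stats.binom(10, 0.2) is accepted, while every domain below has at
+    # least one infinite endpoint and should raise.)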
+ inf_domain = [(-np.inf, np.inf), (np.inf, np.inf), (-np.inf, -np.inf), + (0, np.inf), (-np.inf, 0)] + + @pytest.mark.parametrize("domain", inf_domain) + def test_inf_domain(self, domain): + with pytest.raises(ValueError, match=r"must be finite"): + DiscreteGuideTable(stats.binom(10, 0.2), domain=domain) + + +class TestSimpleRatioUniforms: + # pdf with piecewise linear function as transformed density + # with T = -1/sqrt with shift. Taken from UNU.RAN test suite + # (from file t_srou.c) + class dist: + def __init__(self, shift): + self.shift = shift + self.mode = shift + + def pdf(self, x): + x -= self.shift + y = 1. / (abs(x) + 1.) + return 0.5 * y * y + + def cdf(self, x): + x -= self.shift + if x <= 0.: + return 0.5 / (1. - x) + else: + return 1. - 0.5 / (1. + x) + + dists = [dist(0.), dist(10000.)] + + # exact mean and variance of the distributions in the list dists + mv1 = [0., np.inf] + mv2 = [10000., np.inf] + mvs = [mv1, mv2] + + @pytest.mark.parametrize("dist, mv_ex", + zip(dists, mvs)) + def test_basic(self, dist, mv_ex): + rng = SimpleRatioUniforms(dist, mode=dist.mode, random_state=42) + check_cont_samples(rng, dist, mv_ex) + rng = SimpleRatioUniforms(dist, mode=dist.mode, + cdf_at_mode=dist.cdf(dist.mode), + random_state=42) + check_cont_samples(rng, dist, mv_ex) + + # test domains with inf + nan in them. need to write a custom test for + # this because not all methods support infinite tails. + @pytest.mark.parametrize("domain, err, msg", inf_nan_domains) + def test_inf_nan_domains(self, domain, err, msg): + with pytest.raises(err, match=msg): + SimpleRatioUniforms(StandardNormal(), domain=domain) + + def test_bad_args(self): + # pdf_area < 0 + with pytest.raises(ValueError, match=r"`pdf_area` must be > 0"): + SimpleRatioUniforms(StandardNormal(), mode=0, pdf_area=-1) + + +class TestRatioUniforms: + """ Tests for rvs_ratio_uniforms. 
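+
+    The tests below exercise the `RatioUniforms` class.  The
+    ratio-of-uniforms method draws (u, v) uniformly from the region
+    {(u, v): 0 < u <= sqrt(f(v/u))} and returns v/u as a sample from the
+    density proportional to f.  For the standard normal pdf f used here,
+    umax = sqrt(f(0)) and vmax = -vmin = sqrt(2)*sqrt(f(sqrt(2))), since
+    x*sqrt(f(x)) attains its maximum at x = sqrt(2).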
+ """ + + def test_rv_generation(self): + # use KS test to check distribution of rvs + # normal distribution + f = stats.norm.pdf + v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) + u = np.sqrt(f(0)) + gen = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=12345) + assert_equal(stats.kstest(gen.rvs(2500), 'norm')[1] > 0.25, True) + + # exponential distribution + gen = RatioUniforms(lambda x: np.exp(-x), umax=1, + vmin=0, vmax=2*np.exp(-1), random_state=12345) + assert_equal(stats.kstest(gen.rvs(1000), 'expon')[1] > 0.25, True) + + def test_shape(self): + # test shape of return value depending on size parameter + f = stats.norm.pdf + v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) + u = np.sqrt(f(0)) + + gen1 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234) + gen2 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234) + gen3 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234) + r1, r2, r3 = gen1.rvs(3), gen2.rvs((3,)), gen3.rvs((3, 1)) + assert_equal(r1, r2) + assert_equal(r2, r3.flatten()) + assert_equal(r1.shape, (3,)) + assert_equal(r3.shape, (3, 1)) + + gen4 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=12) + gen5 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=12) + r4, r5 = gen4.rvs(size=(3, 3, 3)), gen5.rvs(size=27) + assert_equal(r4.flatten(), r5) + assert_equal(r4.shape, (3, 3, 3)) + + gen6 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234) + gen7 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234) + gen8 = RatioUniforms(f, umax=u, vmin=-v, vmax=v, random_state=1234) + r6, r7, r8 = gen6.rvs(), gen7.rvs(1), gen8.rvs((1,)) + assert_equal(r6, r7) + assert_equal(r7, r8) + + def test_random_state(self): + f = stats.norm.pdf + v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) + umax = np.sqrt(f(0)) + gen1 = RatioUniforms(f, umax=umax, vmin=-v, vmax=v, random_state=1234) + r1 = gen1.rvs(10) + np.random.seed(1234) + gen2 = RatioUniforms(f, umax=umax, vmin=-v, vmax=v) + r2 = gen2.rvs(10) + assert_equal(r1, r2) + + def test_exceptions(self): + f = stats.norm.pdf + # need vmin < vmax + with assert_raises(ValueError, match="vmin must be smaller than vmax"): + RatioUniforms(pdf=f, umax=1, vmin=3, vmax=1) + with assert_raises(ValueError, match="vmin must be smaller than vmax"): + RatioUniforms(pdf=f, umax=1, vmin=1, vmax=1) + # need umax > 0 + with assert_raises(ValueError, match="umax must be positive"): + RatioUniforms(pdf=f, umax=-1, vmin=1, vmax=3) + with assert_raises(ValueError, match="umax must be positive"): + RatioUniforms(pdf=f, umax=0, vmin=1, vmax=3) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_sensitivity_analysis.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_sensitivity_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..ac05c8c9a3f5d84016caf902f028f5cbdb76b889 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_sensitivity_analysis.py @@ -0,0 +1,300 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_array_less +import pytest + +from scipy import stats +from scipy.stats import sobol_indices +from scipy.stats._resampling import BootstrapResult +from scipy.stats._sensitivity_analysis import ( + BootstrapSobolResult, f_ishigami, sample_AB, sample_A_B +) + + +@pytest.fixture(scope='session') +def ishigami_ref_indices(): + """Reference values for Ishigami from Saltelli2007. + + Chapter 4, exercise 5 pages 179-182. + """ + a = 7. 
+ b = 0.1 + + var = 0.5 + a**2/8 + b*np.pi**4/5 + b**2*np.pi**8/18 + v1 = 0.5 + b*np.pi**4/5 + b**2*np.pi**8/50 + v2 = a**2/8 + v3 = 0 + v12 = 0 + # v13: mistake in the book, see other derivations e.g. in 10.1002/nme.4856 + v13 = b**2*np.pi**8*8/225 + v23 = 0 + + s_first = np.array([v1, v2, v3])/var + s_second = np.array([ + [0., 0., v13], + [v12, 0., v23], + [v13, v23, 0.] + ])/var + s_total = s_first + s_second.sum(axis=1) + + return s_first, s_total + + +def f_ishigami_vec(x): + """Output of shape (2, n).""" + res = f_ishigami(x) + return res, res + + +class TestSobolIndices: + + dists = [ + stats.uniform(loc=-np.pi, scale=2*np.pi) # type: ignore[attr-defined] + ] * 3 + + def test_sample_AB(self): + # (d, n) + A = np.array( + [[1, 4, 7, 10], + [2, 5, 8, 11], + [3, 6, 9, 12]] + ) + B = A + 100 + # (d, d, n) + ref = np.array( + [[[101, 104, 107, 110], + [2, 5, 8, 11], + [3, 6, 9, 12]], + [[1, 4, 7, 10], + [102, 105, 108, 111], + [3, 6, 9, 12]], + [[1, 4, 7, 10], + [2, 5, 8, 11], + [103, 106, 109, 112]]] + ) + AB = sample_AB(A=A, B=B) + assert_allclose(AB, ref) + + @pytest.mark.xfail_on_32bit("Can't create large array for test") + @pytest.mark.parametrize( + 'func', + [f_ishigami, pytest.param(f_ishigami_vec, marks=pytest.mark.slow)], + ids=['scalar', 'vector'] + ) + def test_ishigami(self, ishigami_ref_indices, func): + rng = np.random.default_rng(28631265345463262246170309650372465332) + res = sobol_indices( + func=func, n=4096, + dists=self.dists, + random_state=rng + ) + + if func.__name__ == 'f_ishigami_vec': + ishigami_ref_indices = [ + [ishigami_ref_indices[0], ishigami_ref_indices[0]], + [ishigami_ref_indices[1], ishigami_ref_indices[1]] + ] + + assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2) + assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-2) + + assert res._bootstrap_result is None + bootstrap_res = res.bootstrap(n_resamples=99) + assert isinstance(bootstrap_res, BootstrapSobolResult) + assert isinstance(res._bootstrap_result, BootstrapResult) + + assert res._bootstrap_result.confidence_interval.low.shape[0] == 2 + assert res._bootstrap_result.confidence_interval.low[1].shape \ + == res.first_order.shape + + assert bootstrap_res.first_order.confidence_interval.low.shape \ + == res.first_order.shape + assert bootstrap_res.total_order.confidence_interval.low.shape \ + == res.total_order.shape + + assert_array_less( + bootstrap_res.first_order.confidence_interval.low, res.first_order + ) + assert_array_less( + res.first_order, bootstrap_res.first_order.confidence_interval.high + ) + assert_array_less( + bootstrap_res.total_order.confidence_interval.low, res.total_order + ) + assert_array_less( + res.total_order, bootstrap_res.total_order.confidence_interval.high + ) + + # call again to use previous results and change a param + assert isinstance( + res.bootstrap(confidence_level=0.9, n_resamples=99), + BootstrapSobolResult + ) + assert isinstance(res._bootstrap_result, BootstrapResult) + + def test_func_dict(self, ishigami_ref_indices): + rng = np.random.default_rng(28631265345463262246170309650372465332) + n = 4096 + dists = [ + stats.uniform(loc=-np.pi, scale=2*np.pi), + stats.uniform(loc=-np.pi, scale=2*np.pi), + stats.uniform(loc=-np.pi, scale=2*np.pi) + ] + + A, B = sample_A_B(n=n, dists=dists, random_state=rng) + AB = sample_AB(A=A, B=B) + + func = { + 'f_A': f_ishigami(A).reshape(1, -1), + 'f_B': f_ishigami(B).reshape(1, -1), + 'f_AB': f_ishigami(AB).reshape((3, 1, -1)) + } + + res = sobol_indices( + func=func, n=n, + dists=dists, + 
random_state=rng + ) + assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2) + + res = sobol_indices( + func=func, n=n, + random_state=rng + ) + assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2) + + def test_method(self, ishigami_ref_indices): + def jansen_sobol(f_A, f_B, f_AB): + """Jansen for S and Sobol' for St. + + From Saltelli2010, table 2 formulations (c) and (e).""" + var = np.var([f_A, f_B], axis=(0, -1)) + + s = (var - 0.5*np.mean((f_B - f_AB)**2, axis=-1)) / var + st = np.mean(f_A*(f_A - f_AB), axis=-1) / var + + return s.T, st.T + + rng = np.random.default_rng(28631265345463262246170309650372465332) + res = sobol_indices( + func=f_ishigami, n=4096, + dists=self.dists, + method=jansen_sobol, + random_state=rng + ) + + assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2) + assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-2) + + def jansen_sobol_typed( + f_A: np.ndarray, f_B: np.ndarray, f_AB: np.ndarray + ) -> tuple[np.ndarray, np.ndarray]: + return jansen_sobol(f_A, f_B, f_AB) + + _ = sobol_indices( + func=f_ishigami, n=8, + dists=self.dists, + method=jansen_sobol_typed, + random_state=rng + ) + + def test_normalization(self, ishigami_ref_indices): + rng = np.random.default_rng(28631265345463262246170309650372465332) + res = sobol_indices( + func=lambda x: f_ishigami(x) + 1000, n=4096, + dists=self.dists, + random_state=rng + ) + + assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-2) + assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-2) + + def test_constant_function(self, ishigami_ref_indices): + + def f_ishigami_vec_const(x): + """Output of shape (3, n).""" + res = f_ishigami(x) + return res, res * 0 + 10, res + + rng = np.random.default_rng(28631265345463262246170309650372465332) + res = sobol_indices( + func=f_ishigami_vec_const, n=4096, + dists=self.dists, + random_state=rng + ) + + ishigami_vec_indices = [ + [ishigami_ref_indices[0], [0, 0, 0], ishigami_ref_indices[0]], + [ishigami_ref_indices[1], [0, 0, 0], ishigami_ref_indices[1]] + ] + + assert_allclose(res.first_order, ishigami_vec_indices[0], atol=1e-2) + assert_allclose(res.total_order, ishigami_vec_indices[1], atol=1e-2) + + @pytest.mark.xfail_on_32bit("Can't create large array for test") + def test_more_converged(self, ishigami_ref_indices): + rng = np.random.default_rng(28631265345463262246170309650372465332) + res = sobol_indices( + func=f_ishigami, n=2**19, # 524288 + dists=self.dists, + random_state=rng + ) + + assert_allclose(res.first_order, ishigami_ref_indices[0], atol=1e-4) + assert_allclose(res.total_order, ishigami_ref_indices[1], atol=1e-4) + + def test_raises(self): + + message = r"Each distribution in `dists` must have method `ppf`" + with pytest.raises(ValueError, match=message): + sobol_indices(n=0, func=f_ishigami, dists="uniform") + + with pytest.raises(ValueError, match=message): + sobol_indices(n=0, func=f_ishigami, dists=[lambda x: x]) + + message = r"The balance properties of Sobol'" + with pytest.raises(ValueError, match=message): + sobol_indices(n=7, func=f_ishigami, dists=[stats.uniform()]) + + with pytest.raises(ValueError, match=message): + sobol_indices(n=4.1, func=f_ishigami, dists=[stats.uniform()]) + + message = r"'toto' is not a valid 'method'" + with pytest.raises(ValueError, match=message): + sobol_indices(n=0, func=f_ishigami, method='toto') + + message = r"must have the following signature" + with pytest.raises(ValueError, match=message): + sobol_indices(n=0, func=f_ishigami, 
                          method=lambda x: x)
+
+        message = r"'dists' must be defined when 'func' is a callable"
+        with pytest.raises(ValueError, match=message):
+            sobol_indices(n=0, func=f_ishigami)
+
+        def func_wrong_shape_output(x):
+            return x.reshape(-1, 1)
+
+        message = r"'func' output should have a shape"
+        with pytest.raises(ValueError, match=message):
+            sobol_indices(
+                n=2, func=func_wrong_shape_output, dists=[stats.uniform()]
+            )
+
+        message = r"When 'func' is a dictionary"
+        with pytest.raises(ValueError, match=message):
+            sobol_indices(
+                n=2, func={'f_A': [], 'f_AB': []}, dists=[stats.uniform()]
+            )
+
+        with pytest.raises(ValueError, match=message):
+            # f_B malformed
+            sobol_indices(
+                n=2,
+                func={'f_A': [1, 2], 'f_B': [3], 'f_AB': [5, 6, 7, 8]},
+            )
+
+        with pytest.raises(ValueError, match=message):
+            # f_AB malformed
+            sobol_indices(
+                n=2,
+                func={'f_A': [1, 2], 'f_B': [3, 4], 'f_AB': [5, 6, 7]},
+            )
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_stats.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_stats.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1600a95e560933438cc3ca245f36f117407fe0f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_stats.py
@@ -0,0 +1,8677 @@
+""" Test functions for stats module
+
+    WRITTEN BY LOUIS LUANGKESORN FOR THE STATS MODULE
+    BASED ON WILKINSON'S STATISTICS QUIZ
+    https://www.stanford.edu/~clint/bench/wilk.txt
+
+    Additional tests by a host of SciPy developers.
+"""
+import os
+import re
+import warnings
+from collections import namedtuple
+from itertools import product
+import hypothesis.extra.numpy as npst
+import hypothesis
+import contextlib
+
+from numpy.testing import (assert_, assert_equal,
+                           assert_almost_equal, assert_array_almost_equal,
+                           assert_array_equal, assert_approx_equal,
+                           assert_allclose, assert_warns, suppress_warnings,
+                           assert_array_less)
+import pytest
+from pytest import raises as assert_raises
+import numpy.ma.testutils as mat
+from numpy import array, arange, float32, float64, power
+import numpy as np
+
+import scipy.stats as stats
+import scipy.stats.mstats as mstats
+import scipy.stats._mstats_basic as mstats_basic
+from scipy.stats._ksstats import kolmogn
+from scipy.special._testutils import FuncData
+from scipy.special import binom
+from scipy import optimize
+from .common_tests import check_named_results
+from scipy.spatial.distance import cdist
+from scipy.stats._axis_nan_policy import _broadcast_concatenate
+from scipy.stats._stats_py import _permutation_distribution_t
+from scipy._lib._util import AxisError
+
+
+""" Numbers in docstrings beginning with 'W' refer to the section numbers
+    and headings found in the STATISTICS QUIZ of Leland Wilkinson.  These are
+    considered to be essential functionality.  True testing and
+    evaluation of a statistics package requires use of the
+    NIST Statistical test data.  See McCullough (1999), "Assessing the
+    Reliability of Statistical Software", for a test methodology and its
+    implementation in testing SAS, SPSS, and S-Plus.
+"""
+
+# Datasets
+# These data sets are from the nasty.dat sets used by Wilkinson.
+# For completeness, I should write the relevant tests and count them as failures
+# Somewhat acceptable, since this is still beta software.
It would count as a +# good target for 1.0 status +X = array([1,2,3,4,5,6,7,8,9], float) +ZERO = array([0,0,0,0,0,0,0,0,0], float) +BIG = array([99999991,99999992,99999993,99999994,99999995,99999996,99999997, + 99999998,99999999], float) +LITTLE = array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996, + 0.99999997,0.99999998,0.99999999], float) +HUGE = array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12], float) +TINY = array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12], float) +ROUND = array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5], float) + + +class TestTrimmedStats: + # TODO: write these tests to handle missing values properly + dprec = np.finfo(np.float64).precision + + def test_tmean(self): + y = stats.tmean(X, (2, 8), (True, True)) + assert_approx_equal(y, 5.0, significant=self.dprec) + + y1 = stats.tmean(X, limits=(2, 8), inclusive=(False, False)) + y2 = stats.tmean(X, limits=None) + assert_approx_equal(y1, y2, significant=self.dprec) + + x_2d = arange(63, dtype=float64).reshape(9, 7) + y = stats.tmean(x_2d, axis=None) + assert_approx_equal(y, x_2d.mean(), significant=self.dprec) + + y = stats.tmean(x_2d, axis=0) + assert_array_almost_equal(y, x_2d.mean(axis=0), decimal=8) + + y = stats.tmean(x_2d, axis=1) + assert_array_almost_equal(y, x_2d.mean(axis=1), decimal=8) + + y = stats.tmean(x_2d, limits=(2, 61), axis=None) + assert_approx_equal(y, 31.5, significant=self.dprec) + + y = stats.tmean(x_2d, limits=(2, 21), axis=0) + y_true = [14, 11.5, 9, 10, 11, 12, 13] + assert_array_almost_equal(y, y_true, decimal=8) + + y = stats.tmean(x_2d, limits=(2, 21), inclusive=(True, False), axis=0) + y_true = [10.5, 11.5, 9, 10, 11, 12, 13] + assert_array_almost_equal(y, y_true, decimal=8) + + x_2d_with_nan = np.array(x_2d) + x_2d_with_nan[-1, -3:] = np.nan + y = stats.tmean(x_2d_with_nan, limits=(1, 13), axis=0) + y_true = [7, 4.5, 5.5, 6.5, np.nan, np.nan, np.nan] + assert_array_almost_equal(y, y_true, decimal=8) + + with suppress_warnings() as sup: + sup.record(RuntimeWarning, "Mean of empty slice") + + y = stats.tmean(x_2d, limits=(2, 21), axis=1) + y_true = [4, 10, 17, 21, np.nan, np.nan, np.nan, np.nan, np.nan] + assert_array_almost_equal(y, y_true, decimal=8) + + y = stats.tmean(x_2d, limits=(2, 21), + inclusive=(False, True), axis=1) + y_true = [4.5, 10, 17, 21, np.nan, np.nan, np.nan, np.nan, np.nan] + assert_array_almost_equal(y, y_true, decimal=8) + + def test_tvar(self): + y = stats.tvar(X, limits=(2, 8), inclusive=(True, True)) + assert_approx_equal(y, 4.6666666666666661, significant=self.dprec) + + y = stats.tvar(X, limits=None) + assert_approx_equal(y, X.var(ddof=1), significant=self.dprec) + + x_2d = arange(63, dtype=float64).reshape((9, 7)) + y = stats.tvar(x_2d, axis=None) + assert_approx_equal(y, x_2d.var(ddof=1), significant=self.dprec) + + y = stats.tvar(x_2d, axis=0) + assert_array_almost_equal(y[0], np.full((1, 7), 367.50000000), decimal=8) + + y = stats.tvar(x_2d, axis=1) + assert_array_almost_equal(y[0], np.full((1, 9), 4.66666667), decimal=8) + + y = stats.tvar(x_2d[3, :]) + assert_approx_equal(y, 4.666666666666667, significant=self.dprec) + + with suppress_warnings() as sup: + sup.record(RuntimeWarning, "Degrees of freedom <= 0 for slice.") + + # Limiting some values along one axis + y = stats.tvar(x_2d, limits=(1, 5), axis=1, inclusive=(True, True)) + assert_approx_equal(y[0], 2.5, significant=self.dprec) + + # Limiting all values along one axis + y = stats.tvar(x_2d, limits=(0, 6), axis=1, inclusive=(True, True)) + 
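# (Row 0 of x_2d is 0..6, entirely inside the limits, so its tvar equals
+            # the unrestricted sample variance; row 1 is 7..13, with nothing
+            # in [0, 6], hence the nan checked next.)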
+            assert_approx_equal(y[0], 4.666666666666667,
+                                significant=self.dprec)
+            assert_equal(y[1], np.nan)
+
+    def test_tstd(self):
+        y = stats.tstd(X, (2, 8), (True, True))
+        assert_approx_equal(y, 2.1602468994692865, significant=self.dprec)
+
+        y = stats.tstd(X, limits=None)
+        assert_approx_equal(y, X.std(ddof=1), significant=self.dprec)
+
+    def test_tmin(self):
+        assert_equal(stats.tmin(4), 4)
+
+        x = np.arange(10)
+        assert_equal(stats.tmin(x), 0)
+        assert_equal(stats.tmin(x, lowerlimit=0), 0)
+        assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), 1)
+
+        x = x.reshape((5, 2))
+        assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), [2, 1])
+        assert_equal(stats.tmin(x, axis=1), [0, 2, 4, 6, 8])
+        assert_equal(stats.tmin(x, axis=None), 0)
+
+        x = np.arange(10.)
+        x[9] = np.nan
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning, "invalid value*")
+            assert_equal(stats.tmin(x), np.nan)
+            assert_equal(stats.tmin(x, nan_policy='omit'), 0.)
+            assert_raises(ValueError, stats.tmin, x, nan_policy='raise')
+            assert_raises(ValueError, stats.tmin, x, nan_policy='foobar')
+            msg = "'propagate', 'raise', 'omit'"
+            with assert_raises(ValueError, match=msg):
+                stats.tmin(x, nan_policy='foo')
+
+        # check that if a full slice is masked, the output returns a
+        # nan instead of a garbage value.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "All-NaN slice encountered")
+            x = np.arange(16).reshape(4, 4)
+            res = stats.tmin(x, lowerlimit=4, axis=1)
+            assert_equal(res, [np.nan, 4, 8, 12])
+
+    def test_tmax(self):
+        assert_equal(stats.tmax(4), 4)
+
+        x = np.arange(10)
+        assert_equal(stats.tmax(x), 9)
+        assert_equal(stats.tmax(x, upperlimit=9), 9)
+        assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), 8)
+
+        x = x.reshape((5, 2))
+        assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), [8, 7])
+        assert_equal(stats.tmax(x, axis=1), [1, 3, 5, 7, 9])
+        assert_equal(stats.tmax(x, axis=None), 9)
+
+        x = np.arange(10.)
+        x[6] = np.nan
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning, "invalid value*")
+            assert_equal(stats.tmax(x), np.nan)
+            assert_equal(stats.tmax(x, nan_policy='omit'), 9.)
+            assert_raises(ValueError, stats.tmax, x, nan_policy='raise')
+            assert_raises(ValueError, stats.tmax, x, nan_policy='foobar')
+
+        # check that if a full slice is masked, the output returns a
+        # nan instead of a garbage value.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "All-NaN slice encountered")
+            x = np.arange(16).reshape(4, 4)
+            res = stats.tmax(x, upperlimit=11, axis=1)
+            assert_equal(res, [3, 7, 11, np.nan])
+
+    def test_tsem(self):
+        y = stats.tsem(X, limits=(3, 8), inclusive=(False, True))
+        y_ref = np.array([4, 5, 6, 7, 8])
+        assert_approx_equal(y, y_ref.std(ddof=1) / np.sqrt(y_ref.size),
+                            significant=self.dprec)
+
+        assert_approx_equal(stats.tsem(X, limits=[-1, 10]),
+                            stats.tsem(X, limits=None),
+                            significant=self.dprec)
+
+
+class TestCorrPearsonr:
+    """ W.II.D. Compute a correlation matrix on all the variables.
+
+        All the correlations, except for ZERO and MISS, should be exactly 1.
+        ZERO and MISS should have undefined or missing correlations with the
+        other variables.  The same should go for SPEARMAN correlations, if
+        your program has them.
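+
+        Each of BIG, LITTLE, HUGE, TINY and ROUND is an affine transform of
+        X, so in exact arithmetic every correlation tested below is exactly
+        1; these checks mainly probe numerical robustness to large offsets
+        and extreme scales.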
+ """ + + def test_pXX(self): + y = stats.pearsonr(X,X) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pXBIG(self): + y = stats.pearsonr(X,BIG) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pXLITTLE(self): + y = stats.pearsonr(X,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pXHUGE(self): + y = stats.pearsonr(X,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pXTINY(self): + y = stats.pearsonr(X,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pXROUND(self): + y = stats.pearsonr(X,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pBIGBIG(self): + y = stats.pearsonr(BIG,BIG) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pBIGLITTLE(self): + y = stats.pearsonr(BIG,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pBIGHUGE(self): + y = stats.pearsonr(BIG,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pBIGTINY(self): + y = stats.pearsonr(BIG,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pBIGROUND(self): + y = stats.pearsonr(BIG,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pLITTLELITTLE(self): + y = stats.pearsonr(LITTLE,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pLITTLEHUGE(self): + y = stats.pearsonr(LITTLE,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pLITTLETINY(self): + y = stats.pearsonr(LITTLE,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pLITTLEROUND(self): + y = stats.pearsonr(LITTLE,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pHUGEHUGE(self): + y = stats.pearsonr(HUGE,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pHUGETINY(self): + y = stats.pearsonr(HUGE,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pHUGEROUND(self): + y = stats.pearsonr(HUGE,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pTINYTINY(self): + y = stats.pearsonr(TINY,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pTINYROUND(self): + y = stats.pearsonr(TINY,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pROUNDROUND(self): + y = stats.pearsonr(ROUND,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_pearsonr_result_attributes(self): + res = stats.pearsonr(X, X) + attributes = ('correlation', 'pvalue') + check_named_results(res, attributes) + assert_equal(res.correlation, res.statistic) + + def test_r_almost_exactly_pos1(self): + a = arange(3.0) + r, prob = stats.pearsonr(a, a) + + assert_allclose(r, 1.0, atol=1e-15) + # With n = len(a) = 3, the error in prob grows like the + # square root of the error in r. + assert_allclose(prob, 0.0, atol=np.sqrt(2*np.spacing(1.0))) + + def test_r_almost_exactly_neg1(self): + a = arange(3.0) + r, prob = stats.pearsonr(a, -a) + + assert_allclose(r, -1.0, atol=1e-15) + # With n = len(a) = 3, the error in prob grows like the + # square root of the error in r. + assert_allclose(prob, 0.0, atol=np.sqrt(2*np.spacing(1.0))) + + def test_basic(self): + # A basic test, with a correlation coefficient + # that is not 1 or -1. 
+ a = array([-1, 0, 1]) + b = array([0, 0, 3]) + r, prob = stats.pearsonr(a, b) + assert_approx_equal(r, np.sqrt(3)/2) + assert_approx_equal(prob, 1/3) + + def test_constant_input(self): + # Zero variance input + # See https://github.com/scipy/scipy/issues/3728 + msg = "An input array is constant" + with pytest.warns(stats.ConstantInputWarning, match=msg): + r, p = stats.pearsonr([0.667, 0.667, 0.667], [0.123, 0.456, 0.789]) + assert_equal(r, np.nan) + assert_equal(p, np.nan) + + def test_near_constant_input(self): + # Near constant input (but not constant): + x = [2, 2, 2 + np.spacing(2)] + y = [3, 3, 3 + 6*np.spacing(3)] + msg = "An input array is nearly constant; the computed" + with pytest.warns(stats.NearConstantInputWarning, match=msg): + # r and p are garbage, so don't bother checking them in this case. + # (The exact value of r would be 1.) + r, p = stats.pearsonr(x, y) + + def test_very_small_input_values(self): + # Very small values in an input. A naive implementation will + # suffer from underflow. + # See https://github.com/scipy/scipy/issues/9353 + x = [0.004434375, 0.004756007, 0.003911996, 0.0038005, 0.003409971] + y = [2.48e-188, 7.41e-181, 4.09e-208, 2.08e-223, 2.66e-245] + r, p = stats.pearsonr(x,y) + + # The expected values were computed using mpmath with 80 digits + # of precision. + assert_allclose(r, 0.7272930540750450) + assert_allclose(p, 0.1637805429533202) + + def test_very_large_input_values(self): + # Very large values in an input. A naive implementation will + # suffer from overflow. + # See https://github.com/scipy/scipy/issues/8980 + x = 1e90*np.array([0, 0, 0, 1, 1, 1, 1]) + y = 1e90*np.arange(7) + + r, p = stats.pearsonr(x, y) + + # The expected values were computed using mpmath with 80 digits + # of precision. + assert_allclose(r, 0.8660254037844386) + assert_allclose(p, 0.011724811003954638) + + def test_extremely_large_input_values(self): + # Extremely large values in x and y. These values would cause the + # product sigma_x * sigma_y to overflow if the two factors were + # computed independently. + x = np.array([2.3e200, 4.5e200, 6.7e200, 8e200]) + y = np.array([1.2e199, 5.5e200, 3.3e201, 1.0e200]) + r, p = stats.pearsonr(x, y) + + # The expected values were computed using mpmath with 80 digits + # of precision. + assert_allclose(r, 0.351312332103289) + assert_allclose(p, 0.648687667896711) + + def test_length_two_pos1(self): + # Inputs with length 2. + # See https://github.com/scipy/scipy/issues/7730 + res = stats.pearsonr([1, 2], [3, 5]) + r, p = res + assert_equal(r, 1) + assert_equal(p, 1) + assert_equal(res.confidence_interval(), (-1, 1)) + + def test_length_two_neg2(self): + # Inputs with length 2. + # See https://github.com/scipy/scipy/issues/7730 + r, p = stats.pearsonr([2, 1], [3, 5]) + assert_equal(r, -1) + assert_equal(p, 1) + + # Expected values computed with R 3.6.2 cor.test, e.g. + # options(digits=16) + # x <- c(1, 2, 3, 4) + # y <- c(0, 1, 0.5, 1) + # cor.test(x, y, method = "pearson", alternative = "g") + # correlation coefficient and p-value for alternative='two-sided' + # calculated with mpmath agree to 16 digits. 
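+    # (Sanity check: under H0 the density of r is proportional to
+    # (1 - r**2)**((n-4)/2), which for n = 4 is uniform on [-1, 1]; so the
+    # one-sided 'greater' p-value is (1 - r)/2 =
+    # (1 - 0.6741998624632421)/2 = 0.1629000687..., matching the table.)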
+ @pytest.mark.parametrize('alternative, pval, rlow, rhigh, sign', + [('two-sided', 0.325800137536, -0.814938968841, 0.99230697523, 1), + ('less', 0.8370999312316, -1, 0.985600937290653, 1), + ('greater', 0.1629000687684, -0.6785654158217636, 1, 1), + ('two-sided', 0.325800137536, -0.992306975236, 0.81493896884, -1), + ('less', 0.1629000687684, -1.0, 0.6785654158217636, -1), + ('greater', 0.8370999312316, -0.985600937290653, 1.0, -1)]) + def test_basic_example(self, alternative, pval, rlow, rhigh, sign): + x = [1, 2, 3, 4] + y = np.array([0, 1, 0.5, 1]) * sign + result = stats.pearsonr(x, y, alternative=alternative) + assert_allclose(result.statistic, 0.6741998624632421*sign, rtol=1e-12) + assert_allclose(result.pvalue, pval, rtol=1e-6) + ci = result.confidence_interval() + assert_allclose(ci, (rlow, rhigh), rtol=1e-6) + + def test_negative_correlation_pvalue_gh17795(self): + x = np.arange(10) + y = -x + test_greater = stats.pearsonr(x, y, alternative='greater') + test_less = stats.pearsonr(x, y, alternative='less') + assert_allclose(test_greater.pvalue, 1) + assert_allclose(test_less.pvalue, 0, atol=1e-20) + + def test_length3_r_exactly_negative_one(self): + x = [1, 2, 3] + y = [5, -4, -13] + res = stats.pearsonr(x, y) + + # The expected r and p are exact. + r, p = res + assert_allclose(r, -1.0) + assert_allclose(p, 0.0, atol=1e-7) + + assert_equal(res.confidence_interval(), (-1, 1)) + + def test_unequal_lengths(self): + x = [1, 2, 3] + y = [4, 5] + assert_raises(ValueError, stats.pearsonr, x, y) + + def test_len1(self): + x = [1] + y = [2] + assert_raises(ValueError, stats.pearsonr, x, y) + + def test_complex_data(self): + x = [-1j, -2j, -3.0j] + y = [-1j, -2j, -3.0j] + message = 'This function does not support complex data' + with pytest.raises(ValueError, match=message): + stats.pearsonr(x, y) + + @pytest.mark.xslow + @pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided')) + @pytest.mark.parametrize('method', ('permutation', 'monte_carlo')) + def test_resampling_pvalue(self, method, alternative): + rng = np.random.default_rng(24623935790378923) + size = 100 if method == 'permutation' else 1000 + x = rng.normal(size=size) + y = rng.normal(size=size) + methods = {'permutation': stats.PermutationMethod(random_state=rng), + 'monte_carlo': stats.MonteCarloMethod(rvs=(rng.normal,)*2)} + method = methods[method] + res = stats.pearsonr(x, y, alternative=alternative, method=method) + ref = stats.pearsonr(x, y, alternative=alternative) + assert_allclose(res.statistic, ref.statistic, rtol=1e-15) + assert_allclose(res.pvalue, ref.pvalue, rtol=1e-2, atol=1e-3) + + @pytest.mark.xslow + @pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided')) + def test_bootstrap_ci(self, alternative): + rng = np.random.default_rng(24623935790378923) + x = rng.normal(size=100) + y = rng.normal(size=100) + res = stats.pearsonr(x, y, alternative=alternative) + + method = stats.BootstrapMethod(random_state=rng) + res_ci = res.confidence_interval(method=method) + ref_ci = res.confidence_interval() + + assert_allclose(res_ci, ref_ci, atol=1e-2) + + def test_invalid_method(self): + message = "`method` must be an instance of..." + with pytest.raises(ValueError, match=message): + stats.pearsonr([1, 2], [3, 4], method="asymptotic") + + res = stats.pearsonr([1, 2], [3, 4]) + with pytest.raises(ValueError, match=message): + res.confidence_interval(method="exact") + + +class TestFisherExact: + """Some tests to show that fisher_exact() works correctly. 
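+
+    The two-sided p-value is the sum of the hypergeometric probabilities of
+    all 2x2 tables with the observed margins that are no more probable than
+    the observed table.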
+
+    Note that in SciPy 0.9.0 this did not work well for large numbers due to
+    inaccuracy of the hypergeom distribution (see #1218); this has since
+    been fixed.
+
+    Also note that R and SciPy have different argument formats for their
+    hypergeometric distribution functions.
+
+    R:
+    > phyper(18999, 99000, 110000, 39000, lower.tail = FALSE)
+    [1] 1.701815e-09
+    """
+
+    def test_basic(self):
+        fisher_exact = stats.fisher_exact
+
+        res = fisher_exact([[14500, 20000], [30000, 40000]])[1]
+        assert_approx_equal(res, 0.01106, significant=4)
+        res = fisher_exact([[100, 2], [1000, 5]])[1]
+        assert_approx_equal(res, 0.1301, significant=4)
+        res = fisher_exact([[2, 7], [8, 2]])[1]
+        assert_approx_equal(res, 0.0230141, significant=6)
+        res = fisher_exact([[5, 1], [10, 10]])[1]
+        assert_approx_equal(res, 0.1973244, significant=6)
+        res = fisher_exact([[5, 15], [20, 20]])[1]
+        assert_approx_equal(res, 0.0958044, significant=6)
+        res = fisher_exact([[5, 16], [20, 25]])[1]
+        assert_approx_equal(res, 0.1725862, significant=6)
+        res = fisher_exact([[10, 5], [10, 1]])[1]
+        assert_approx_equal(res, 0.1973244, significant=6)
+        res = fisher_exact([[5, 0], [1, 4]])[1]
+        assert_approx_equal(res, 0.04761904, significant=6)
+        res = fisher_exact([[0, 1], [3, 2]])[1]
+        assert_approx_equal(res, 1.0)
+        res = fisher_exact([[0, 2], [6, 4]])[1]
+        assert_approx_equal(res, 0.4545454545)
+        res = fisher_exact([[2, 7], [8, 2]])
+        assert_approx_equal(res[1], 0.0230141, significant=6)
+        assert_approx_equal(res[0], 4.0 / 56)
+
+    def test_precise(self):
+        # results from R
+        #
+        # R defines oddsratio differently (see Notes section of fisher_exact
+        # docstring), so those will not match.  We leave them in anyway, in
+        # case they will be useful later on.  We test only the p-value.
+        tablist = [
+            ([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)),
+            ([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)),
+            ([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)),
+            ([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)),
+            ([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)),
+            ([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)),
+            ([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)),
+            ([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)),
+            ([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)),
+            ([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)),
+            ([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000))
+        ]
+        for table, res_r in tablist:
+            res = stats.fisher_exact(np.asarray(table))
+            np.testing.assert_almost_equal(res[1], res_r[1], decimal=11,
+                                           verbose=True)
+
+    def test_gh4130(self):
+        # Previously, a fudge factor used to distinguish between theoretically
+        # and numerically different probability masses was 1e-4; it has been
+        # tightened to fix gh4130.  Accuracy checked against R fisher.test.
+        # options(digits=16)
+        # table <- matrix(c(6, 108, 37, 200), nrow = 2)
+        # fisher.test(table, alternative = "t")
+        x = [[6, 37], [108, 200]]
+        res = stats.fisher_exact(x)
+        assert_allclose(res[1], 0.005092697748126)
+
+        # case from https://github.com/brentp/fishers_exact_test/issues/27
+        # That package has an (absolute?) fudge factor of 1e-6; too big
+        x = [[22, 0], [0, 102]]
+        res = stats.fisher_exact(x)
+        assert_allclose(res[1], 7.175066786244549e-25)
+
+        # case from https://github.com/brentp/fishers_exact_test/issues/1
+        x = [[94, 48], [3577, 16988]]
+        res = stats.fisher_exact(x)
+        assert_allclose(res[1], 2.069356340993818e-37)
+
+    def test_gh9231(self):
+        # Previously, fisher_exact was extremely slow for this table.
+        # As reported in gh-9231, the p-value should be very nearly zero.
+        x = [[5829225, 5692693], [5760959, 5760959]]
+        res = stats.fisher_exact(x)
+        assert_allclose(res[1], 0, atol=1e-170)
+
+    @pytest.mark.slow
+    def test_large_numbers(self):
+        # Test with some large numbers.  Regression test for #1401
+        pvals = [5.56e-11, 2.666e-11, 1.363e-11]  # from R
+        for pval, num in zip(pvals, [75, 76, 77]):
+            res = stats.fisher_exact([[17704, 496], [1065, num]])[1]
+            assert_approx_equal(res, pval, significant=4)
+
+        res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1]
+        assert_approx_equal(res, 0.2751, significant=4)
+
+    def test_raises(self):
+        # check that we raise an error for input of the wrong shape.
+        assert_raises(ValueError, stats.fisher_exact,
+                      np.arange(6).reshape(2, 3))
+
+    def test_row_or_col_zero(self):
+        tables = ([[0, 0], [5, 10]],
+                  [[5, 10], [0, 0]],
+                  [[0, 5], [0, 10]],
+                  [[5, 0], [10, 0]])
+        for table in tables:
+            oddsratio, pval = stats.fisher_exact(table)
+            assert_equal(pval, 1.0)
+            assert_equal(oddsratio, np.nan)
+
+    def test_less_greater(self):
+        tables = (
+            # Some tables to compare with R:
+            [[2, 7], [8, 2]],
+            [[200, 7], [8, 300]],
+            [[28, 21], [6, 1957]],
+            [[190, 800], [200, 900]],
+            # Some tables with simple exact values
+            # (includes regression test for ticket #1568):
+            [[0, 2], [3, 0]],
+            [[1, 1], [2, 1]],
+            [[2, 0], [1, 2]],
+            [[0, 1], [2, 3]],
+            [[1, 0], [1, 4]],
+        )
+        pvals = (
+            # from R:
+            [0.018521725952066501, 0.9990149169715733],
+            [1.0, 2.0056578803889148e-122],
+            [1.0, 5.7284374608319831e-44],
+            [0.7416227, 0.2959826],
+            # Exact:
+            [0.1, 1.0],
+            [0.7, 0.9],
+            [1.0, 0.3],
+            [2./3, 1.0],
+            [1.0, 1./3],
+        )
+        for table, pval in zip(tables, pvals):
+            res = []
+            res.append(stats.fisher_exact(table, alternative="less")[1])
+            res.append(stats.fisher_exact(table, alternative="greater")[1])
+            assert_allclose(res, pval, atol=0, rtol=1e-7)
+
+    def test_gh3014(self):
+        # check that issue #3014 has been fixed.
+        # Previously, this would have raised a ValueError.
+        odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]])
+
+    @pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
+    def test_result(self, alternative):
+        table = np.array([[14500, 20000], [30000, 40000]])
+        res = stats.fisher_exact(table, alternative=alternative)
+        assert_equal((res.statistic, res.pvalue), res)
+
+
+class TestCorrSpearmanr:
+    """ W.II.D. Compute a correlation matrix on all the variables.
+
+        All the correlations, except for ZERO and MISS, should be exactly 1.
+        ZERO and MISS should have undefined or missing correlations with the
+        other variables.  The same should go for SPEARMAN correlations, if
+        your program has them.
+    """
+
+    def test_scalar(self):
+        y = stats.spearmanr(4., 2.)
+        assert_(np.isnan(y).all())
+
+    def test_uneven_lengths(self):
+        assert_raises(ValueError, stats.spearmanr, [1, 2, 1], [8, 9])
+        assert_raises(ValueError, stats.spearmanr, [1, 2, 1], 8)
+
+    def test_uneven_2d_shapes(self):
+        # Different numbers of columns should work - those just get
+        # concatenated.
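+        # (x contributes 3 variables and y contributes 2, so the combined
+        # correlation matrix checked below is 5x5.)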
+ np.random.seed(232324) + x = np.random.randn(4, 3) + y = np.random.randn(4, 2) + assert stats.spearmanr(x, y).statistic.shape == (5, 5) + assert stats.spearmanr(x.T, y.T, axis=1).pvalue.shape == (5, 5) + + assert_raises(ValueError, stats.spearmanr, x, y, axis=1) + assert_raises(ValueError, stats.spearmanr, x.T, y.T) + + def test_ndim_too_high(self): + np.random.seed(232324) + x = np.random.randn(4, 3, 2) + assert_raises(ValueError, stats.spearmanr, x) + assert_raises(ValueError, stats.spearmanr, x, x) + assert_raises(ValueError, stats.spearmanr, x, None, None) + # But should work with axis=None (raveling axes) for two input arrays + assert_allclose(stats.spearmanr(x, x, axis=None), + stats.spearmanr(x.flatten(), x.flatten(), axis=0)) + + def test_nan_policy(self): + x = np.arange(10.) + x[9] = np.nan + assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan)) + assert_array_equal(stats.spearmanr(x, x, nan_policy='omit'), + (1.0, 0.0)) + assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise') + assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar') + + def test_nan_policy_bug_12458(self): + np.random.seed(5) + x = np.random.rand(5, 10) + k = 6 + x[:, k] = np.nan + y = np.delete(x, k, axis=1) + corx, px = stats.spearmanr(x, nan_policy='omit') + cory, py = stats.spearmanr(y) + corx = np.delete(np.delete(corx, k, axis=1), k, axis=0) + px = np.delete(np.delete(px, k, axis=1), k, axis=0) + assert_allclose(corx, cory, atol=1e-14) + assert_allclose(px, py, atol=1e-14) + + def test_nan_policy_bug_12411(self): + np.random.seed(5) + m = 5 + n = 10 + x = np.random.randn(m, n) + x[1, 0] = np.nan + x[3, -1] = np.nan + corr, pvalue = stats.spearmanr(x, axis=1, nan_policy="propagate") + res = [[stats.spearmanr(x[i, :], x[j, :]).statistic for i in range(m)] + for j in range(m)] + assert_allclose(corr, res) + + def test_sXX(self): + y = stats.spearmanr(X,X) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sXBIG(self): + y = stats.spearmanr(X,BIG) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sXLITTLE(self): + y = stats.spearmanr(X,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sXHUGE(self): + y = stats.spearmanr(X,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sXTINY(self): + y = stats.spearmanr(X,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sXROUND(self): + y = stats.spearmanr(X,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sBIGBIG(self): + y = stats.spearmanr(BIG,BIG) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sBIGLITTLE(self): + y = stats.spearmanr(BIG,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sBIGHUGE(self): + y = stats.spearmanr(BIG,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sBIGTINY(self): + y = stats.spearmanr(BIG,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sBIGROUND(self): + y = stats.spearmanr(BIG,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sLITTLELITTLE(self): + y = stats.spearmanr(LITTLE,LITTLE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sLITTLEHUGE(self): + y = stats.spearmanr(LITTLE,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sLITTLETINY(self): + y = stats.spearmanr(LITTLE,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sLITTLEROUND(self): + y = stats.spearmanr(LITTLE,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sHUGEHUGE(self): + y = stats.spearmanr(HUGE,HUGE) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sHUGETINY(self): + y = stats.spearmanr(HUGE,TINY) + r = y[0] + 
assert_approx_equal(r,1.0) + + def test_sHUGEROUND(self): + y = stats.spearmanr(HUGE,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sTINYTINY(self): + y = stats.spearmanr(TINY,TINY) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sTINYROUND(self): + y = stats.spearmanr(TINY,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_sROUNDROUND(self): + y = stats.spearmanr(ROUND,ROUND) + r = y[0] + assert_approx_equal(r,1.0) + + def test_spearmanr_result_attributes(self): + res = stats.spearmanr(X, X) + attributes = ('correlation', 'pvalue') + check_named_results(res, attributes) + assert_equal(res.correlation, res.statistic) + + def test_1d_vs_2d(self): + x1 = [1, 2, 3, 4, 5, 6] + x2 = [1, 2, 3, 4, 6, 5] + res1 = stats.spearmanr(x1, x2) + res2 = stats.spearmanr(np.asarray([x1, x2]).T) + assert_allclose(res1, res2) + + def test_1d_vs_2d_nans(self): + # Now the same with NaNs present. Regression test for gh-9103. + for nan_policy in ['propagate', 'omit']: + x1 = [1, np.nan, 3, 4, 5, 6] + x2 = [1, 2, 3, 4, 6, np.nan] + res1 = stats.spearmanr(x1, x2, nan_policy=nan_policy) + res2 = stats.spearmanr(np.asarray([x1, x2]).T, nan_policy=nan_policy) + assert_allclose(res1, res2) + + def test_3cols(self): + x1 = np.arange(6) + x2 = -x1 + x3 = np.array([0, 1, 2, 3, 5, 4]) + x = np.asarray([x1, x2, x3]).T + actual = stats.spearmanr(x) + expected_corr = np.array([[1, -1, 0.94285714], + [-1, 1, -0.94285714], + [0.94285714, -0.94285714, 1]]) + expected_pvalue = np.zeros((3, 3), dtype=float) + expected_pvalue[2, 0:2] = 0.00480466472 + expected_pvalue[0:2, 2] = 0.00480466472 + + assert_allclose(actual.statistic, expected_corr) + assert_allclose(actual.pvalue, expected_pvalue) + + def test_gh_9103(self): + # Regression test for gh-9103. + x = np.array([[np.nan, 3.0, 4.0, 5.0, 5.1, 6.0, 9.2], + [5.0, np.nan, 4.1, 4.8, 4.9, 5.0, 4.1], + [0.5, 4.0, 7.1, 3.8, 8.0, 5.1, 7.6]]).T + corr = np.array([[np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan], + [np.nan, np.nan, 1.]]) + assert_allclose(stats.spearmanr(x, nan_policy='propagate').statistic, + corr) + + res = stats.spearmanr(x, nan_policy='omit').statistic + assert_allclose((res[0][1], res[0][2], res[1][2]), + (0.2051957, 0.4857143, -0.4707919), rtol=1e-6) + + def test_gh_8111(self): + # Regression test for gh-8111 (different result for float/int/bool). 
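+        # (rho depends only on ranks, so recasting the same data to
+        # bool/int/float must give matching results up to tie handling.)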
+        n = 100
+        np.random.seed(234568)
+        x = np.random.rand(n)
+        m = np.random.rand(n) > 0.7
+
+        # bool against float, no nans
+        a = (x > .5)
+        b = np.array(x)
+        res1 = stats.spearmanr(a, b, nan_policy='omit').statistic
+
+        # bool against float with NaNs
+        b[m] = np.nan
+        res2 = stats.spearmanr(a, b, nan_policy='omit').statistic
+
+        # int against float with NaNs
+        a = a.astype(np.int32)
+        res3 = stats.spearmanr(a, b, nan_policy='omit').statistic
+
+        expected = [0.865895477, 0.866100381, 0.866100381]
+        assert_allclose([res1, res2, res3], expected)
+
+
+class TestCorrSpearmanr2:
+    """Some further tests of the spearmanr function."""
+
+    def test_spearmanr_vs_r(self):
+        # Cross-check with R:
+        # cor.test(c(1,2,3,4,5),c(5,6,7,8,7),method="spearman")
+        x1 = [1, 2, 3, 4, 5]
+        x2 = [5, 6, 7, 8, 7]
+        expected = (0.82078268166812329, 0.088587005313543798)
+        res = stats.spearmanr(x1, x2)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    def test_empty_arrays(self):
+        assert_equal(stats.spearmanr([], []), (np.nan, np.nan))
+
+    def test_normal_draws(self):
+        np.random.seed(7546)
+        x = np.array([np.random.normal(loc=1, scale=1, size=500),
+                      np.random.normal(loc=1, scale=1, size=500)])
+        corr = [[1.0, 0.3],
+                [0.3, 1.0]]
+        x = np.dot(np.linalg.cholesky(corr), x)
+        expected = (0.28659685838743354, 6.579862219051161e-11)
+        res = stats.spearmanr(x[0], x[1])
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    def test_corr_1(self):
+        assert_approx_equal(stats.spearmanr([1, 1, 2], [1, 1, 2])[0], 1.0)
+
+    def test_nan_policies(self):
+        x = np.arange(10.)
+        x[9] = np.nan
+        assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
+        assert_allclose(stats.spearmanr(x, x, nan_policy='omit'),
+                        (1.0, 0))
+        assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
+        assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
+
+    def test_unequal_lengths(self):
+        x = np.arange(10.)
+        y = np.arange(20.)
+        assert_raises(ValueError, stats.spearmanr, x, y)
+
+    def test_omit_paired_value(self):
+        x1 = [1, 2, 3, 4]
+        x2 = [8, 7, 6, np.nan]
+        res1 = stats.spearmanr(x1, x2, nan_policy='omit')
+        res2 = stats.spearmanr(x1[:3], x2[:3], nan_policy='omit')
+        assert_equal(res1, res2)
+
+    def test_gh_issue_6061_windows_overflow(self):
+        x = list(range(2000))
+        y = list(range(2000))
+        y[0], y[9] = y[9], y[0]
+        y[10], y[434] = y[434], y[10]
+        y[435], y[1509] = y[1509], y[435]
+        # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
+        #     = 1 - (1 / 500)
+        #     = 0.998
+        x.append(np.nan)
+        y.append(3.0)
+        assert_almost_equal(stats.spearmanr(x, y, nan_policy='omit')[0], 0.998)
+
+    def test_tie0(self):
+        # with only ties in one or both inputs
+        warn_msg = "An input array is constant"
+        with pytest.warns(stats.ConstantInputWarning, match=warn_msg):
+            r, p = stats.spearmanr([2, 2, 2], [2, 2, 2])
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+            r, p = stats.spearmanr([2, 0, 2], [2, 2, 2])
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+            r, p = stats.spearmanr([2, 2, 2], [2, 0, 2])
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+
+    def test_tie1(self):
+        # Data
+        x = [1.0, 2.0, 3.0, 4.0]
+        y = [1.0, 2.0, 2.0, 3.0]
+        # Ranks of the data, with tie-handling.
+        xr = [1.0, 2.0, 3.0, 4.0]
+        yr = [1.0, 2.5, 2.5, 4.0]
+        # The result of spearmanr should be the same as applying
+        # pearsonr to the ranks.
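+        # (The tied 2.0 values in y receive the average of ranks 2 and 3,
+        # i.e. 2.5 each, which is how yr above was constructed.)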
+ sr = stats.spearmanr(x, y) + pr = stats.pearsonr(xr, yr) + assert_almost_equal(sr, pr) + + def test_tie2(self): + # Test tie-handling if inputs contain nan's + # Data without nan's + x1 = [1, 2, 2.5, 2] + y1 = [1, 3, 2.5, 4] + # Same data with nan's + x2 = [1, 2, 2.5, 2, np.nan] + y2 = [1, 3, 2.5, 4, np.nan] + + # Results for two data sets should be the same if nan's are ignored + sr1 = stats.spearmanr(x1, y1) + sr2 = stats.spearmanr(x2, y2, nan_policy='omit') + assert_almost_equal(sr1, sr2) + + def test_ties_axis_1(self): + z1 = np.array([[1, 1, 1, 1], [1, 2, 3, 4]]) + z2 = np.array([[1, 2, 3, 4], [1, 1, 1, 1]]) + z3 = np.array([[1, 1, 1, 1], [1, 1, 1, 1]]) + warn_msg = "An input array is constant" + with pytest.warns(stats.ConstantInputWarning, match=warn_msg): + r, p = stats.spearmanr(z1, axis=1) + assert_equal(r, np.nan) + assert_equal(p, np.nan) + r, p = stats.spearmanr(z2, axis=1) + assert_equal(r, np.nan) + assert_equal(p, np.nan) + r, p = stats.spearmanr(z3, axis=1) + assert_equal(r, np.nan) + assert_equal(p, np.nan) + + def test_gh_11111(self): + x = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) + y = np.array([0, 0.009783728115345005, 0, 0, 0.0019759230121848587, + 0.0007535430349118562, 0.0002661781514710257, 0, 0, + 0.0007835762419683435]) + warn_msg = "An input array is constant" + with pytest.warns(stats.ConstantInputWarning, match=warn_msg): + r, p = stats.spearmanr(x, y) + assert_equal(r, np.nan) + assert_equal(p, np.nan) + + def test_index_error(self): + x = np.array([1.0, 7.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) + y = np.array([0, 0.009783728115345005, 0, 0, 0.0019759230121848587, + 0.0007535430349118562, 0.0002661781514710257, 0, 0, + 0.0007835762419683435]) + assert_raises(ValueError, stats.spearmanr, x, y, axis=2) + + def test_alternative(self): + # Test alternative parameter + + # Simple test - Based on the above ``test_spearmanr_vs_r`` + x1 = [1, 2, 3, 4, 5] + x2 = [5, 6, 7, 8, 7] + + # strong positive correlation + expected = (0.82078268166812329, 0.088587005313543798) + + # correlation > 0 -> large "less" p-value + res = stats.spearmanr(x1, x2, alternative="less") + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], 1 - (expected[1] / 2)) + + # correlation > 0 -> small "less" p-value + res = stats.spearmanr(x1, x2, alternative="greater") + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1] / 2) + + with pytest.raises(ValueError, match="`alternative` must be 'less'..."): + stats.spearmanr(x1, x2, alternative="ekki-ekki") + + @pytest.mark.parametrize("alternative", ('two-sided', 'less', 'greater')) + def test_alternative_nan_policy(self, alternative): + # Test nan policies + x1 = [1, 2, 3, 4, 5] + x2 = [5, 6, 7, 8, 7] + x1nan = x1 + [np.nan] + x2nan = x2 + [np.nan] + + # test nan_policy="propagate" + assert_array_equal(stats.spearmanr(x1nan, x2nan), (np.nan, np.nan)) + + # test nan_policy="omit" + res_actual = stats.spearmanr(x1nan, x2nan, nan_policy='omit', + alternative=alternative) + res_expected = stats.spearmanr(x1, x2, alternative=alternative) + assert_allclose(res_actual, res_expected) + + # test nan_policy="raise" + message = 'The input contains nan values' + with pytest.raises(ValueError, match=message): + stats.spearmanr(x1nan, x2nan, nan_policy='raise', + alternative=alternative) + + # test invalid nan_policy + message = "nan_policy must be one of..." 
+        with pytest.raises(ValueError, match=message):
+            stats.spearmanr(x1nan, x2nan, nan_policy='ekki-ekki',
+                            alternative=alternative)
+
+
+# W.II.E. Tabulate X against X, using BIG as a case weight.  The values
+# should appear on the diagonal and the total should be 899999955.
+# If the table cannot hold these values, forget about working with
+# census data.  You can also tabulate HUGE against TINY.  There is no
+# reason a tabulation program should not be able to distinguish
+# different values regardless of their magnitude.
+
+# I need to figure out how to do this one.
+
+
+def test_kendalltau():
+    # For the cases without ties, both variants should give the same
+    # result.
+    variants = ('b', 'c')
+
+    # case without ties, con-dis equal zero
+    x = [5, 2, 1, 3, 6, 4, 7, 8]
+    y = [5, 2, 6, 3, 1, 8, 7, 4]
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (0.0, 1.0)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # case without ties, con-dis equal zero
+    x = [0, 5, 2, 1, 3, 6, 4, 7, 8]
+    y = [5, 2, 0, 6, 3, 1, 8, 7, 4]
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (0.0, 1.0)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # case without ties, con-dis close to zero
+    x = [5, 2, 1, 3, 6, 4, 7]
+    y = [5, 2, 6, 3, 1, 7, 4]
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (-0.14285714286, 0.77261904762)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # case without ties, con-dis close to zero
+    x = [2, 1, 3, 6, 4, 7, 8]
+    y = [2, 6, 3, 1, 8, 7, 4]
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (0.047619047619, 1.0)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # simple case without ties
+    x = np.arange(10)
+    y = np.arange(10)
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (1.0, 5.511463844797e-07)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # swap a couple of values
+    b = y[1]
+    y[1] = y[2]
+    y[2] = b
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (0.9555555555555556, 5.511463844797e-06)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # swap a couple more
+    b = y[5]
+    y[5] = y[6]
+    y[6] = b
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (0.9111111111111111, 2.976190476190e-05)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # same in opposite direction
+    x = np.arange(10)
+    y = np.arange(10)[::-1]
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (-1.0, 5.511463844797e-07)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1]) + + # swap a couple of values + b = y[1] + y[1] = y[2] + y[2] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = (-0.9555555555555556, 5.511463844797e-06) + for taux in variants: + res = stats.kendalltau(x, y, variant=taux) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # swap a couple more + b = y[5] + y[5] = y[6] + y[6] = b + # Cross-check with exact result from R: + # cor.test(x,y,method="kendall",exact=1) + expected = (-0.9111111111111111, 2.976190476190e-05) + for taux in variants: + res = stats.kendalltau(x, y, variant=taux) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # Check a case where variants are different + # Example values found from Kendall (1970). + # P-value is the same for the both variants + x = array([1, 2, 2, 4, 4, 6, 6, 8, 9, 9]) + y = array([1, 2, 4, 4, 4, 4, 8, 8, 8, 10]) + expected = 0.85895569 + assert_approx_equal(stats.kendalltau(x, y, variant='b')[0], expected) + expected = 0.825 + assert_approx_equal(stats.kendalltau(x, y, variant='c')[0], expected) + + # check exception in case of ties and method='exact' requested + y[2] = y[1] + assert_raises(ValueError, stats.kendalltau, x, y, method='exact') + + # check exception in case of invalid method keyword + assert_raises(ValueError, stats.kendalltau, x, y, method='banana') + + # check exception in case of invalid variant keyword + assert_raises(ValueError, stats.kendalltau, x, y, variant='rms') + + # tau-b with some ties + # Cross-check with R: + # cor.test(c(12,2,1,12,2),c(1,4,7,1,0),method="kendall",exact=FALSE) + x1 = [12, 2, 1, 12, 2] + x2 = [1, 4, 7, 1, 0] + expected = (-0.47140452079103173, 0.28274545993277478) + res = stats.kendalltau(x1, x2) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # test for namedtuple attribute results + attributes = ('correlation', 'pvalue') + for taux in variants: + res = stats.kendalltau(x1, x2, variant=taux) + check_named_results(res, attributes) + assert_equal(res.correlation, res.statistic) + + # with only ties in one or both inputs in tau-b or tau-c + for taux in variants: + assert_equal(stats.kendalltau([2, 2, 2], [2, 2, 2], variant=taux), + (np.nan, np.nan)) + assert_equal(stats.kendalltau([2, 0, 2], [2, 2, 2], variant=taux), + (np.nan, np.nan)) + assert_equal(stats.kendalltau([2, 2, 2], [2, 0, 2], variant=taux), + (np.nan, np.nan)) + + # empty arrays provided as input + assert_equal(stats.kendalltau([], []), (np.nan, np.nan)) + + # check with larger arrays + np.random.seed(7546) + x = np.array([np.random.normal(loc=1, scale=1, size=500), + np.random.normal(loc=1, scale=1, size=500)]) + corr = [[1.0, 0.3], + [0.3, 1.0]] + x = np.dot(np.linalg.cholesky(corr), x) + expected = (0.19291382765531062, 1.1337095377742629e-10) + res = stats.kendalltau(x[0], x[1]) + assert_approx_equal(res[0], expected[0]) + assert_approx_equal(res[1], expected[1]) + + # this should result in 1 for taub but not tau-c + assert_approx_equal(stats.kendalltau([1, 1, 2], [1, 1, 2], variant='b')[0], + 1.0) + assert_approx_equal(stats.kendalltau([1, 1, 2], [1, 1, 2], variant='c')[0], + 0.88888888) + + # test nan_policy + x = np.arange(10.) 
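+    # Editor's aside, a hedged cross-check (not part of the original suite):
+    # the tied Kendall (1970) example above can be verified against the
+    # textbook formulas tau_b = (C-D)/sqrt((C+D+Tx)*(C+D+Ty)) and
+    # tau_c = 2*m*(C-D)/(n**2*(m-1)), where C/D count concordant/discordant
+    # pairs, Tx/Ty count pairs tied in one input only, and m is the smaller
+    # number of distinct values. All names below are local to this sketch.
+    xa = [1, 2, 2, 4, 4, 6, 6, 8, 9, 9]
+    ya = [1, 2, 4, 4, 4, 4, 8, 8, 8, 10]
+    C = D = tx = ty = 0
+    for i in range(len(xa)):
+        for j in range(i + 1, len(xa)):
+            s = np.sign(xa[i] - xa[j]) * np.sign(ya[i] - ya[j])
+            C, D = C + (s > 0), D + (s < 0)
+            tx += xa[i] == xa[j] and ya[i] != ya[j]
+            ty += ya[i] == ya[j] and xa[i] != xa[j]
+    m, n_obs = min(len(set(xa)), len(set(ya))), len(xa)
+    assert_approx_equal((C - D) / np.sqrt((C + D + tx) * (C + D + ty)),
+                        0.85895569, significant=7)
+    assert_approx_equal(2 * m * (C - D) / (n_obs**2 * (m - 1)), 0.825)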
+ x[9] = np.nan + assert_array_equal(stats.kendalltau(x, x), (np.nan, np.nan)) + assert_allclose(stats.kendalltau(x, x, nan_policy='omit'), + (1.0, 5.5114638e-6), rtol=1e-06) + assert_allclose(stats.kendalltau(x, x, nan_policy='omit', method='asymptotic'), + (1.0, 0.00017455009626808976), rtol=1e-06) + assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='raise') + assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='foobar') + + # test unequal length inputs + x = np.arange(10.) + y = np.arange(20.) + assert_raises(ValueError, stats.kendalltau, x, y) + + # test all ties + tau, p_value = stats.kendalltau([], []) + assert_equal(np.nan, tau) + assert_equal(np.nan, p_value) + tau, p_value = stats.kendalltau([0], [0]) + assert_equal(np.nan, tau) + assert_equal(np.nan, p_value) + + # Regression test for GitHub issue #6061 - Overflow on Windows + x = np.arange(2000, dtype=float) + x = np.ma.masked_greater(x, 1995) + y = np.arange(2000, dtype=float) + y = np.concatenate((y[1000:], y[:1000])) + assert_(np.isfinite(stats.kendalltau(x,y)[1])) + + +def test_kendalltau_vs_mstats_basic(): + np.random.seed(42) + for s in range(2,10): + a = [] + # Generate rankings with ties + for i in range(s): + a += [i]*i + b = list(a) + np.random.shuffle(a) + np.random.shuffle(b) + expected = mstats_basic.kendalltau(a, b) + actual = stats.kendalltau(a, b) + assert_approx_equal(actual[0], expected[0]) + assert_approx_equal(actual[1], expected[1]) + + +def test_kendalltau_nan_2nd_arg(): + # regression test for gh-6134: nans in the second arg were not handled + x = [1., 2., 3., 4.] + y = [np.nan, 2.4, 3.4, 3.4] + + r1 = stats.kendalltau(x, y, nan_policy='omit') + r2 = stats.kendalltau(x[1:], y[1:]) + assert_allclose(r1.statistic, r2.statistic, atol=1e-15) + + +def test_kendalltau_deprecations(): + msg_dep = "keyword argument 'initial_lexsort'" + with pytest.deprecated_call(match=msg_dep): + stats.kendalltau([], [], initial_lexsort=True) + with pytest.deprecated_call(match=f"use keyword arguments|{msg_dep}"): + stats.kendalltau([], [], True) + + +def test_kendalltau_gh18139_overflow(): + # gh-18139 reported an overflow in `kendalltau` that appeared after + # SciPy 0.15.1. Check that this particular overflow does not occur. + # (Test would fail if warning were emitted.) + import random + random.seed(6272161) + classes = [1, 2, 3, 4, 5, 6, 7] + n_samples = 2 * 10 ** 5 + x = random.choices(classes, k=n_samples) + y = random.choices(classes, k=n_samples) + res = stats.kendalltau(x, y) + # Reference value from SciPy 0.15.1 + assert_allclose(res.statistic, 0.0011816493905730343) + # Reference p-value from `permutation_test` w/ n_resamples=9999 (default). + # Expected to be accurate to at least two digits. 
+ assert_allclose(res.pvalue, 0.4894, atol=2e-3) + + +class TestKendallTauAlternative: + def test_kendalltau_alternative_asymptotic(self): + # Test alternative parameter, asymptotic method (due to tie) + + # Based on TestCorrSpearman2::test_alternative + x1 = [1, 2, 3, 4, 5] + x2 = [5, 6, 7, 8, 7] + + # strong positive correlation + expected = stats.kendalltau(x1, x2, alternative="two-sided") + assert expected[0] > 0 + + # rank correlation > 0 -> large "less" p-value + res = stats.kendalltau(x1, x2, alternative="less") + assert_equal(res[0], expected[0]) + assert_allclose(res[1], 1 - (expected[1] / 2)) + + # rank correlation > 0 -> small "greater" p-value + res = stats.kendalltau(x1, x2, alternative="greater") + assert_equal(res[0], expected[0]) + assert_allclose(res[1], expected[1] / 2) + + # reverse the direction of rank correlation + x2.reverse() + + # strong negative correlation + expected = stats.kendalltau(x1, x2, alternative="two-sided") + assert expected[0] < 0 + + # rank correlation < 0 -> large "greater" p-value + res = stats.kendalltau(x1, x2, alternative="greater") + assert_equal(res[0], expected[0]) + assert_allclose(res[1], 1 - (expected[1] / 2)) + + # rank correlation < 0 -> small "less" p-value + res = stats.kendalltau(x1, x2, alternative="less") + assert_equal(res[0], expected[0]) + assert_allclose(res[1], expected[1] / 2) + + with pytest.raises(ValueError, match="`alternative` must be 'less'..."): + stats.kendalltau(x1, x2, alternative="ekki-ekki") + + # There are a lot of special cases considered in the calculation of the + # exact p-value, so we test each separately. We also need to test + # separately when the observed statistic is in the left tail vs the right + # tail because the code leverages symmetry of the null distribution; to + # do that we use the same test case but negate one of the samples. + # Reference values computed using R cor.test, e.g. 
+ # options(digits=16) + # x <- c(44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1) + # y <- c( 2.6, 3.1, 2.5, 5.0, 3.6, 4.0, 5.2, 2.8, 3.8) + # cor.test(x, y, method = "kendall", alternative = "g") + + alternatives = ('less', 'two-sided', 'greater') + p_n1 = [np.nan, np.nan, np.nan] + p_n2 = [1, 1, 0.5] + p_c0 = [1, 0.3333333333333, 0.1666666666667] + p_c1 = [0.9583333333333, 0.3333333333333, 0.1666666666667] + p_no_correlation = [0.5916666666667, 1, 0.5916666666667] + p_no_correlationb = [0.5475694444444, 1, 0.5475694444444] + p_n_lt_171 = [0.9624118165785, 0.1194389329806, 0.0597194664903] + p_n_lt_171b = [0.246236925303, 0.4924738506059, 0.755634083327] + p_n_lt_171c = [0.9847475308925, 0.03071385306533, 0.01535692653267] + + def exact_test(self, x, y, alternative, rev, stat_expected, p_expected): + if rev: + y = -np.asarray(y) + stat_expected *= -1 + res = stats.kendalltau(x, y, method='exact', alternative=alternative) + res_expected = stat_expected, p_expected + assert_allclose(res, res_expected) + + case_R_n1 = (list(zip(alternatives, p_n1, [False]*3)) + + list(zip(alternatives, reversed(p_n1), [True]*3))) + + @pytest.mark.parametrize("alternative, p_expected, rev", case_R_n1) + def test_against_R_n1(self, alternative, p_expected, rev): + x, y = [1], [2] + stat_expected = np.nan + self.exact_test(x, y, alternative, rev, stat_expected, p_expected) + + case_R_n2 = (list(zip(alternatives, p_n2, [False]*3)) + + list(zip(alternatives, reversed(p_n2), [True]*3))) + + @pytest.mark.parametrize("alternative, p_expected, rev", case_R_n2) + def test_against_R_n2(self, alternative, p_expected, rev): + x, y = [1, 2], [3, 4] + stat_expected = 0.9999999999999998 + self.exact_test(x, y, alternative, rev, stat_expected, p_expected) + + case_R_c0 = (list(zip(alternatives, p_c0, [False]*3)) + + list(zip(alternatives, reversed(p_c0), [True]*3))) + + @pytest.mark.parametrize("alternative, p_expected, rev", case_R_c0) + def test_against_R_c0(self, alternative, p_expected, rev): + x, y = [1, 2, 3], [1, 2, 3] + stat_expected = 1 + self.exact_test(x, y, alternative, rev, stat_expected, p_expected) + + case_R_c1 = (list(zip(alternatives, p_c1, [False]*3)) + + list(zip(alternatives, reversed(p_c1), [True]*3))) + + @pytest.mark.parametrize("alternative, p_expected, rev", case_R_c1) + def test_against_R_c1(self, alternative, p_expected, rev): + x, y = [1, 2, 3, 4], [1, 2, 4, 3] + stat_expected = 0.6666666666666667 + self.exact_test(x, y, alternative, rev, stat_expected, p_expected) + + case_R_no_corr = (list(zip(alternatives, p_no_correlation, [False]*3)) + + list(zip(alternatives, reversed(p_no_correlation), + [True]*3))) + + @pytest.mark.parametrize("alternative, p_expected, rev", case_R_no_corr) + def test_against_R_no_correlation(self, alternative, p_expected, rev): + x, y = [1, 2, 3, 4, 5], [1, 5, 4, 2, 3] + stat_expected = 0 + self.exact_test(x, y, alternative, rev, stat_expected, p_expected) + + case_no_cor_b = (list(zip(alternatives, p_no_correlationb, [False]*3)) + + list(zip(alternatives, reversed(p_no_correlationb), + [True]*3))) + + @pytest.mark.parametrize("alternative, p_expected, rev", case_no_cor_b) + def test_against_R_no_correlationb(self, alternative, p_expected, rev): + x, y = [1, 2, 3, 4, 5, 6, 7, 8], [8, 6, 1, 3, 2, 5, 4, 7] + stat_expected = 0 + self.exact_test(x, y, alternative, rev, stat_expected, p_expected) + + case_R_lt_171 = (list(zip(alternatives, p_n_lt_171, [False]*3)) + + list(zip(alternatives, reversed(p_n_lt_171), [True]*3))) + + @pytest.mark.parametrize("alternative, 
p_expected, rev", case_R_lt_171) + def test_against_R_lt_171(self, alternative, p_expected, rev): + # Data from Hollander & Wolfe (1973), p. 187f. + # Used from https://rdrr.io/r/stats/cor.test.html + x = [44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1] + y = [2.6, 3.1, 2.5, 5.0, 3.6, 4.0, 5.2, 2.8, 3.8] + stat_expected = 0.4444444444444445 + self.exact_test(x, y, alternative, rev, stat_expected, p_expected) + + case_R_lt_171b = (list(zip(alternatives, p_n_lt_171b, [False]*3)) + + list(zip(alternatives, reversed(p_n_lt_171b), + [True]*3))) + + @pytest.mark.parametrize("alternative, p_expected, rev", case_R_lt_171b) + def test_against_R_lt_171b(self, alternative, p_expected, rev): + np.random.seed(0) + x = np.random.rand(100) + y = np.random.rand(100) + stat_expected = -0.04686868686868687 + self.exact_test(x, y, alternative, rev, stat_expected, p_expected) + + case_R_lt_171c = (list(zip(alternatives, p_n_lt_171c, [False]*3)) + + list(zip(alternatives, reversed(p_n_lt_171c), + [True]*3))) + + @pytest.mark.parametrize("alternative, p_expected, rev", case_R_lt_171c) + def test_against_R_lt_171c(self, alternative, p_expected, rev): + np.random.seed(0) + x = np.random.rand(170) + y = np.random.rand(170) + stat_expected = 0.1115906717716673 + self.exact_test(x, y, alternative, rev, stat_expected, p_expected) + + case_gt_171 = (list(zip(alternatives, [False]*3)) + + list(zip(alternatives, [True]*3))) + + @pytest.mark.parametrize("alternative, rev", case_gt_171) + def test_gt_171(self, alternative, rev): + np.random.seed(0) + x = np.random.rand(400) + y = np.random.rand(400) + res0 = stats.kendalltau(x, y, method='exact', + alternative=alternative) + res1 = stats.kendalltau(x, y, method='asymptotic', + alternative=alternative) + assert_equal(res0[0], res1[0]) + assert_allclose(res0[1], res1[1], rtol=1e-3) + + @pytest.mark.parametrize("method", ('exact', 'asymptotic')) + @pytest.mark.parametrize("alternative", ('two-sided', 'less', 'greater')) + def test_nan_policy(self, method, alternative): + # Test nan policies + x1 = [1, 2, 3, 4, 5] + x2 = [5, 6, 7, 8, 9] + x1nan = x1 + [np.nan] + x2nan = x2 + [np.nan] + + # test nan_policy="propagate" + res_actual = stats.kendalltau(x1nan, x2nan, + method=method, alternative=alternative) + res_expected = (np.nan, np.nan) + assert_allclose(res_actual, res_expected) + + # test nan_policy="omit" + res_actual = stats.kendalltau(x1nan, x2nan, nan_policy='omit', + method=method, alternative=alternative) + res_expected = stats.kendalltau(x1, x2, method=method, + alternative=alternative) + assert_allclose(res_actual, res_expected) + + # test nan_policy="raise" + message = 'The input contains nan values' + with pytest.raises(ValueError, match=message): + stats.kendalltau(x1nan, x2nan, nan_policy='raise', + method=method, alternative=alternative) + + # test invalid nan_policy + message = "nan_policy must be one of..." 
+ with pytest.raises(ValueError, match=message): + stats.kendalltau(x1nan, x2nan, nan_policy='ekki-ekki', + method=method, alternative=alternative) + + +def test_weightedtau(): + x = [12, 2, 1, 12, 2] + y = [1, 4, 7, 1, 0] + tau, p_value = stats.weightedtau(x, y) + assert_approx_equal(tau, -0.56694968153682723) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(x, y, additive=False) + assert_approx_equal(tau, -0.62205716951801038) + assert_equal(np.nan, p_value) + # This must be exactly Kendall's tau + tau, p_value = stats.weightedtau(x, y, weigher=lambda x: 1) + assert_approx_equal(tau, -0.47140452079103173) + assert_equal(np.nan, p_value) + + # test for namedtuple attribute results + res = stats.weightedtau(x, y) + attributes = ('correlation', 'pvalue') + check_named_results(res, attributes) + assert_equal(res.correlation, res.statistic) + + # Asymmetric, ranked version + tau, p_value = stats.weightedtau(x, y, rank=None) + assert_approx_equal(tau, -0.4157652301037516) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(y, x, rank=None) + assert_approx_equal(tau, -0.7181341329699029) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(x, y, rank=None, additive=False) + assert_approx_equal(tau, -0.40644850966246893) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(y, x, rank=None, additive=False) + assert_approx_equal(tau, -0.83766582937355172) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(x, y, rank=False) + assert_approx_equal(tau, -0.51604397940261848) + assert_equal(np.nan, p_value) + # This must be exactly Kendall's tau + tau, p_value = stats.weightedtau(x, y, rank=True, weigher=lambda x: 1) + assert_approx_equal(tau, -0.47140452079103173) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau(y, x, rank=True, weigher=lambda x: 1) + assert_approx_equal(tau, -0.47140452079103173) + assert_equal(np.nan, p_value) + # Test argument conversion + tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), y) + assert_approx_equal(tau, -0.56694968153682723) + tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.int16), y) + assert_approx_equal(tau, -0.56694968153682723) + tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), + np.asarray(y, dtype=np.float64)) + assert_approx_equal(tau, -0.56694968153682723) + # All ties + tau, p_value = stats.weightedtau([], []) + assert_equal(np.nan, tau) + assert_equal(np.nan, p_value) + tau, p_value = stats.weightedtau([0], [0]) + assert_equal(np.nan, tau) + assert_equal(np.nan, p_value) + # Size mismatches + assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1, 2]) + assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1], [0]) + # NaNs + x = [12, 2, 1, 12, 2] + y = [1, 4, 7, 1, np.nan] + tau, p_value = stats.weightedtau(x, y) + assert_approx_equal(tau, -0.56694968153682723) + x = [12, 2, np.nan, 12, 2] + tau, p_value = stats.weightedtau(x, y) + assert_approx_equal(tau, -0.56694968153682723) + # NaNs when the dtype of x and y are all np.float64 + x = [12.0, 2.0, 1.0, 12.0, 2.0] + y = [1.0, 4.0, 7.0, 1.0, np.nan] + tau, p_value = stats.weightedtau(x, y) + assert_approx_equal(tau, -0.56694968153682723) + x = [12.0, 2.0, np.nan, 12.0, 2.0] + tau, p_value = stats.weightedtau(x, y) + assert_approx_equal(tau, -0.56694968153682723) + # NaNs when there are more than one NaN in x or y + x = [12.0, 2.0, 1.0, 12.0, 1.0] + y = [1.0, 4.0, 7.0, 1.0, 1.0] + tau, p_value = stats.weightedtau(x, y) + assert_approx_equal(tau, -0.6615242347139803) 
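+    # Editor's aside, a hedged illustration (not part of the original test,
+    # and independent of the NaN checks that continue below): by default
+    # `weightedtau` uses the additive hyperbolic weigher w(r) = 1/(r + 1),
+    # so passing that weigher explicitly should reproduce the default result.
+    xw = [12, 2, 1, 12, 2]
+    yw = [1, 4, 7, 1, 0]
+    assert_approx_equal(
+        stats.weightedtau(xw, yw, weigher=lambda r: 1. / (r + 1)).statistic,
+        stats.weightedtau(xw, yw).statistic)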
+    x = [12.0, 2.0, np.nan, 12.0, np.nan]
+    tau, p_value = stats.weightedtau(x, y)
+    assert_approx_equal(tau, -0.6615242347139803)
+    y = [np.nan, 4.0, 7.0, np.nan, np.nan]
+    tau, p_value = stats.weightedtau(x, y)
+    assert_approx_equal(tau, -0.6615242347139803)
+
+
+def test_segfault_issue_9710():
+    # https://github.com/scipy/scipy/issues/9710
+    # This test was created to check for the segfault reported in the issue,
+    # which only reproduced in optimized builds after calling the function
+    # twice.
+    stats.weightedtau([1], [1.0])
+    stats.weightedtau([1], [1.0])
+    # The call below also caused a segfault
+    stats.weightedtau([np.nan], [52])
+
+
+def test_kendall_tau_large():
+    n = 172
+    # Test omit policy
+    x = np.arange(n + 1).astype(float)
+    y = np.arange(n + 1).astype(float)
+    y[-1] = np.nan
+    _, pval = stats.kendalltau(x, y, method='exact', nan_policy='omit')
+    assert_equal(pval, 0.0)
+
+
+def test_weightedtau_vs_quadratic():
+    # Trivial quadratic implementation, all parameters mandatory
+    def wkq(x, y, rank, weigher, add):
+        tot = conc = disc = u = v = 0
+        for (i, j) in product(range(len(x)), range(len(x))):
+            w = weigher(rank[i]) + weigher(rank[j]) if add \
+                else weigher(rank[i]) * weigher(rank[j])
+            tot += w
+            if x[i] == x[j]:
+                u += w
+            if y[i] == y[j]:
+                v += w
+            if x[i] < x[j] and y[i] < y[j] or x[i] > x[j] and y[i] > y[j]:
+                conc += w
+            elif x[i] < x[j] and y[i] > y[j] or x[i] > x[j] and y[i] < y[j]:
+                disc += w
+        return (conc - disc) / np.sqrt(tot - u) / np.sqrt(tot - v)
+
+    def weigher(x):
+        return 1. / (x + 1)
+
+    np.random.seed(42)
+    for s in range(3, 10):
+        a = []
+        # Generate rankings with ties
+        for i in range(s):
+            a += [i]*i
+        b = list(a)
+        np.random.shuffle(a)
+        np.random.shuffle(b)
+        # First pass: use element indices as ranks
+        rank = np.arange(len(a), dtype=np.intp)
+        for _ in range(2):
+            for add in [True, False]:
+                expected = wkq(a, b, rank, weigher, add)
+                actual = stats.weightedtau(a, b, rank, weigher, add).statistic
+                assert_approx_equal(expected, actual)
+            # Second pass: use a random rank
+            np.random.shuffle(rank)
+
+
+class TestFindRepeats:
+
+    def test_basic(self):
+        a = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 5]
+        res, nums = stats.find_repeats(a)
+        assert_array_equal(res, [1, 2, 3, 4])
+        assert_array_equal(nums, [3, 3, 2, 2])
+
+    def test_empty_result(self):
+        # Check that empty arrays are returned when there are no repeats.
+        for a in [[10, 20, 50, 30, 40], []]:
+            repeated, counts = stats.find_repeats(a)
+            assert_array_equal(repeated, [])
+            assert_array_equal(counts, [])
+
+
+class TestRegression:
+
+    def test_linregressBIGX(self):
+        # W.II.F. Regress BIG on X.
+        result = stats.linregress(X, BIG)
+        assert_almost_equal(result.intercept, 99999990)
+        assert_almost_equal(result.rvalue, 1.0)
+        # The uncertainty ought to be almost zero
+        # since all points lie on a line
+        assert_almost_equal(result.stderr, 0.0)
+        assert_almost_equal(result.intercept_stderr, 0.0)
+
+    def test_regressXX(self):
+        # W.IV.B. Regress X on X.
+        # The constant should be exactly 0 and the regression coefficient
+        # should be 1. This is a perfectly valid regression and the
+        # program should not complain.
+        result = stats.linregress(X, X)
+        assert_almost_equal(result.intercept, 0.0)
+        assert_almost_equal(result.rvalue, 1.0)
+        # The uncertainty on a regression through two points ought to be 0
+        assert_almost_equal(result.stderr, 0.0)
+        assert_almost_equal(result.intercept_stderr, 0.0)
+
+    # W.IV.C. Regress X on BIG and LITTLE (two predictors). The program
+    # should tell you that this model is "singular" because BIG and
+    # LITTLE are linear combinations of each other. Cryptic error
+    # messages are unacceptable here. Singularity is the most
+    # fundamental regression error.
+    #
+    # Need to figure out how to handle multiple linear regression.
+    # This is not obvious
+
+    def test_regressZEROX(self):
+        # W.IV.D. Regress ZERO on X.
+        # The program should inform you that ZERO has no variance or it should
+        # go ahead and compute the regression and report a correlation and
+        # total sum of squares of exactly 0.
+        result = stats.linregress(X, ZERO)
+        assert_almost_equal(result.intercept, 0.0)
+        assert_almost_equal(result.rvalue, 0.0)
+
+    def test_regress_simple(self):
+        # Regress a line with sinusoidal noise.
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+
+        result = stats.linregress(x, y)
+        lr = stats._stats_mstats_common.LinregressResult
+        assert_(isinstance(result, lr))
+        assert_almost_equal(result.stderr, 2.3957814497838803e-3)
+
+    def test_regress_alternative(self):
+        # test alternative parameter
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10  # slope is greater than zero
+        y += np.sin(np.linspace(0, 20, 100))
+
+        with pytest.raises(ValueError, match="alternative must be 'less'..."):
+            stats.linregress(x, y, alternative="ekki-ekki")
+
+        res1 = stats.linregress(x, y, alternative="two-sided")
+
+        # slope is greater than zero, so "less" p-value should be large
+        res2 = stats.linregress(x, y, alternative="less")
+        assert_allclose(res2.pvalue, 1 - (res1.pvalue / 2))
+
+        # slope is greater than zero, so "greater" p-value should be small
+        res3 = stats.linregress(x, y, alternative="greater")
+        assert_allclose(res3.pvalue, res1.pvalue / 2)
+
+        assert res1.rvalue == res2.rvalue == res3.rvalue
+
+    def test_regress_against_R(self):
+        # test against R `lm`
+        # options(digits=16)
+        # x <- c(151, 174, 138, 186, 128, 136, 179, 163, 152, 131)
+        # y <- c(63, 81, 56, 91, 47, 57, 76, 72, 62, 48)
+        # relation <- lm(y~x)
+        # print(summary(relation))
+
+        x = [151, 174, 138, 186, 128, 136, 179, 163, 152, 131]
+        y = [63, 81, 56, 91, 47, 57, 76, 72, 62, 48]
+        res = stats.linregress(x, y, alternative="two-sided")
+        # expected values from R's `lm` above
+        assert_allclose(res.slope, 0.6746104491292)
+        assert_allclose(res.intercept, -38.4550870760770)
+        assert_allclose(res.rvalue, np.sqrt(0.95478224775))
+        assert_allclose(res.pvalue, 1.16440531074e-06)
+        assert_allclose(res.stderr, 0.0519051424731)
+        assert_allclose(res.intercept_stderr, 8.0490133029927)
+
+    def test_regress_simple_onearg_rows(self):
+        # Regress a line with sinusoidal noise,
+        # with a single input of shape (2, N)
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+        rows = np.vstack((x, y))
+
+        result = stats.linregress(rows)
+        assert_almost_equal(result.stderr, 2.3957814497838803e-3)
+        assert_almost_equal(result.intercept_stderr, 1.3866936078570702e-1)
+
+    def test_regress_simple_onearg_cols(self):
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+        columns = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1)))
+
+        result = stats.linregress(columns)
+        assert_almost_equal(result.stderr, 2.3957814497838803e-3)
+        assert_almost_equal(result.intercept_stderr, 1.3866936078570702e-1)
+
+    def test_regress_shape_error(self):
+        # Check that a single input argument to linregress with wrong shape
+        # results in a ValueError.
+        assert_raises(ValueError, stats.linregress, np.ones((3, 3)))
+
+    def test_linregress(self):
+        # compared with multivariate ols with pinv
+        x = np.arange(11)
+        y = np.arange(5, 16)
+        y[[1, -2]] -= 1
+        y[[0, -1]] += 1
+
+        result = stats.linregress(x, y)
+
+        # This test used to use 'assert_array_almost_equal' but its
+        # formulation got confusing since LinregressResult became
+        # _lib._bunch._make_tuple_bunch instead of namedtuple
+        # (for backwards compatibility, see PR #12983)
+        def assert_ae(x, y):
+            return assert_almost_equal(x, y, decimal=14)
+        assert_ae(result.slope, 1.0)
+        assert_ae(result.intercept, 5.0)
+        assert_ae(result.rvalue, 0.98229948625750)
+        assert_ae(result.pvalue, 7.45259691e-008)
+        assert_ae(result.stderr, 0.063564172616372733)
+        assert_ae(result.intercept_stderr, 0.37605071654517686)
+
+    def test_regress_simple_negative_cor(self):
+        # If the slope of the regression is negative, the correlation
+        # coefficient R tends to -1, not 1. Sometimes rounding errors make
+        # it < -1, leading to stderr being NaN.
+        a, n = 1e-71, 100000
+        x = np.linspace(a, 2 * a, n)
+        y = np.linspace(2 * a, a, n)
+        result = stats.linregress(x, y)
+
+        # Make sure propagated numerical errors
+        # did not bring rvalue below -1 (or were coerced)
+        assert_(result.rvalue >= -1)
+        assert_almost_equal(result.rvalue, -1)
+
+        # slope and intercept stderror should stay numeric
+        assert_(not np.isnan(result.stderr))
+        assert_(not np.isnan(result.intercept_stderr))
+
+    def test_linregress_result_attributes(self):
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+        result = stats.linregress(x, y)
+
+        # Result is of a correct class
+        lr = stats._stats_mstats_common.LinregressResult
+        assert_(isinstance(result, lr))
+
+        # LinregressResult elements have correct names
+        attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
+        check_named_results(result, attributes)
+        # Also check that the extra attribute (intercept_stderr) is present
+        assert 'intercept_stderr' in dir(result)
+
+    def test_regress_two_inputs(self):
+        # Regress a simple line formed by two points.
+        x = np.arange(2)
+        y = np.arange(3, 5)
+        result = stats.linregress(x, y)
+
+        # Non-horizontal line
+        assert_almost_equal(result.pvalue, 0.0)
+
+        # Zero error through two points
+        assert_almost_equal(result.stderr, 0.0)
+        assert_almost_equal(result.intercept_stderr, 0.0)
+
+    def test_regress_two_inputs_horizontal_line(self):
+        # Regress a horizontal line formed by two points.
+ x = np.arange(2) + y = np.ones(2) + result = stats.linregress(x, y) + + # Horizontal line + assert_almost_equal(result.pvalue, 1.0) + + # Zero error through two points + assert_almost_equal(result.stderr, 0.0) + assert_almost_equal(result.intercept_stderr, 0.0) + + def test_nist_norris(self): + x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6, 777.0, + 558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6, 556.0, 228.1, + 995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1, 887.2, 999.0, 779.0, + 11.1, 118.3, 229.2, 669.1, 448.9, 0.5] + + y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1, 778.9, + 559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8, 557.7, 228.3, + 998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3, 888.0, 998.5, 778.9, + 10.2, 117.6, 228.9, 668.4, 449.2, 0.2] + + result = stats.linregress(x, y) + + assert_almost_equal(result.slope, 1.00211681802045) + assert_almost_equal(result.intercept, -0.262323073774029) + assert_almost_equal(result.rvalue**2, 0.999993745883712) + assert_almost_equal(result.pvalue, 0.0) + assert_almost_equal(result.stderr, 0.00042979684820) + assert_almost_equal(result.intercept_stderr, 0.23281823430153) + + def test_compare_to_polyfit(self): + x = np.linspace(0, 100, 100) + y = 0.2 * np.linspace(0, 100, 100) + 10 + y += np.sin(np.linspace(0, 20, 100)) + result = stats.linregress(x, y) + poly = np.polyfit(x, y, 1) # Fit 1st degree polynomial + + # Make sure linear regression slope and intercept + # match with results from numpy polyfit + assert_almost_equal(result.slope, poly[0]) + assert_almost_equal(result.intercept, poly[1]) + + def test_empty_input(self): + assert_raises(ValueError, stats.linregress, [], []) + + def test_nan_input(self): + x = np.arange(10.) + x[9] = np.nan + + with np.errstate(invalid="ignore"): + result = stats.linregress(x, x) + + # Make sure the result still comes back as `LinregressResult` + lr = stats._stats_mstats_common.LinregressResult + assert_(isinstance(result, lr)) + assert_array_equal(result, (np.nan,)*5) + assert_equal(result.intercept_stderr, np.nan) + + def test_identical_x(self): + x = np.zeros(10) + y = np.random.random(10) + msg = "Cannot calculate a linear regression" + with assert_raises(ValueError, match=msg): + stats.linregress(x, y) + + +def test_theilslopes(): + # Basic slope test. + slope, intercept, lower, upper = stats.theilslopes([0,1,1]) + assert_almost_equal(slope, 0.5) + assert_almost_equal(intercept, 0.5) + + msg = ("method must be either 'joint' or 'separate'." + "'joint_separate' is invalid.") + with pytest.raises(ValueError, match=msg): + stats.theilslopes([0, 1, 1], method='joint_separate') + + slope, intercept, lower, upper = stats.theilslopes([0, 1, 1], + method='joint') + assert_almost_equal(slope, 0.5) + assert_almost_equal(intercept, 0.0) + + # Test of confidence intervals. 
+ x = [1, 2, 3, 4, 10, 12, 18] + y = [9, 15, 19, 20, 45, 55, 78] + slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07, + method='separate') + assert_almost_equal(slope, 4) + assert_almost_equal(intercept, 4.0) + assert_almost_equal(upper, 4.38, decimal=2) + assert_almost_equal(lower, 3.71, decimal=2) + + slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07, + method='joint') + assert_almost_equal(slope, 4) + assert_almost_equal(intercept, 6.0) + assert_almost_equal(upper, 4.38, decimal=2) + assert_almost_equal(lower, 3.71, decimal=2) + + +def test_cumfreq(): + x = [1, 4, 2, 1, 3, 1] + cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4) + assert_array_almost_equal(cumfreqs, np.array([3., 4., 5., 6.])) + cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq( + x, numbins=4, defaultreallimits=(1.5, 5)) + assert_(extrapoints == 3) + + # test for namedtuple attribute results + attributes = ('cumcount', 'lowerlimit', 'binsize', 'extrapoints') + res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5)) + check_named_results(res, attributes) + + +def test_relfreq(): + a = np.array([1, 4, 2, 1, 3, 1]) + relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4) + assert_array_almost_equal(relfreqs, + array([0.5, 0.16666667, 0.16666667, 0.16666667])) + + # test for namedtuple attribute results + attributes = ('frequency', 'lowerlimit', 'binsize', 'extrapoints') + res = stats.relfreq(a, numbins=4) + check_named_results(res, attributes) + + # check array_like input is accepted + relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1], + numbins=4) + assert_array_almost_equal(relfreqs, relfreqs2) + + +class TestScoreatpercentile: + def setup_method(self): + self.a1 = [3, 4, 5, 10, -3, -5, 6] + self.a2 = [3, -6, -2, 8, 7, 4, 2, 1] + self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0] + + def test_basic(self): + x = arange(8) * 0.5 + assert_equal(stats.scoreatpercentile(x, 0), 0.) 
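+        # Editor's aside, a hedged equivalence check (not from the original
+        # test): with the default 'fraction' interpolation, scoreatpercentile
+        # is assumed to agree with NumPy's linearly interpolated percentile.
+        assert_almost_equal(stats.scoreatpercentile(x, 50),
+                            np.percentile(x, 50))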
+ assert_equal(stats.scoreatpercentile(x, 100), 3.5) + assert_equal(stats.scoreatpercentile(x, 50), 1.75) + + def test_fraction(self): + scoreatperc = stats.scoreatpercentile + + # Test defaults + assert_equal(scoreatperc(list(range(10)), 50), 4.5) + assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5) + assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5) + assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55) + assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5) + + # explicitly specify interpolation_method 'fraction' (the default) + assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'), + 4.5) + assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7), + interpolation_method='fraction'), + 4.5) + assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8), + interpolation_method='fraction'), + 4.5) + assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100), + interpolation_method='fraction'), + 55) + assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10), + interpolation_method='fraction'), + 5.5) + + def test_lower_higher(self): + scoreatperc = stats.scoreatpercentile + + # interpolation_method 'lower'/'higher' + assert_equal(scoreatperc(list(range(10)), 50, + interpolation_method='lower'), 4) + assert_equal(scoreatperc(list(range(10)), 50, + interpolation_method='higher'), 5) + assert_equal(scoreatperc(list(range(10)), 50, (2,7), + interpolation_method='lower'), 4) + assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7), + interpolation_method='higher'), 5) + assert_equal(scoreatperc(list(range(100)), 50, (1,8), + interpolation_method='lower'), 4) + assert_equal(scoreatperc(list(range(100)), 50, (1,8), + interpolation_method='higher'), 5) + assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100), + interpolation_method='lower'), 10) + assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100), + interpolation_method='higher'), 100) + assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10), + interpolation_method='lower'), 1) + assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10), + interpolation_method='higher'), 10) + + def test_sequence_per(self): + x = arange(8) * 0.5 + expected = np.array([0, 3.5, 1.75]) + res = stats.scoreatpercentile(x, [0, 100, 50]) + assert_allclose(res, expected) + assert_(isinstance(res, np.ndarray)) + # Test with ndarray. 
Regression test for gh-2861 + assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])), + expected) + # Also test combination of 2-D array, axis not None and array-like per + res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)), + np.array([0, 1, 100, 100]), axis=1) + expected2 = array([[0, 4, 8], + [0.03, 4.03, 8.03], + [3, 7, 11], + [3, 7, 11]]) + assert_allclose(res2, expected2) + + def test_axis(self): + scoreatperc = stats.scoreatpercentile + x = arange(12).reshape(3, 4) + + assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0]) + + r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] + assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0) + + r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]] + assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1) + + x = array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + score = stats.scoreatpercentile(x, 50) + assert_equal(score.shape, ()) + assert_equal(score, 1.0) + score = stats.scoreatpercentile(x, 50, axis=0) + assert_equal(score.shape, (3,)) + assert_equal(score, [1, 1, 1]) + + def test_exception(self): + assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56, + interpolation_method='foobar') + assert_raises(ValueError, stats.scoreatpercentile, [1], 101) + assert_raises(ValueError, stats.scoreatpercentile, [1], -1) + + def test_empty(self): + assert_equal(stats.scoreatpercentile([], 50), np.nan) + assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan) + assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan]) + + +@pytest.mark.filterwarnings('ignore::FutureWarning') +class TestMode: + + def test_empty(self): + vals, counts = stats.mode([]) + assert_equal(vals, np.array([])) + assert_equal(counts, np.array([])) + + def test_scalar(self): + vals, counts = stats.mode(4.) 
+ assert_equal(vals, np.array([4.])) + assert_equal(counts, np.array([1])) + + def test_basic(self): + data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6] + vals = stats.mode(data1) + assert_equal(vals[0], 6) + assert_equal(vals[1], 3) + + def test_axes(self): + data1 = [10, 10, 30, 40] + data2 = [10, 10, 10, 10] + data3 = [20, 10, 20, 20] + data4 = [30, 30, 30, 30] + data5 = [40, 30, 30, 30] + arr = np.array([data1, data2, data3, data4, data5]) + + vals = stats.mode(arr, axis=None, keepdims=True) + assert_equal(vals[0], np.array([[30]])) + assert_equal(vals[1], np.array([[8]])) + + vals = stats.mode(arr, axis=0, keepdims=True) + assert_equal(vals[0], np.array([[10, 10, 30, 30]])) + assert_equal(vals[1], np.array([[2, 3, 3, 2]])) + + vals = stats.mode(arr, axis=1, keepdims=True) + assert_equal(vals[0], np.array([[10], [10], [20], [30], [30]])) + assert_equal(vals[1], np.array([[2], [4], [3], [4], [3]])) + + @pytest.mark.parametrize('axis', np.arange(-4, 0)) + def test_negative_axes_gh_15375(self, axis): + np.random.seed(984213899) + a = np.random.rand(10, 11, 12, 13) + res0 = stats.mode(a, axis=a.ndim+axis) + res1 = stats.mode(a, axis=axis) + np.testing.assert_array_equal(res0, res1) + + def test_mode_result_attributes(self): + data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6] + data2 = [] + actual = stats.mode(data1) + attributes = ('mode', 'count') + check_named_results(actual, attributes) + actual2 = stats.mode(data2) + check_named_results(actual2, attributes) + + def test_mode_nan(self): + data1 = [3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6] + actual = stats.mode(data1) + assert_equal(actual, (6, 3)) + + actual = stats.mode(data1, nan_policy='omit') + assert_equal(actual, (6, 3)) + assert_raises(ValueError, stats.mode, data1, nan_policy='raise') + assert_raises(ValueError, stats.mode, data1, nan_policy='foobar') + + @pytest.mark.parametrize("data", [ + [3, 5, 1, 1, 3], + [3, np.nan, 5, 1, 1, 3], + [3, 5, 1], + [3, np.nan, 5, 1], + ]) + @pytest.mark.parametrize('keepdims', [False, True]) + def test_smallest_equal(self, data, keepdims): + result = stats.mode(data, nan_policy='omit', keepdims=keepdims) + if keepdims: + assert_equal(result[0][0], 1) + else: + assert_equal(result[0], 1) + + @pytest.mark.parametrize('axis', np.arange(-3, 3)) + def test_mode_shape_gh_9955(self, axis, dtype=np.float64): + rng = np.random.default_rng(984213899) + a = rng.uniform(size=(3, 4, 5)).astype(dtype) + res = stats.mode(a, axis=axis, keepdims=False) + reference_shape = list(a.shape) + reference_shape.pop(axis) + np.testing.assert_array_equal(res.mode.shape, reference_shape) + np.testing.assert_array_equal(res.count.shape, reference_shape) + + def test_nan_policy_propagate_gh_9815(self): + # mode should treat np.nan as it would any other object when + # nan_policy='propagate' + a = [2, np.nan, 1, np.nan] + res = stats.mode(a) + assert np.isnan(res.mode) and res.count == 2 + + def test_keepdims(self): + # test empty arrays (handled by `np.mean`) + a = np.zeros((1, 2, 3, 0)) + + res = stats.mode(a, axis=1, keepdims=False) + assert res.mode.shape == res.count.shape == (1, 3, 0) + + res = stats.mode(a, axis=1, keepdims=True) + assert res.mode.shape == res.count.shape == (1, 1, 3, 0) + + # test nan_policy='propagate' + a = [[1, 3, 3, np.nan], [1, 1, np.nan, 1]] + + res = stats.mode(a, axis=1, keepdims=False) + assert_array_equal(res.mode, [3, 1]) + assert_array_equal(res.count, [2, 3]) + + res = stats.mode(a, axis=1, keepdims=True) + assert_array_equal(res.mode, [[3], [1]]) + assert_array_equal(res.count, [[2], 
[3]]) + + a = np.array(a) + res = stats.mode(a, axis=None, keepdims=False) + ref = stats.mode(a.ravel(), keepdims=False) + assert_array_equal(res, ref) + assert res.mode.shape == ref.mode.shape == () + + res = stats.mode(a, axis=None, keepdims=True) + ref = stats.mode(a.ravel(), keepdims=True) + assert_equal(res.mode.ravel(), ref.mode.ravel()) + assert res.mode.shape == (1, 1) + assert_equal(res.count.ravel(), ref.count.ravel()) + assert res.count.shape == (1, 1) + + # test nan_policy='omit' + a = [[1, np.nan, np.nan, np.nan, 1], + [np.nan, np.nan, np.nan, np.nan, 2], + [1, 2, np.nan, 5, 5]] + + res = stats.mode(a, axis=1, keepdims=False, nan_policy='omit') + assert_array_equal(res.mode, [1, 2, 5]) + assert_array_equal(res.count, [2, 1, 2]) + + res = stats.mode(a, axis=1, keepdims=True, nan_policy='omit') + assert_array_equal(res.mode, [[1], [2], [5]]) + assert_array_equal(res.count, [[2], [1], [2]]) + + a = np.array(a) + res = stats.mode(a, axis=None, keepdims=False, nan_policy='omit') + ref = stats.mode(a.ravel(), keepdims=False, nan_policy='omit') + assert_array_equal(res, ref) + assert res.mode.shape == ref.mode.shape == () + + res = stats.mode(a, axis=None, keepdims=True, nan_policy='omit') + ref = stats.mode(a.ravel(), keepdims=True, nan_policy='omit') + assert_equal(res.mode.ravel(), ref.mode.ravel()) + assert res.mode.shape == (1, 1) + assert_equal(res.count.ravel(), ref.count.ravel()) + assert res.count.shape == (1, 1) + + @pytest.mark.parametrize("nan_policy", ['propagate', 'omit']) + def test_gh16955(self, nan_policy): + # Check that bug reported in gh-16955 is resolved + shape = (4, 3) + data = np.ones(shape) + data[0, 0] = np.nan + res = stats.mode(a=data, axis=1, keepdims=False, nan_policy=nan_policy) + assert_array_equal(res.mode, [1, 1, 1, 1]) + assert_array_equal(res.count, [2, 3, 3, 3]) + + # Test with input from gh-16595. Support for non-numeric input + # was deprecated, so check for the appropriate error. + my_dtype = np.dtype([('asdf', np.uint8), ('qwer', np.float64, (3,))]) + test = np.zeros(10, dtype=my_dtype) + with pytest.raises(TypeError, match="Argument `a` is not..."): + stats.mode(test, nan_policy=nan_policy) + + def test_gh9955(self): + # The behavior of mode with empty slices (whether the input was empty + # or all elements were omitted) was inconsistent. Test that this is + # resolved: the mode of an empty slice is NaN and the count is zero. + res = stats.mode([]) + ref = (np.nan, 0) + assert_equal(res, ref) + + res = stats.mode([np.nan], nan_policy='omit') + assert_equal(res, ref) + + a = [[10., 20., 20.], [np.nan, np.nan, np.nan]] + res = stats.mode(a, axis=1, nan_policy='omit') + ref = ([20, np.nan], [2, 0]) + assert_equal(res, ref) + + res = stats.mode(a, axis=1, nan_policy='propagate') + ref = ([20, np.nan], [2, 3]) + assert_equal(res, ref) + + z = np.array([[], []]) + res = stats.mode(z, axis=1) + ref = ([np.nan, np.nan], [0, 0]) + assert_equal(res, ref) + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') # np.mean warns + @pytest.mark.parametrize('z', [np.empty((0, 1, 2)), np.empty((1, 1, 2))]) + def test_gh17214(self, z): + res = stats.mode(z, axis=None, keepdims=True) + ref = np.mean(z, axis=None, keepdims=True) + assert res[0].shape == res[1].shape == ref.shape == (1, 1, 1) + + def test_raise_non_numeric_gh18254(self): + message = "Argument `a` is not recognized as numeric." 
+ + class ArrLike: + def __init__(self, x): + self._x = x + + def __array__(self, dtype=None, copy=None): + return self._x.astype(object) + + with pytest.raises(TypeError, match=message): + stats.mode(ArrLike(np.arange(3))) + with pytest.raises(TypeError, match=message): + stats.mode(np.arange(3, dtype=object)) + +class TestSEM: + + testcase = [1, 2, 3, 4] + scalar_testcase = 4. + + def test_sem(self): + # This is not in R, so used: + # sqrt(var(testcase)*3/4)/sqrt(3) + + # y = stats.sem(self.shoes[0]) + # assert_approx_equal(y,0.775177399) + with suppress_warnings() as sup, np.errstate(invalid="ignore"): + sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") + y = stats.sem(self.scalar_testcase) + assert_(np.isnan(y)) + + y = stats.sem(self.testcase) + assert_approx_equal(y, 0.6454972244) + n = len(self.testcase) + assert_allclose(stats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)), + stats.sem(self.testcase, ddof=2)) + + x = np.arange(10.) + x[9] = np.nan + assert_equal(stats.sem(x), np.nan) + assert_equal(stats.sem(x, nan_policy='omit'), 0.9128709291752769) + assert_raises(ValueError, stats.sem, x, nan_policy='raise') + assert_raises(ValueError, stats.sem, x, nan_policy='foobar') + + +class TestZmapZscore: + + @pytest.mark.parametrize( + 'x, y', + [([1, 2, 3, 4], [1, 2, 3, 4]), + ([1, 2, 3], [0, 1, 2, 3, 4])] + ) + def test_zmap(self, x, y): + z = stats.zmap(x, y) + # For these simple cases, calculate the expected result directly + # by using the formula for the z-score. + expected = (x - np.mean(y))/np.std(y) + assert_allclose(z, expected, rtol=1e-12) + + def test_zmap_axis(self): + # Test use of 'axis' keyword in zmap. + x = np.array([[0.0, 0.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 2.0], + [2.0, 0.0, 2.0, 0.0]]) + + t1 = 1.0/np.sqrt(2.0/3) + t2 = np.sqrt(3.)/3 + t3 = np.sqrt(2.) + + z0 = stats.zmap(x, x, axis=0) + z1 = stats.zmap(x, x, axis=1) + + z0_expected = [[-t1, -t3/2, -t3/2, 0.0], + [0.0, t3, -t3/2, t1], + [t1, -t3/2, t3, -t1]] + z1_expected = [[-1.0, -1.0, 1.0, 1.0], + [-t2, -t2, -t2, np.sqrt(3.)], + [1.0, -1.0, 1.0, -1.0]] + + assert_array_almost_equal(z0, z0_expected) + assert_array_almost_equal(z1, z1_expected) + + def test_zmap_ddof(self): + # Test use of 'ddof' keyword in zmap. + x = np.array([[0.0, 0.0, 1.0, 1.0], + [0.0, 1.0, 2.0, 3.0]]) + + z = stats.zmap(x, x, axis=1, ddof=1) + + z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3)) + z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3)) + assert_array_almost_equal(z[0], z0_expected) + assert_array_almost_equal(z[1], z1_expected) + + @pytest.mark.parametrize('ddof', [0, 2]) + def test_zmap_nan_policy_omit(self, ddof): + # nans in `scores` are propagated, regardless of `nan_policy`. + # `nan_policy` only affects how nans in `compare` are handled. 
+ scores = np.array([-3, -1, 2, np.nan]) + compare = np.array([-8, -3, 2, 7, 12, np.nan]) + z = stats.zmap(scores, compare, ddof=ddof, nan_policy='omit') + assert_allclose(z, stats.zmap(scores, compare[~np.isnan(compare)], + ddof=ddof)) + + @pytest.mark.parametrize('ddof', [0, 2]) + def test_zmap_nan_policy_omit_with_axis(self, ddof): + scores = np.arange(-5.0, 9.0).reshape(2, -1) + compare = np.linspace(-8, 6, 24).reshape(2, -1) + compare[0, 4] = np.nan + compare[0, 6] = np.nan + compare[1, 1] = np.nan + z = stats.zmap(scores, compare, nan_policy='omit', axis=1, ddof=ddof) + expected = np.array([stats.zmap(scores[0], + compare[0][~np.isnan(compare[0])], + ddof=ddof), + stats.zmap(scores[1], + compare[1][~np.isnan(compare[1])], + ddof=ddof)]) + assert_allclose(z, expected, rtol=1e-14) + + def test_zmap_nan_policy_raise(self): + scores = np.array([1, 2, 3]) + compare = np.array([-8, -3, 2, 7, 12, np.nan]) + with pytest.raises(ValueError, match='input contains nan'): + stats.zmap(scores, compare, nan_policy='raise') + + def test_zscore(self): + # not in R, so tested by using: + # (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4) + y = stats.zscore([1, 2, 3, 4]) + desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, + 1.3416407864999]) + assert_array_almost_equal(desired, y, decimal=12) + + def test_zscore_axis(self): + # Test use of 'axis' keyword in zscore. + x = np.array([[0.0, 0.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 2.0], + [2.0, 0.0, 2.0, 0.0]]) + + t1 = 1.0/np.sqrt(2.0/3) + t2 = np.sqrt(3.)/3 + t3 = np.sqrt(2.) + + z0 = stats.zscore(x, axis=0) + z1 = stats.zscore(x, axis=1) + + z0_expected = [[-t1, -t3/2, -t3/2, 0.0], + [0.0, t3, -t3/2, t1], + [t1, -t3/2, t3, -t1]] + z1_expected = [[-1.0, -1.0, 1.0, 1.0], + [-t2, -t2, -t2, np.sqrt(3.)], + [1.0, -1.0, 1.0, -1.0]] + + assert_array_almost_equal(z0, z0_expected) + assert_array_almost_equal(z1, z1_expected) + + def test_zscore_ddof(self): + # Test use of 'ddof' keyword in zscore. 
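+        # Editor's aside, a hedged note (not from the original test): changing
+        # ddof only rescales the z-scores, since only the divisor of the
+        # variance estimate changes; for a length-n input,
+        # zscore(v, ddof=1) == zscore(v, ddof=0) * sqrt((n - 1) / n).
+        v = np.array([0.0, 0.0, 1.0, 1.0])
+        n = len(v)
+        assert_allclose(stats.zscore(v, ddof=1),
+                        stats.zscore(v, ddof=0) * np.sqrt((n - 1) / n))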
+ x = np.array([[0.0, 0.0, 1.0, 1.0], + [0.0, 1.0, 2.0, 3.0]]) + + z = stats.zscore(x, axis=1, ddof=1) + + z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3)) + z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3)) + assert_array_almost_equal(z[0], z0_expected) + assert_array_almost_equal(z[1], z1_expected) + + def test_zscore_nan_propagate(self): + x = np.array([1, 2, np.nan, 4, 5]) + z = stats.zscore(x, nan_policy='propagate') + assert all(np.isnan(z)) + + def test_zscore_nan_omit(self): + x = np.array([1, 2, np.nan, 4, 5]) + + z = stats.zscore(x, nan_policy='omit') + + expected = np.array([-1.2649110640673518, + -0.6324555320336759, + np.nan, + 0.6324555320336759, + 1.2649110640673518 + ]) + assert_array_almost_equal(z, expected) + + def test_zscore_nan_omit_with_ddof(self): + x = np.array([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0]) + z = stats.zscore(x, ddof=1, nan_policy='omit') + expected = np.r_[np.nan, stats.zscore(x[1:], ddof=1)] + assert_allclose(z, expected, rtol=1e-13) + + def test_zscore_nan_raise(self): + x = np.array([1, 2, np.nan, 4, 5]) + + assert_raises(ValueError, stats.zscore, x, nan_policy='raise') + + def test_zscore_constant_input_1d(self): + x = [-0.087] * 3 + z = stats.zscore(x) + assert_equal(z, np.full(len(x), np.nan)) + + def test_zscore_constant_input_2d(self): + x = np.array([[10.0, 10.0, 10.0, 10.0], + [10.0, 11.0, 12.0, 13.0]]) + z0 = stats.zscore(x, axis=0) + assert_equal(z0, np.array([[np.nan, -1.0, -1.0, -1.0], + [np.nan, 1.0, 1.0, 1.0]])) + z1 = stats.zscore(x, axis=1) + assert_equal(z1, np.array([[np.nan, np.nan, np.nan, np.nan], + stats.zscore(x[1])])) + z = stats.zscore(x, axis=None) + assert_equal(z, stats.zscore(x.ravel()).reshape(x.shape)) + + y = np.ones((3, 6)) + z = stats.zscore(y, axis=None) + assert_equal(z, np.full(y.shape, np.nan)) + + def test_zscore_constant_input_2d_nan_policy_omit(self): + x = np.array([[10.0, 10.0, 10.0, 10.0], + [10.0, 11.0, 12.0, np.nan], + [10.0, 12.0, np.nan, 10.0]]) + z0 = stats.zscore(x, nan_policy='omit', axis=0) + s = np.sqrt(3/2) + s2 = np.sqrt(2) + assert_allclose(z0, np.array([[np.nan, -s, -1.0, np.nan], + [np.nan, 0, 1.0, np.nan], + [np.nan, s, np.nan, np.nan]])) + z1 = stats.zscore(x, nan_policy='omit', axis=1) + assert_allclose(z1, np.array([[np.nan, np.nan, np.nan, np.nan], + [-s, 0, s, np.nan], + [-s2/2, s2, np.nan, -s2/2]])) + + def test_zscore_2d_all_nan_row(self): + # A row is all nan, and we use axis=1. + x = np.array([[np.nan, np.nan, np.nan, np.nan], + [10.0, 10.0, 12.0, 12.0]]) + z = stats.zscore(x, nan_policy='omit', axis=1) + assert_equal(z, np.array([[np.nan, np.nan, np.nan, np.nan], + [-1.0, -1.0, 1.0, 1.0]])) + + def test_zscore_2d_all_nan(self): + # The entire 2d array is nan, and we use axis=None. 
+ y = np.full((2, 3), np.nan) + z = stats.zscore(y, nan_policy='omit', axis=None) + assert_equal(z, y) + + @pytest.mark.parametrize('x', [np.array([]), np.zeros((3, 0, 5))]) + def test_zscore_empty_input(self, x): + z = stats.zscore(x) + assert_equal(z, x) + + def test_gzscore_normal_array(self): + x = np.array([1, 2, 3, 4]) + z = stats.gzscore(x) + desired = np.log(x / stats.gmean(x)) / np.log(stats.gstd(x, ddof=0)) + assert_allclose(desired, z) + + def test_gzscore_masked_array(self): + x = np.array([1, 2, -1, 3, 4]) + mx = np.ma.masked_array(x, mask=[0, 0, 1, 0, 0]) + z = stats.gzscore(mx) + desired = ([-1.526072095151, -0.194700599824, np.inf, 0.584101799472, + 1.136670895503]) + assert_allclose(desired, z) + + def test_zscore_masked_element_0_gh19039(self): + # zscore returned all NaNs when 0th element was masked. See gh-19039. + rng = np.random.default_rng(8675309) + x = rng.standard_normal(10) + mask = np.zeros_like(x) + y = np.ma.masked_array(x, mask) + y.mask[0] = True + + ref = stats.zscore(x[1:]) # compute reference from non-masked elements + assert not np.any(np.isnan(ref)) + res = stats.zscore(y) + assert_allclose(res[1:], ref) + res = stats.zscore(y, axis=None) + assert_allclose(res[1:], ref) + + y[1:] = y[1] # when non-masked elements are identical, result is nan + res = stats.zscore(y) + assert_equal(res[1:], np.nan) + res = stats.zscore(y, axis=None) + assert_equal(res[1:], np.nan) + +class TestMedianAbsDeviation: + def setup_class(self): + self.dat_nan = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, + 3.03, 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, + 3.6, 3.7, 3.7, 3.7, 3.7, 3.77, 5.28, np.nan]) + self.dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03, + 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, + 3.7, 3.7, 3.7, 3.77, 5.28, 28.95]) + + def test_median_abs_deviation(self): + assert_almost_equal(stats.median_abs_deviation(self.dat, axis=None), + 0.355) + dat = self.dat.reshape(6, 4) + mad = stats.median_abs_deviation(dat, axis=0) + mad_expected = np.asarray([0.435, 0.5, 0.45, 0.4]) + assert_array_almost_equal(mad, mad_expected) + + def test_mad_nan_omit(self): + mad = stats.median_abs_deviation(self.dat_nan, nan_policy='omit') + assert_almost_equal(mad, 0.34) + + def test_axis_and_nan(self): + x = np.array([[1.0, 2.0, 3.0, 4.0, np.nan], + [1.0, 4.0, 5.0, 8.0, 9.0]]) + mad = stats.median_abs_deviation(x, axis=1) + assert_equal(mad, np.array([np.nan, 3.0])) + + def test_nan_policy_omit_with_inf(self): + z = np.array([1, 3, 4, 6, 99, np.nan, np.inf]) + mad = stats.median_abs_deviation(z, nan_policy='omit') + assert_equal(mad, 3.0) + + @pytest.mark.parametrize('axis', [0, 1, 2, None]) + def test_size_zero_with_axis(self, axis): + x = np.zeros((3, 0, 4)) + mad = stats.median_abs_deviation(x, axis=axis) + assert_equal(mad, np.full_like(x.sum(axis=axis), fill_value=np.nan)) + + @pytest.mark.parametrize('nan_policy, expected', + [('omit', np.array([np.nan, 1.5, 1.5])), + ('propagate', np.array([np.nan, np.nan, 1.5]))]) + def test_nan_policy_with_axis(self, nan_policy, expected): + x = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + [1, 5, 3, 6, np.nan, np.nan], + [5, 6, 7, 9, 9, 10]]) + mad = stats.median_abs_deviation(x, nan_policy=nan_policy, axis=1) + assert_equal(mad, expected) + + @pytest.mark.parametrize('axis, expected', + [(1, [2.5, 2.0, 12.0]), (None, 4.5)]) + def test_center_mean_with_nan(self, axis, expected): + x = np.array([[1, 2, 4, 9, np.nan], + [0, 1, 1, 1, 12], + [-10, -10, -10, 20, 20]]) + mad = stats.median_abs_deviation(x, 
center=np.mean, nan_policy='omit',
+                                         axis=axis)
+        assert_allclose(mad, expected, rtol=1e-15, atol=1e-15)
+
+    def test_center_not_callable(self):
+        with pytest.raises(TypeError, match='callable'):
+            stats.median_abs_deviation([1, 2, 3, 5], center=99)
+
+
+def _check_warnings(warn_list, expected_type, expected_len):
+    """
+    Checks that all of the warnings from a list returned by
+    `warnings.catch_warnings(record=True)` are of the required type and that
+    the list contains the expected number of warnings.
+    """
+    assert_equal(len(warn_list), expected_len, "number of warnings")
+    for warn_ in warn_list:
+        assert_(warn_.category is expected_type)
+
+
+class TestIQR:
+
+    def test_basic(self):
+        x = np.arange(8) * 0.5
+        np.random.shuffle(x)
+        assert_equal(stats.iqr(x), 1.75)
+
+    def test_api(self):
+        d = np.ones((5, 5))
+        stats.iqr(d)
+        stats.iqr(d, None)
+        stats.iqr(d, 1)
+        stats.iqr(d, (0, 1))
+        stats.iqr(d, None, (10, 90))
+        stats.iqr(d, None, (30, 20), 1.0)
+        stats.iqr(d, None, (25, 75), 1.5, 'propagate')
+        stats.iqr(d, None, (50, 50), 'normal', 'raise', 'linear')
+        stats.iqr(d, None, (25, 75), -0.4, 'omit', 'lower', True)
+
+    def test_empty(self):
+        assert_equal(stats.iqr([]), np.nan)
+        assert_equal(stats.iqr(np.arange(0)), np.nan)
+
+    def test_constant(self):
+        # Constant array always gives 0
+        x = np.ones((7, 4))
+        assert_equal(stats.iqr(x), 0.0)
+        assert_array_equal(stats.iqr(x, axis=0), np.zeros(4))
+        assert_array_equal(stats.iqr(x, axis=1), np.zeros(7))
+        assert_equal(stats.iqr(x, interpolation='linear'), 0.0)
+        assert_equal(stats.iqr(x, interpolation='midpoint'), 0.0)
+        assert_equal(stats.iqr(x, interpolation='nearest'), 0.0)
+        assert_equal(stats.iqr(x, interpolation='lower'), 0.0)
+        assert_equal(stats.iqr(x, interpolation='higher'), 0.0)
+
+        # 0 only along constant dimensions
+        # This also tests much of `axis`
+        y = np.ones((4, 5, 6)) * np.arange(6)
+        assert_array_equal(stats.iqr(y, axis=0), np.zeros((5, 6)))
+        assert_array_equal(stats.iqr(y, axis=1), np.zeros((4, 6)))
+        assert_array_equal(stats.iqr(y, axis=2), np.full((4, 5), 2.5))
+        assert_array_equal(stats.iqr(y, axis=(0, 1)), np.zeros(6))
+        assert_array_equal(stats.iqr(y, axis=(0, 2)), np.full(5, 3.))
+        assert_array_equal(stats.iqr(y, axis=(1, 2)), np.full(4, 3.))
+
+    def test_scalarlike(self):
+        x = np.arange(1) + 7.0
+        assert_equal(stats.iqr(x[0]), 0.0)
+        assert_equal(stats.iqr(x), 0.0)
+        assert_array_equal(stats.iqr(x, keepdims=True), [0.0])
+
+    def test_2D(self):
+        x = np.arange(15).reshape((3, 5))
+        assert_equal(stats.iqr(x), 7.0)
+        assert_array_equal(stats.iqr(x, axis=0), np.full(5, 5.))
+        assert_array_equal(stats.iqr(x, axis=1), np.full(3, 2.))
+        assert_array_equal(stats.iqr(x, axis=(0, 1)), 7.0)
+        assert_array_equal(stats.iqr(x, axis=(1, 0)), 7.0)
+
+    def test_axis(self):
+        # The `axis` keyword is also put through its paces in `test_keepdims`.
+        o = np.random.normal(size=(71, 23))
+        x = np.dstack([o] * 10)  # x.shape = (71, 23, 10)
+        q = stats.iqr(o)
+
+        assert_equal(stats.iqr(x, axis=(0, 1)), q)
+        x = np.moveaxis(x, -1, 0)  # x.shape = (10, 71, 23)
+        assert_equal(stats.iqr(x, axis=(2, 1)), q)
+        x = x.swapaxes(0, 1)  # x.shape = (71, 10, 23)
+        assert_equal(stats.iqr(x, axis=(0, 2)), q)
+        x = x.swapaxes(0, 1)  # x.shape = (10, 71, 23)
+
+        assert_equal(stats.iqr(x, axis=(0, 1, 2)),
+                     stats.iqr(x, axis=None))
+        assert_equal(stats.iqr(x, axis=(0,)),
+                     stats.iqr(x, axis=0))
+
+        d = np.arange(3 * 5 * 7 * 11)
+        # Older versions of numpy only shuffle along axis=0.
+        # Not sure about newer, don't care.
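+        # Editor's aside, a hedged definition check (not from the original
+        # test): the default IQR is just the 75th minus the 25th percentile,
+        # which NumPy can confirm directly on the not-yet-shuffled data.
+        assert_equal(stats.iqr(d), np.percentile(d, 75) - np.percentile(d, 25))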
+        np.random.shuffle(d)
+        d = d.reshape((3, 5, 7, 11))
+        assert_equal(stats.iqr(d, axis=(0, 1, 2))[0],
+                     stats.iqr(d[:, :, :, 0].ravel()))
+        assert_equal(stats.iqr(d, axis=(0, 1, 3))[1],
+                     stats.iqr(d[:, :, 1, :].ravel()))
+        assert_equal(stats.iqr(d, axis=(3, 1, -4))[2],
+                     stats.iqr(d[:, :, 2, :].ravel()))
+        assert_equal(stats.iqr(d, axis=(3, 1, 2))[2],
+                     stats.iqr(d[2, :, :, :].ravel()))
+        assert_equal(stats.iqr(d, axis=(3, 2))[2, 1],
+                     stats.iqr(d[2, 1, :, :].ravel()))
+        assert_equal(stats.iqr(d, axis=(1, -2))[2, 1],
+                     stats.iqr(d[2, :, :, 1].ravel()))
+        assert_equal(stats.iqr(d, axis=(1, 3))[2, 2],
+                     stats.iqr(d[2, :, 2, :].ravel()))
+
+        assert_raises(AxisError, stats.iqr, d, axis=4)
+        assert_raises(ValueError, stats.iqr, d, axis=(0, 0))
+
+    def test_rng(self):
+        x = np.arange(5)
+        assert_equal(stats.iqr(x), 2)
+        assert_equal(stats.iqr(x, rng=(25, 87.5)), 2.5)
+        assert_equal(stats.iqr(x, rng=(12.5, 75)), 2.5)
+        assert_almost_equal(stats.iqr(x, rng=(10, 50)), 1.6)  # 2.0 - 0.4
+
+        assert_raises(ValueError, stats.iqr, x, rng=(0, 101))
+        assert_raises(ValueError, stats.iqr, x, rng=(np.nan, 25))
+        assert_raises(TypeError, stats.iqr, x, rng=(0, 50, 60))
+
+    def test_interpolation(self):
+        x = np.arange(5)
+        y = np.arange(4)
+        # Default
+        assert_equal(stats.iqr(x), 2)
+        assert_equal(stats.iqr(y), 1.5)
+        # Linear
+        assert_equal(stats.iqr(x, interpolation='linear'), 2)
+        assert_equal(stats.iqr(y, interpolation='linear'), 1.5)
+        # Higher
+        assert_equal(stats.iqr(x, interpolation='higher'), 2)
+        assert_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 3)
+        assert_equal(stats.iqr(y, interpolation='higher'), 2)
+        # Lower (will generally, but not always, be the same as higher)
+        assert_equal(stats.iqr(x, interpolation='lower'), 2)
+        assert_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2)
+        assert_equal(stats.iqr(y, interpolation='lower'), 2)
+        # Nearest
+        assert_equal(stats.iqr(x, interpolation='nearest'), 2)
+        assert_equal(stats.iqr(y, interpolation='nearest'), 1)
+        # Midpoint
+        assert_equal(stats.iqr(x, interpolation='midpoint'), 2)
+        assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.5)
+        assert_equal(stats.iqr(y, interpolation='midpoint'), 2)
+
+        # Check that all method= values new in numpy 1.22.0 are accepted
+        for method in ('inverted_cdf', 'averaged_inverted_cdf',
+                       'closest_observation', 'interpolated_inverted_cdf',
+                       'hazen', 'weibull', 'median_unbiased',
+                       'normal_unbiased'):
+            stats.iqr(y, interpolation=method)
+
+        assert_raises(ValueError, stats.iqr, x, interpolation='foobar')
+
+    def test_keepdims(self):
+        # Also tests most of `axis`
+        x = np.ones((3, 5, 7, 11))
+        assert_equal(stats.iqr(x, axis=None, keepdims=False).shape, ())
+        assert_equal(stats.iqr(x, axis=2, keepdims=False).shape, (3, 5, 11))
+        assert_equal(stats.iqr(x, axis=(0, 1), keepdims=False).shape, (7, 11))
+        assert_equal(stats.iqr(x, axis=(0, 3), keepdims=False).shape, (5, 7))
+        assert_equal(stats.iqr(x, axis=(1,), keepdims=False).shape, (3, 7, 11))
+        assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=False).shape, ())
+        assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=False).shape, (7,))
+
+        assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, (1, 1, 1, 1))
+        assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 1, 11))
+        assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11))
+        assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1))
+        assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 1, 7, 11))
+        assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1))
+        assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1))
+
+    def test_nanpolicy(self):
+        x = np.arange(15.0).reshape((3, 5))
+
+        # No NaNs
+        assert_equal(stats.iqr(x, nan_policy='propagate'), 7)
+        assert_equal(stats.iqr(x, nan_policy='omit'), 7)
+        assert_equal(stats.iqr(x, nan_policy='raise'), 7)
+
+        # Yes NaNs
+        x[1, 2] = np.nan
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("always")
+            assert_equal(stats.iqr(x, nan_policy='propagate'),
+                         np.nan)
+            assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'),
+                         [5, 5, np.nan, 5, 5])
+            assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'),
+                         [2, np.nan, 2])
+
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("always")
+            assert_equal(stats.iqr(x, nan_policy='omit'), 7.5)
+            assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), np.full(5, 5))
+            assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 2.5, 2])
+
+        assert_raises(ValueError, stats.iqr, x, nan_policy='raise')
+        assert_raises(ValueError, stats.iqr, x, axis=0, nan_policy='raise')
+        assert_raises(ValueError, stats.iqr, x, axis=1, nan_policy='raise')
+
+        # Bad policy
+        assert_raises(ValueError, stats.iqr, x, nan_policy='barfood')
+
+    def test_scale(self):
+        x = np.arange(15.0).reshape((3, 5))
+
+        # No NaNs
+        assert_equal(stats.iqr(x, scale=1.0), 7)
+        assert_almost_equal(stats.iqr(x, scale='normal'), 7 / 1.3489795)
+        assert_equal(stats.iqr(x, scale=2.0), 3.5)
+
+        # Yes NaNs
+        x[1, 2] = np.nan
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("always")
+            assert_equal(stats.iqr(x, scale=1.0, nan_policy='propagate'), np.nan)
+            assert_equal(stats.iqr(x, scale='normal', nan_policy='propagate'), np.nan)
+            assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), np.nan)
+            # axis=1 chosen to show behavior with both nans and without
+            assert_equal(stats.iqr(x, axis=1, scale=1.0,
+                                   nan_policy='propagate'), [2, np.nan, 2])
+            assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
+                                          nan_policy='propagate'),
+                                np.array([2, np.nan, 2]) / 1.3489795)
+            assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'),
+                         [1, np.nan, 1])
+            # Since NumPy 1.17.0.dev, warnings are no longer emitted by
+            # np.percentile with nans, so we don't check the number of
+            # warnings here. See https://github.com/numpy/numpy/pull/12679.
+
+        assert_equal(stats.iqr(x, scale=1.0, nan_policy='omit'), 7.5)
+        assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),
+                            7.5 / 1.3489795)
+        assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 3.75)
+
+        # Bad scale
+        assert_raises(ValueError, stats.iqr, x, scale='foobar')
+
+
+class TestMoments:
+    """
+    Comparison numbers were found using R v1.5.1.
+    Note that length(testcase) = 4.
+    testmathworks comes from the documentation for the MATLAB Statistics
+    Toolbox and can be found at:
+        https://www.mathworks.com/help/stats/kurtosis.html
+        https://www.mathworks.com/help/stats/skewness.html
+    Both test cases come from there.
+    """
+    testcase = [1, 2, 3, 4]
+    scalar_testcase = 4.
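+    # For orientation: the k-th central moment about c of a sample is
+    # m_k = mean((x - c)**k), with c = mean(x) by default; e.g. for
+    # testcase = [1, 2, 3, 4], m_2 = (1.5**2 + 0.5**2 + 0.5**2 + 1.5**2)/4
+    # = 1.25, as asserted in test_moment below.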
+    np.random.seed(1234)
+    testcase_moment_accuracy = np.random.rand(42)
+    testmathworks = [1.165, 0.6268, 0.0751, 0.3516, -0.6965]
+
+    def _assert_equal(self, actual, expect, *, shape=None, dtype=None):
+        expect = np.asarray(expect)
+        if shape is not None:
+            expect = np.broadcast_to(expect, shape)
+        assert_array_equal(actual, expect)
+        if dtype is None:
+            dtype = expect.dtype
+        assert actual.dtype == dtype
+
+    @pytest.mark.parametrize('size', [10, (10, 2)])
+    @pytest.mark.parametrize('m, c', product((0, 1, 2, 3), (None, 0, 1)))
+    def test_moment_center_scalar_moment(self, size, m, c):
+        rng = np.random.default_rng(6581432544381372042)
+        x = rng.random(size=size)
+        res = stats.moment(x, m, center=c)
+        c = np.mean(x, axis=0) if c is None else c
+        ref = np.sum((x - c)**m, axis=0)/len(x)
+        assert_allclose(res, ref, atol=1e-16)
+
+    @pytest.mark.parametrize('size', [10, (10, 2)])
+    @pytest.mark.parametrize('c', (None, 0, 1))
+    def test_moment_center_array_moment(self, size, c):
+        rng = np.random.default_rng(1706828300224046506)
+        x = rng.random(size=size)
+        m = [0, 1, 2, 3]
+        res = stats.moment(x, m, center=c)
+        ref = [stats.moment(x, i, center=c) for i in m]
+        assert_equal(res, ref)
+
+    def test_moment(self):
+        # moment(testcase, power) = mean((testcase - mean(testcase))**power)
+        y = stats.moment(self.scalar_testcase)
+        assert_approx_equal(y, 0.0)
+        y = stats.moment(self.testcase, 0)
+        assert_approx_equal(y, 1.0)
+        y = stats.moment(self.testcase, 1)
+        assert_approx_equal(y, 0.0, 10)
+        y = stats.moment(self.testcase, 2)
+        assert_approx_equal(y, 1.25)
+        y = stats.moment(self.testcase, 3)
+        assert_approx_equal(y, 0.0)
+        y = stats.moment(self.testcase, 4)
+        assert_approx_equal(y, 2.5625)
+
+        # check array_like input for moment
+        y = stats.moment(self.testcase, [1, 2, 3, 4])
+        assert_allclose(y, [0, 1.25, 0, 2.5625])
+
+        # check that `order` must be integer-valued
+        # (integral floats are accepted, 1.2 raises)
+        y = stats.moment(self.testcase, 0.0)
+        assert_approx_equal(y, 1.0)
+        assert_raises(ValueError, stats.moment, self.testcase, 1.2)
+        y = stats.moment(self.testcase, [1.0, 2, 3, 4.0])
+        assert_allclose(y, [0, 1.25, 0, 2.5625])
+
+        # test empty input
+        message = r"Mean of empty slice\.|invalid value encountered.*"
+        with pytest.warns(RuntimeWarning, match=message):
+            y = stats.moment([])
+            self._assert_equal(y, np.nan, dtype=np.float64)
+            y = stats.moment(np.array([], dtype=np.float32))
+            self._assert_equal(y, np.nan, dtype=np.float32)
+            y = stats.moment(np.zeros((1, 0)), axis=0)
+            self._assert_equal(y, [], shape=(0,), dtype=np.float64)
+            y = stats.moment([[]], axis=1)
+            self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)
+            y = stats.moment([[]], order=[0, 1], axis=0)
+            self._assert_equal(y, [], shape=(2, 0))
+
+        x = np.arange(10.)
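+        # nan handling, exercised below: 'propagate' (the default) makes the
+        # result nan; 'omit' computes the moment from the remaining nine
+        # values (order defaults to 1, so the result is 0.0).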
+ x[9] = np.nan + assert_equal(stats.moment(x, 2), np.nan) + assert_almost_equal(stats.moment(x, nan_policy='omit'), 0.0) + assert_raises(ValueError, stats.moment, x, nan_policy='raise') + assert_raises(ValueError, stats.moment, x, nan_policy='foobar') + + @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.complex128]) + @pytest.mark.parametrize('expect, order', [(0, 1), (1, 0)]) + def test_constant_moments(self, dtype, expect, order): + x = np.random.rand(5).astype(dtype) + y = stats.moment(x, order=order) + self._assert_equal(y, expect, dtype=dtype) + + y = stats.moment(np.broadcast_to(x, (6, 5)), axis=0, order=order) + self._assert_equal(y, expect, shape=(5,), dtype=dtype) + + y = stats.moment(np.broadcast_to(x, (1, 2, 3, 4, 5)), axis=2, + order=order) + self._assert_equal(y, expect, shape=(1, 2, 4, 5), dtype=dtype) + + y = stats.moment(np.broadcast_to(x, (1, 2, 3, 4, 5)), axis=None, + order=order) + self._assert_equal(y, expect, shape=(), dtype=dtype) + + def test_moment_propagate_nan(self): + # Check that the shape of the result is the same for inputs + # with and without nans, cf gh-5817 + a = np.arange(8).reshape(2, -1).astype(float) + a[1, 0] = np.nan + mm = stats.moment(a, 2, axis=1, nan_policy="propagate") + np.testing.assert_allclose(mm, [1.25, np.nan], atol=1e-15) + + def test_moment_empty_order(self): + # tests moment with empty `order` list + with pytest.raises(ValueError, match=r"'order' must be a scalar or a" + r" non-empty 1D list/array."): + stats.moment([1, 2, 3, 4], order=[]) + + def test_rename_moment_order(self): + # Parameter 'order' was formerly known as 'moment'. The old name + # has not been deprecated, so it must continue to work. + x = np.arange(10) + res = stats.moment(x, moment=3) + ref = stats.moment(x, order=3) + np.testing.assert_equal(res, ref) + + def test_skewness(self): + # Scalar test case + y = stats.skew(self.scalar_testcase) + assert np.isnan(y) + # sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0) / + # ((sqrt(var(testmathworks)*4/5))**3)/5 + y = stats.skew(self.testmathworks) + assert_approx_equal(y, -0.29322304336607, 10) + y = stats.skew(self.testmathworks, bias=0) + assert_approx_equal(y, -0.437111105023940, 10) + y = stats.skew(self.testcase) + assert_approx_equal(y, 0.0, 10) + + x = np.arange(10.) + x[9] = np.nan + with np.errstate(invalid='ignore'): + assert_equal(stats.skew(x), np.nan) + assert_equal(stats.skew(x, nan_policy='omit'), 0.) 
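+            # With the single nan omitted, the remaining arange(9.) is
+            # symmetric about its mean, so its third central moment and
+            # hence its skewness are exactly 0.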
+        assert_raises(ValueError, stats.skew, x, nan_policy='raise')
+        assert_raises(ValueError, stats.skew, x, nan_policy='foobar')
+
+    def test_skewness_scalar(self):
+        # `skew` must return a scalar for 1-dim input
+        assert_equal(stats.skew(arange(10)), 0.0)
+
+    def test_skew_propagate_nan(self):
+        # Check that the shape of the result is the same for inputs
+        # with and without nans, cf gh-5817
+        a = np.arange(8).reshape(2, -1).astype(float)
+        a[1, 0] = np.nan
+        with np.errstate(invalid='ignore'):
+            s = stats.skew(a, axis=1, nan_policy="propagate")
+        np.testing.assert_allclose(s, [0, np.nan], atol=1e-15)
+
+    def test_skew_constant_value(self):
+        # Skewness of a constant input should be NaN even when the mean is
+        # not exact (gh-13245)
+        with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+            a = np.repeat(-0.27829495, 10)
+            assert np.isnan(stats.skew(a))
+            assert np.isnan(stats.skew(a * float(2**50)))
+            assert np.isnan(stats.skew(a / float(2**50)))
+            assert np.isnan(stats.skew(a, bias=False))
+
+            # similarly, from gh-11086:
+            assert np.isnan(stats.skew([14.3]*7))
+            assert np.isnan(stats.skew(1 + np.arange(-3, 4)*1e-16))
+
+    def test_kurtosis(self):
+        # Scalar test case
+        y = stats.kurtosis(self.scalar_testcase)
+        assert np.isnan(y)
+
+        # sum((testcase-mean(testcase,axis=0))**4,axis=0)
+        #   / ((sqrt(var(testcase)*3/4))**4)
+        #   / 4
+        #
+        # sum((test2-mean(testmathworks,axis=0))**4,axis=0)
+        #   / ((sqrt(var(testmathworks)*4/5))**4)
+        #   / 5
+        #
+        # Set flags for axis = 0 and
+        # fisher=0 (Pearson's defn of kurtosis for compatibility with Matlab)
+        y = stats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
+        assert_approx_equal(y, 2.1658856802973, 10)
+
+        # Note that MATLAB has confusing docs for the following case
+        # kurtosis(x,0) gives an unbiased estimate of Pearson's kurtosis
+        # kurtosis(x) gives a biased estimate of Fisher's kurtosis
+        # (Pearson's kurtosis minus 3)
+        # The MATLAB docs imply that both should give Fisher's
+        y = stats.kurtosis(self.testmathworks, fisher=0, bias=0)
+        assert_approx_equal(y, 3.663542721189047, 10)
+        y = stats.kurtosis(self.testcase, 0, 0)
+        assert_approx_equal(y, 1.64)
+
+        x = np.arange(10.)
+        x[9] = np.nan
+        assert_equal(stats.kurtosis(x), np.nan)
+        assert_almost_equal(stats.kurtosis(x, nan_policy='omit'), -1.230000)
+        assert_raises(ValueError, stats.kurtosis, x, nan_policy='raise')
+        assert_raises(ValueError, stats.kurtosis, x, nan_policy='foobar')
+
+    def test_kurtosis_array_scalar(self):
+        assert_equal(type(stats.kurtosis([1, 2, 3])), np.float64)
+
+    def test_kurtosis_propagate_nan(self):
+        # Check that the shape of the result is the same for inputs
+        # with and without nans, cf gh-5817
+        a = np.arange(8).reshape(2, -1).astype(float)
+        a[1, 0] = np.nan
+        k = stats.kurtosis(a, axis=1, nan_policy="propagate")
+        np.testing.assert_allclose(k, [-1.36, np.nan], atol=1e-15)
+
+    def test_kurtosis_constant_value(self):
+        # Kurtosis of a constant input should be NaN, even when the mean is
+        # not exact (gh-13245)
+        a = np.repeat(-0.27829495, 10)
+        with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+            assert np.isnan(stats.kurtosis(a, fisher=False))
+            assert np.isnan(stats.kurtosis(a * float(2**50), fisher=False))
+            assert np.isnan(stats.kurtosis(a / float(2**50), fisher=False))
+            assert np.isnan(stats.kurtosis(a, fisher=False, bias=False))
+
+    def test_moment_accuracy(self):
+        # 'moment' must have a small enough error compared to the slower
+        # but very accurate numpy.power() implementation.
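+        # What is compared below: the 42nd central moment computed directly
+        # with np.power as mean((x - x.mean())**42) versus stats.moment,
+        # which takes a cheaper route to the same power and should agree
+        # within assert_allclose's default tolerance.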
+ tc_no_mean = self.testcase_moment_accuracy - \ + np.mean(self.testcase_moment_accuracy) + assert_allclose(np.power(tc_no_mean, 42).mean(), + stats.moment(self.testcase_moment_accuracy, 42)) + + def test_precision_loss_gh15554(self): + # gh-15554 was one of several issues that have reported problems with + # constant or near-constant input. We can't always fix these, but + # make sure there's a warning. + with pytest.warns(RuntimeWarning, match="Precision loss occurred"): + rng = np.random.default_rng(34095309370) + a = rng.random(size=(100, 10)) + a[:, 0] = 1.01 + stats.skew(a)[0] + + def test_empty_1d(self): + message = r"Mean of empty slice\.|invalid value encountered.*" + with pytest.warns(RuntimeWarning, match=message): + stats.skew([]) + with pytest.warns(RuntimeWarning, match=message): + stats.kurtosis([]) + + +@hypothesis.strategies.composite +def ttest_data_axis_strategy(draw): + # draw an array under shape and value constraints + dtype = npst.floating_dtypes() + elements = dict(allow_nan=False, allow_infinity=False) + shape = npst.array_shapes(min_dims=1, min_side=2) + data = draw(npst.arrays(dtype=dtype, elements=elements, shape=shape)) + + # determine axes over which nonzero variance can be computed accurately + ok_axes = [] + # Locally, I don't need catch_warnings or simplefilter, and I can just + # suppress RuntimeWarning. I include all that in hope of getting the same + # behavior on CI. + with warnings.catch_warnings(): + warnings.simplefilter("error") + for axis in range(len(data.shape)): + with contextlib.suppress(Exception): + var = stats.moment(data, order=2, axis=axis) + if np.all(var > 0) and np.all(np.isfinite(var)): + ok_axes.append(axis) + # if there are no valid axes, tell hypothesis to try a different example + hypothesis.assume(ok_axes) + + # draw one of the valid axes + axis = draw(hypothesis.strategies.sampled_from(ok_axes)) + + return data, axis + + +class TestStudentTest: + X1 = np.array([-1, 0, 1]) + X2 = np.array([0, 1, 2]) + T1_0 = 0 + P1_0 = 1 + T1_1 = -1.7320508075 + P1_1 = 0.22540333075 + T1_2 = -3.464102 + P1_2 = 0.0741799 + T2_0 = 1.732051 + P2_0 = 0.2254033 + P1_1_l = P1_1 / 2 + P1_1_g = 1 - (P1_1 / 2) + + def test_onesample(self): + with suppress_warnings() as sup, \ + np.errstate(invalid="ignore", divide="ignore"): + sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") + t, p = stats.ttest_1samp(4., 3.) 
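+            # A size-1 sample leaves zero degrees of freedom: the one-sample
+            # statistic is t = (mean(x) - popmean) / (std(x, ddof=1)/sqrt(n)),
+            # and the ddof=1 standard deviation is undefined for n = 1, so
+            # both t and p come out nan.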
+ assert_(np.isnan(t)) + assert_(np.isnan(p)) + + t, p = stats.ttest_1samp(self.X1, 0) + + assert_array_almost_equal(t, self.T1_0) + assert_array_almost_equal(p, self.P1_0) + + res = stats.ttest_1samp(self.X1, 0) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + t, p = stats.ttest_1samp(self.X2, 0) + + assert_array_almost_equal(t, self.T2_0) + assert_array_almost_equal(p, self.P2_0) + + t, p = stats.ttest_1samp(self.X1, 1) + + assert_array_almost_equal(t, self.T1_1) + assert_array_almost_equal(p, self.P1_1) + + t, p = stats.ttest_1samp(self.X1, 2) + + assert_array_almost_equal(t, self.T1_2) + assert_array_almost_equal(p, self.P1_2) + + # check nan policy + x = stats.norm.rvs(loc=5, scale=10, size=51, random_state=7654567) + x[50] = np.nan + with np.errstate(invalid="ignore"): + assert_array_equal(stats.ttest_1samp(x, 5.0), (np.nan, np.nan)) + + assert_array_almost_equal(stats.ttest_1samp(x, 5.0, nan_policy='omit'), + (-1.6412624074367159, 0.107147027334048005)) + assert_raises(ValueError, stats.ttest_1samp, x, 5.0, nan_policy='raise') + assert_raises(ValueError, stats.ttest_1samp, x, 5.0, + nan_policy='foobar') + + def test_1samp_alternative(self): + assert_raises(ValueError, stats.ttest_1samp, self.X1, 0, + alternative="error") + + t, p = stats.ttest_1samp(self.X1, 1, alternative="less") + assert_allclose(p, self.P1_1_l) + assert_allclose(t, self.T1_1) + + t, p = stats.ttest_1samp(self.X1, 1, alternative="greater") + assert_allclose(p, self.P1_1_g) + assert_allclose(t, self.T1_1) + + @pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater']) + def test_1samp_ci_1d(self, alternative): + # test confidence interval method against reference values + rng = np.random.default_rng(8066178009154342972) + n = 10 + x = rng.normal(size=n, loc=1.5, scale=2) + popmean = rng.normal() # this shouldn't affect confidence interval + # Reference values generated with R t.test: + # options(digits=16) + # x = c(2.75532884, 0.93892217, 0.94835861, 1.49489446, -0.62396595, + # -1.88019867, -1.55684465, 4.88777104, 5.15310979, 4.34656348) + # t.test(x, conf.level=0.85, alternative='l') + + ref = {'two-sided': [0.3594423211709136, 2.9333455028290860], + 'greater': [0.7470806207371626, np.inf], + 'less': [-np.inf, 2.545707203262837]} + res = stats.ttest_1samp(x, popmean=popmean, alternative=alternative) + ci = res.confidence_interval(confidence_level=0.85) + assert_allclose(ci, ref[alternative]) + assert_equal(res.df, n-1) + + def test_1samp_ci_iv(self): + # test `confidence_interval` method input validation + res = stats.ttest_1samp(np.arange(10), 0) + message = '`confidence_level` must be a number between 0 and 1.' 
+ with pytest.raises(ValueError, match=message): + res.confidence_interval(confidence_level=10) + + @pytest.mark.xslow + @hypothesis.given(alpha=hypothesis.strategies.floats(1e-15, 1-1e-15), + data_axis=ttest_data_axis_strategy()) + @pytest.mark.parametrize('alternative', ['less', 'greater']) + def test_pvalue_ci(self, alpha, data_axis, alternative): + # test relationship between one-sided p-values and confidence intervals + data, axis = data_axis + res = stats.ttest_1samp(data, 0, + alternative=alternative, axis=axis) + l, u = res.confidence_interval(confidence_level=alpha) + popmean = l if alternative == 'greater' else u + popmean = np.expand_dims(popmean, axis=axis) + res = stats.ttest_1samp(data, popmean, + alternative=alternative, axis=axis) + np.testing.assert_allclose(res.pvalue, 1-alpha) + + +class TestPercentileOfScore: + + def f(self, *args, **kwargs): + return stats.percentileofscore(*args, **kwargs) + + @pytest.mark.parametrize("kind, result", [("rank", 40), + ("mean", 35), + ("strict", 30), + ("weak", 40)]) + def test_unique(self, kind, result): + a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + assert_equal(self.f(a, 4, kind=kind), result) + + @pytest.mark.parametrize("kind, result", [("rank", 45), + ("mean", 40), + ("strict", 30), + ("weak", 50)]) + def test_multiple2(self, kind, result): + a = [1, 2, 3, 4, 4, 5, 6, 7, 8, 9] + assert_equal(self.f(a, 4, kind=kind), result) + + @pytest.mark.parametrize("kind, result", [("rank", 50), + ("mean", 45), + ("strict", 30), + ("weak", 60)]) + def test_multiple3(self, kind, result): + a = [1, 2, 3, 4, 4, 4, 5, 6, 7, 8] + assert_equal(self.f(a, 4, kind=kind), result) + + @pytest.mark.parametrize("kind, result", [("rank", 30), + ("mean", 30), + ("strict", 30), + ("weak", 30)]) + def test_missing(self, kind, result): + a = [1, 2, 3, 5, 6, 7, 8, 9, 10, 11] + assert_equal(self.f(a, 4, kind=kind), result) + + @pytest.mark.parametrize("kind, result", [("rank", 40), + ("mean", 35), + ("strict", 30), + ("weak", 40)]) + def test_large_numbers(self, kind, result): + a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] + assert_equal(self.f(a, 40, kind=kind), result) + + @pytest.mark.parametrize("kind, result", [("rank", 50), + ("mean", 45), + ("strict", 30), + ("weak", 60)]) + def test_large_numbers_multiple3(self, kind, result): + a = [10, 20, 30, 40, 40, 40, 50, 60, 70, 80] + assert_equal(self.f(a, 40, kind=kind), result) + + @pytest.mark.parametrize("kind, result", [("rank", 30), + ("mean", 30), + ("strict", 30), + ("weak", 30)]) + def test_large_numbers_missing(self, kind, result): + a = [10, 20, 30, 50, 60, 70, 80, 90, 100, 110] + assert_equal(self.f(a, 40, kind=kind), result) + + @pytest.mark.parametrize("kind, result", [("rank", [0, 10, 100, 100]), + ("mean", [0, 5, 95, 100]), + ("strict", [0, 0, 90, 100]), + ("weak", [0, 10, 100, 100])]) + def test_boundaries(self, kind, result): + a = [10, 20, 30, 50, 60, 70, 80, 90, 100, 110] + assert_equal(self.f(a, [0, 10, 110, 200], kind=kind), result) + + @pytest.mark.parametrize("kind, result", [("rank", [0, 10, 100]), + ("mean", [0, 5, 95]), + ("strict", [0, 0, 90]), + ("weak", [0, 10, 100])]) + def test_inf(self, kind, result): + a = [1, 2, 3, 4, 5, 6, 7, 8, 9, +np.inf] + assert_equal(self.f(a, [-np.inf, 1, +np.inf], kind=kind), result) + + cases = [("propagate", [], 1, np.nan), + ("propagate", [np.nan], 1, np.nan), + ("propagate", [np.nan], [0, 1, 2], [np.nan, np.nan, np.nan]), + ("propagate", [1, 2], [1, 2, np.nan], [50, 100, np.nan]), + ("omit", [1, 2, np.nan], [0, 1, 2], [0, 50, 100]), + ("omit", [1, 2], [0, 
1, np.nan], [0, 50, np.nan]), + ("omit", [np.nan, np.nan], [0, 1, 2], [np.nan, np.nan, np.nan])] + + @pytest.mark.parametrize("policy, a, score, result", cases) + def test_nans_ok(self, policy, a, score, result): + assert_equal(self.f(a, score, nan_policy=policy), result) + + cases = [ + ("raise", [1, 2, 3, np.nan], [1, 2, 3], + "The input contains nan values"), + ("raise", [1, 2, 3], [1, 2, 3, np.nan], + "The input contains nan values"), + ] + + @pytest.mark.parametrize("policy, a, score, message", cases) + def test_nans_fail(self, policy, a, score, message): + with assert_raises(ValueError, match=message): + self.f(a, score, nan_policy=policy) + + @pytest.mark.parametrize("shape", [ + (6, ), + (2, 3), + (2, 1, 3), + (2, 1, 1, 3), + ]) + def test_nd(self, shape): + a = np.array([0, 1, 2, 3, 4, 5]) + scores = a.reshape(shape) + results = scores*10 + a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + assert_equal(self.f(a, scores, kind="rank"), results) + + +PowerDivCase = namedtuple('Case', # type: ignore[name-match] + ['f_obs', 'f_exp', 'ddof', 'axis', + 'chi2', # Pearson's + 'log', # G-test (log-likelihood) + 'mod_log', # Modified log-likelihood + 'cr', # Cressie-Read (lambda=2/3) + ]) + +# The details of the first two elements in power_div_1d_cases are used +# in a test in TestPowerDivergence. Check that code before making +# any changes here. +power_div_1d_cases = [ + # Use the default f_exp. + PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=None, ddof=0, axis=None, + chi2=4, + log=2*(4*np.log(4/8) + 12*np.log(12/8)), + mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)), + cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)), + # Give a non-uniform f_exp. + PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=[2, 16, 12, 2], ddof=0, axis=None, + chi2=24, + log=2*(4*np.log(4/2) + 8*np.log(8/16) + 8*np.log(8/2)), + mod_log=2*(2*np.log(2/4) + 16*np.log(16/8) + 2*np.log(2/8)), + cr=(4*((4/2)**(2/3) - 1) + 8*((8/16)**(2/3) - 1) + + 8*((8/2)**(2/3) - 1))/(5/9)), + # f_exp is a scalar. + PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=8, ddof=0, axis=None, + chi2=4, + log=2*(4*np.log(4/8) + 12*np.log(12/8)), + mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)), + cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)), + # f_exp equal to f_obs. + PowerDivCase(f_obs=[3, 5, 7, 9], f_exp=[3, 5, 7, 9], ddof=0, axis=0, + chi2=0, log=0, mod_log=0, cr=0), +] + + +power_div_empty_cases = [ + # Shape is (0,)--a data set with length 0. The computed + # test statistic should be 0. + PowerDivCase(f_obs=[], + f_exp=None, ddof=0, axis=0, + chi2=0, log=0, mod_log=0, cr=0), + # Shape is (0, 3). This is 3 data sets, but each data set has + # length 0, so the computed test statistic should be [0, 0, 0]. + PowerDivCase(f_obs=np.array([[],[],[]]).T, + f_exp=None, ddof=0, axis=0, + chi2=[0, 0, 0], + log=[0, 0, 0], + mod_log=[0, 0, 0], + cr=[0, 0, 0]), + # Shape is (3, 0). This represents an empty collection of + # data sets in which each data set has length 3. The test + # statistic should be an empty array. 
+ PowerDivCase(f_obs=np.array([[],[],[]]), + f_exp=None, ddof=0, axis=0, + chi2=[], + log=[], + mod_log=[], + cr=[]), +] + + +class TestPowerDivergence: + + def check_power_divergence(self, f_obs, f_exp, ddof, axis, lambda_, + expected_stat): + f_obs = np.asarray(f_obs) + if axis is None: + num_obs = f_obs.size + else: + b = np.broadcast(f_obs, f_exp) + num_obs = b.shape[axis] + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "Mean of empty slice") + stat, p = stats.power_divergence( + f_obs=f_obs, f_exp=f_exp, ddof=ddof, + axis=axis, lambda_=lambda_) + assert_allclose(stat, expected_stat) + + if lambda_ == 1 or lambda_ == "pearson": + # Also test stats.chisquare. + stat, p = stats.chisquare(f_obs=f_obs, f_exp=f_exp, ddof=ddof, + axis=axis) + assert_allclose(stat, expected_stat) + + ddof = np.asarray(ddof) + expected_p = stats.distributions.chi2.sf(expected_stat, + num_obs - 1 - ddof) + assert_allclose(p, expected_p) + + def test_basic(self): + for case in power_div_1d_cases: + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + None, case.chi2) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "pearson", case.chi2) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + 1, case.chi2) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "log-likelihood", case.log) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "mod-log-likelihood", case.mod_log) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + "cressie-read", case.cr) + self.check_power_divergence( + case.f_obs, case.f_exp, case.ddof, case.axis, + 2/3, case.cr) + + def test_basic_masked(self): + for case in power_div_1d_cases: + mobs = np.ma.array(case.f_obs) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + None, case.chi2) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + "pearson", case.chi2) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + 1, case.chi2) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + "log-likelihood", case.log) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + "mod-log-likelihood", case.mod_log) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + "cressie-read", case.cr) + self.check_power_divergence( + mobs, case.f_exp, case.ddof, case.axis, + 2/3, case.cr) + + def test_axis(self): + case0 = power_div_1d_cases[0] + case1 = power_div_1d_cases[1] + f_obs = np.vstack((case0.f_obs, case1.f_obs)) + f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs), + case1.f_exp)) + # Check the four computational code paths in power_divergence + # using a 2D array with axis=1. + self.check_power_divergence( + f_obs, f_exp, 0, 1, + "pearson", [case0.chi2, case1.chi2]) + self.check_power_divergence( + f_obs, f_exp, 0, 1, + "log-likelihood", [case0.log, case1.log]) + self.check_power_divergence( + f_obs, f_exp, 0, 1, + "mod-log-likelihood", [case0.mod_log, case1.mod_log]) + self.check_power_divergence( + f_obs, f_exp, 0, 1, + "cressie-read", [case0.cr, case1.cr]) + # Reshape case0.f_obs to shape (2,2), and use axis=None. + # The result should be the same. + self.check_power_divergence( + np.array(case0.f_obs).reshape(2, 2), None, 0, None, + "pearson", case0.chi2) + + def test_ddof_broadcasting(self): + # Test that ddof broadcasts correctly. + # ddof does not affect the test statistic. 
It is broadcast
+        # with the computed test statistic for the computation of
+        # the p value.
+
+        case0 = power_div_1d_cases[0]
+        case1 = power_div_1d_cases[1]
+        # Create 4x2 arrays of observed and expected frequencies.
+        f_obs = np.vstack((case0.f_obs, case1.f_obs)).T
+        f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
+                           case1.f_exp)).T
+
+        expected_chi2 = [case0.chi2, case1.chi2]
+
+        # ddof has shape (2, 1).  This is broadcast with the computed
+        # statistic, so p will have shape (2, 2).
+        ddof = np.array([[0], [1]])
+
+        stat, p = stats.power_divergence(f_obs, f_exp, ddof=ddof)
+        assert_allclose(stat, expected_chi2)
+
+        # Compute the p values separately, passing in scalars for ddof.
+        stat0, p0 = stats.power_divergence(f_obs, f_exp, ddof=ddof[0, 0])
+        stat1, p1 = stats.power_divergence(f_obs, f_exp, ddof=ddof[1, 0])
+
+        assert_array_equal(p, np.vstack((p0, p1)))
+
+    def test_empty_cases(self):
+        with warnings.catch_warnings():
+            for case in power_div_empty_cases:
+                self.check_power_divergence(
+                    case.f_obs, case.f_exp, case.ddof, case.axis,
+                    "pearson", case.chi2)
+                self.check_power_divergence(
+                    case.f_obs, case.f_exp, case.ddof, case.axis,
+                    "log-likelihood", case.log)
+                self.check_power_divergence(
+                    case.f_obs, case.f_exp, case.ddof, case.axis,
+                    "mod-log-likelihood", case.mod_log)
+                self.check_power_divergence(
+                    case.f_obs, case.f_exp, case.ddof, case.axis,
+                    "cressie-read", case.cr)
+
+    def test_power_divergence_result_attributes(self):
+        f_obs = power_div_1d_cases[0].f_obs
+        f_exp = power_div_1d_cases[0].f_exp
+        ddof = power_div_1d_cases[0].ddof
+        axis = power_div_1d_cases[0].axis
+
+        res = stats.power_divergence(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
+                                     axis=axis, lambda_="pearson")
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+    def test_power_divergence_gh_12282(self):
+        # The sums of observed and expected frequencies must match
+        f_obs = np.array([[10, 20], [30, 20]])
+        f_exp = np.array([[5, 15], [35, 25]])
+        with assert_raises(ValueError, match='For each axis slice...'):
+            stats.power_divergence(f_obs=[10, 20], f_exp=[30, 60])
+        with assert_raises(ValueError, match='For each axis slice...'):
+            stats.power_divergence(f_obs=f_obs, f_exp=f_exp, axis=1)
+        stat, pval = stats.power_divergence(f_obs=f_obs, f_exp=f_exp)
+        assert_allclose(stat, [5.71428571, 2.66666667])
+        assert_allclose(pval, [0.01682741, 0.10247043])
+
+
+def test_gh_chisquare_12282():
+    # Currently `chisquare` is implemented via power_divergence;
+    # in case that ever changes, perform a basic test like
+    # test_power_divergence_gh_12282
+    with assert_raises(ValueError, match='For each axis slice...'):
+        stats.chisquare(f_obs=[10, 20], f_exp=[30, 60])
+
+
+@pytest.mark.parametrize("n, dtype", [(200, np.uint8), (1000000, np.int32)])
+def test_chisquare_data_types_attributes(n, dtype):
+    # Regression test for gh-10159 and gh-18368
+    obs = np.array([n, 0], dtype=dtype)
+    exp = np.array([n // 2, n // 2], dtype=dtype)
+    res = stats.chisquare(obs, exp)
+    stat, p = res
+    assert_allclose(stat, n, rtol=1e-13)
+    # check that attributes are identical to unpacked outputs - see gh-18368
+    assert_equal(res.statistic, stat)
+    assert_equal(res.pvalue, p)
+
+
+def test_chisquare_masked_arrays():
+    # Test masked arrays.
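+    # chisquare computes sum((obs - exp)**2 / exp), with exp defaulting to
+    # the mean of the unmasked observations.  First column: [8, 8, 16, 32],
+    # exp = 16, so (64 + 64 + 0 + 256)/16 = 24.0.  Second column: [3, 4, 5],
+    # exp = 4, so (1 + 0 + 1)/4 = 0.5.  Hence expected_chisq below.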
+    obs = np.array([[8, 8, 16, 32, -1], [-1, -1, 3, 4, 5]]).T
+    mask = np.array([[0, 0, 0, 0, 1], [1, 1, 0, 0, 0]]).T
+    mobs = np.ma.masked_array(obs, mask)
+    expected_chisq = np.array([24.0, 0.5])
+    expected_g = np.array([2*(2*8*np.log(0.5) + 32*np.log(2.0)),
+                           2*(3*np.log(0.75) + 5*np.log(1.25))])
+
+    chi2 = stats.distributions.chi2
+
+    chisq, p = stats.chisquare(mobs)
+    mat.assert_array_equal(chisq, expected_chisq)
+    mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
+                                             mobs.count(axis=0) - 1))
+
+    g, p = stats.power_divergence(mobs, lambda_='log-likelihood')
+    mat.assert_array_almost_equal(g, expected_g, decimal=15)
+    mat.assert_array_almost_equal(p, chi2.sf(expected_g,
+                                             mobs.count(axis=0) - 1))
+
+    chisq, p = stats.chisquare(mobs.T, axis=1)
+    mat.assert_array_equal(chisq, expected_chisq)
+    mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
+                                             mobs.T.count(axis=1) - 1))
+    g, p = stats.power_divergence(mobs.T, axis=1, lambda_="log-likelihood")
+    mat.assert_array_almost_equal(g, expected_g, decimal=15)
+    mat.assert_array_almost_equal(p, chi2.sf(expected_g,
+                                             mobs.count(axis=0) - 1))
+
+    obs1 = np.ma.array([3, 5, 6, 99, 10], mask=[0, 0, 0, 1, 0])
+    exp1 = np.ma.array([2, 4, 8, 10, 99], mask=[0, 0, 0, 0, 1])
+    chisq, p = stats.chisquare(obs1, f_exp=exp1)
+    # Because of the mask at index 3 of obs1 and at index 4 of exp1,
+    # only the first three elements are included in the calculation
+    # of the statistic.
+    mat.assert_array_equal(chisq, 1/2 + 1/4 + 4/8)
+
+    # When axis=None, the two values should have type np.float64.
+    chisq, p = stats.chisquare(np.ma.array([1, 2, 3]), axis=None)
+    assert_(isinstance(chisq, np.float64))
+    assert_(isinstance(p, np.float64))
+    assert_equal(chisq, 1.0)
+    assert_almost_equal(p, stats.distributions.chi2.sf(1.0, 2))
+
+    # Empty arrays:
+    # A data set with length 0 returns a masked scalar.
+    with np.errstate(invalid='ignore'):
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "Mean of empty slice")
+            chisq, p = stats.chisquare(np.ma.array([]))
+    assert_(isinstance(chisq, np.ma.MaskedArray))
+    assert_equal(chisq.shape, ())
+    assert_(chisq.mask)
+
+    empty3 = np.ma.array([[], [], []])
+
+    # empty3 is a collection of 0 data sets (whose lengths would be 3, if
+    # there were any), so the return value is an array with length 0.
+    chisq, p = stats.chisquare(empty3)
+    assert_(isinstance(chisq, np.ma.MaskedArray))
+    mat.assert_array_equal(chisq, [])
+
+    # empty3.T is an array containing 3 data sets, each with length 0,
+    # so an array of size (3,) is returned, with all values masked.
+    with np.errstate(invalid='ignore'):
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "Mean of empty slice")
+            chisq, p = stats.chisquare(empty3.T)
+
+    assert_(isinstance(chisq, np.ma.MaskedArray))
+    assert_equal(chisq.shape, (3,))
+    assert_(np.all(chisq.mask))
+
+
+def test_power_divergence_against_cressie_read_data():
+    # Test stats.power_divergence against tables 4 and 5 from
+    # Cressie and Read, "Multinomial Goodness-of-Fit Tests",
+    # J. R. Statist. Soc. B (1984), Vol 46, No. 3, pp. 440-464.
+    # This tests the calculation for several values of lambda.
+
+    # Table 4 data recalculated for greater precision according to:
+    # Shelby J. Haberman, Analysis of Qualitative Data: Volume 1
+    # Introductory Topics, Academic Press, New York, USA (1978).
+    obs = np.array([15, 11, 14, 17, 5, 11, 10, 4, 8,
+                    10, 7, 9, 11, 3, 6, 1, 1, 4])
+    beta = -0.083769  # Haberman (1978), p. 15
+    i = np.arange(1, len(obs) + 1)
+    alpha = np.log(obs.sum() / np.exp(beta*i).sum())
+    expected_counts = np.exp(alpha + beta*i)
+
+    # `table4` holds just the second and third columns from Table 4.
+    table4 = np.vstack((obs, expected_counts)).T
+
+    table5 = np.array([
+        # lambda, statistic
+        -10.0, 72.2e3,
+        -5.0, 28.9e1,
+        -3.0, 65.6,
+        -2.0, 40.6,
+        -1.5, 34.0,
+        -1.0, 29.5,
+        -0.5, 26.5,
+        0.0, 24.6,
+        0.5, 23.4,
+        0.67, 23.1,
+        1.0, 22.7,
+        1.5, 22.6,
+        2.0, 22.9,
+        3.0, 24.8,
+        5.0, 35.5,
+        10.0, 21.4e1,
+    ]).reshape(-1, 2)
+
+    for lambda_, expected_stat in table5:
+        stat, p = stats.power_divergence(table4[:, 0], table4[:, 1],
+                                         lambda_=lambda_)
+        assert_allclose(stat, expected_stat, rtol=5e-3)
+
+
+def test_friedmanchisquare():
+    # see ticket:113
+    # verified with MATLAB and R
+    # From Demsar, "Statistical Comparisons of Classifiers over Multiple
+    # Data Sets", 2006, Xf=9.28 (no tie handling, tie corrected Xf >= 9.28)
+    x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,
+                 0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),
+          array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,
+                 0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),
+          array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,
+                 0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),
+          array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,
+                 0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]
+
+    # From "Bioestadistica para las ciencias de la salud", Xf=18.95 p<0.001:
+    x2 = [array([4, 3, 5, 3, 5, 3, 2, 5, 4, 4, 4, 3]),
+          array([2, 2, 1, 2, 3, 1, 2, 3, 2, 1, 1, 3]),
+          array([2, 4, 3, 3, 4, 3, 3, 4, 4, 1, 2, 1]),
+          array([3, 5, 4, 3, 4, 4, 3, 3, 3, 4, 4, 4])]
+
+    # From Jerrold H. Zar, "Biostatistical Analysis" (example 12.6),
+    # Xf=10.68, 0.005 < p < 0.01:
+    # Probability from this example is inexact
+    # using Chisquare approximation of Friedman Chisquare.
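+    # The Friedman statistic for k treatments in n blocks is
+    # chi2_F = 12/(n*k*(k+1)) * sum_j(R_j**2) - 3*n*(k+1), with R_j the rank
+    # sum of treatment j (SciPy additionally applies a tie correction); it
+    # is referred to a chi-squared distribution with k - 1 degrees of freedom.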
+    x3 = [array([7.0, 9.9, 8.5, 5.1, 10.3]),
+          array([5.3, 5.7, 4.7, 3.5, 7.7]),
+          array([4.9, 7.6, 5.5, 2.8, 8.4]),
+          array([8.8, 8.9, 8.1, 3.3, 9.1])]
+
+    assert_array_almost_equal(stats.friedmanchisquare(x1[0], x1[1], x1[2], x1[3]),
+                              (10.2283464566929, 0.0167215803284414))
+    assert_array_almost_equal(stats.friedmanchisquare(x2[0], x2[1], x2[2], x2[3]),
+                              (18.9428571428571, 0.000280938375189499))
+    assert_array_almost_equal(stats.friedmanchisquare(x3[0], x3[1], x3[2], x3[3]),
+                              (10.68, 0.0135882729582176))
+    assert_raises(ValueError, stats.friedmanchisquare, x3[0], x3[1])
+
+    # test for namedtuple attribute results
+    attributes = ('statistic', 'pvalue')
+    res = stats.friedmanchisquare(*x1)
+    check_named_results(res, attributes)
+
+    # test using mstats
+    assert_array_almost_equal(mstats.friedmanchisquare(x1[0], x1[1],
+                                                       x1[2], x1[3]),
+                              (10.2283464566929, 0.0167215803284414))
+    # the following fails
+    # assert_array_almost_equal(mstats.friedmanchisquare(x2[0], x2[1],
+    #                                                    x2[2], x2[3]),
+    #                           (18.9428571428571, 0.000280938375189499))
+    assert_array_almost_equal(mstats.friedmanchisquare(x3[0], x3[1],
+                                                       x3[2], x3[3]),
+                              (10.68, 0.0135882729582176))
+    assert_raises(ValueError, mstats.friedmanchisquare, x3[0], x3[1])
+
+
+class TestKSTest:
+    """Tests that kstest and ks_1samp agree for various sample sizes,
+    alternatives and modes."""
+
+    def _testOne(self, x, alternative, expected_statistic, expected_prob,
+                 mode='auto', decimal=14):
+        result = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
+        expected = np.array([expected_statistic, expected_prob])
+        assert_array_almost_equal(np.array(result), expected, decimal=decimal)
+
+    def _test_kstest_and_ks1samp(self, x, alternative, mode='auto', decimal=14):
+        result = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
+        result_1samp = stats.ks_1samp(x, stats.norm.cdf,
+                                      alternative=alternative, mode=mode)
+        assert_array_almost_equal(np.array(result), result_1samp, decimal=decimal)
+
+    def test_namedtuple_attributes(self):
+        x = np.linspace(-1, 1, 9)
+        # test for namedtuple attribute results
+        attributes = ('statistic', 'pvalue')
+        res = stats.kstest(x, 'norm')
+        check_named_results(res, attributes)
+
+    def test_agree_with_ks_1samp(self):
+        x = np.linspace(-1, 1, 9)
+        self._test_kstest_and_ks1samp(x, 'two-sided')
+
+        x = np.linspace(-15, 15, 9)
+        self._test_kstest_and_ks1samp(x, 'two-sided')
+
+        x = [-1.23, 0.06, -0.60, 0.17, 0.66, -0.17, -0.08, 0.27, -0.98, -0.99]
+        self._test_kstest_and_ks1samp(x, 'two-sided')
+        self._test_kstest_and_ks1samp(x, 'greater', mode='exact')
+        self._test_kstest_and_ks1samp(x, 'less', mode='exact')
+
+    # missing: no test that uses *args
+
+
+class TestKSOneSample:
+    """
+    Tests ks_1samp (the one-sample K-S test) for various sample sizes,
+    alternatives and modes.
+ """ + + def _testOne(self, x, alternative, expected_statistic, expected_prob, + mode='auto', decimal=14): + result = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative, mode=mode) + expected = np.array([expected_statistic, expected_prob]) + assert_array_almost_equal(np.array(result), expected, decimal=decimal) + + def test_namedtuple_attributes(self): + x = np.linspace(-1, 1, 9) + # test for namedtuple attribute results + attributes = ('statistic', 'pvalue') + res = stats.ks_1samp(x, stats.norm.cdf) + check_named_results(res, attributes) + + def test_agree_with_r(self): + # comparing with some values from R + x = np.linspace(-1, 1, 9) + self._testOne(x, 'two-sided', 0.15865525393145705, 0.95164069201518386) + + x = np.linspace(-15, 15, 9) + self._testOne(x, 'two-sided', 0.44435602715924361, 0.038850140086788665) + + x = [-1.23, 0.06, -0.60, 0.17, 0.66, -0.17, -0.08, 0.27, -0.98, -0.99] + self._testOne(x, 'two-sided', 0.293580126801961, 0.293408463684361) + self._testOne(x, 'greater', 0.293580126801961, 0.146988835042376, mode='exact') + self._testOne(x, 'less', 0.109348552425692, 0.732768892470675, mode='exact') + + def test_known_examples(self): + # the following tests rely on deterministically replicated rvs + x = stats.norm.rvs(loc=0.2, size=100, random_state=987654321) + self._testOne(x, 'two-sided', 0.12464329735846891, 0.089444888711820769, + mode='asymp') + self._testOne(x, 'less', 0.12464329735846891, 0.040989164077641749) + self._testOne(x, 'greater', 0.0072115233216310994, 0.98531158590396228) + + def test_ks1samp_allpaths(self): + # Check NaN input, output. + assert_(np.isnan(kolmogn(np.nan, 1, True))) + with assert_raises(ValueError, match='n is not integral: 1.5'): + kolmogn(1.5, 1, True) + assert_(np.isnan(kolmogn(-1, 1, True))) + + dataset = np.asarray([ + # Check x out of range + (101, 1, True, 1.0), + (101, 1.1, True, 1.0), + (101, 0, True, 0.0), + (101, -0.1, True, 0.0), + + (32, 1.0 / 64, True, 0.0), # Ruben-Gambino + (32, 1.0 / 64, False, 1.0), # Ruben-Gambino + + # Miller + (32, 0.5, True, 0.9999999363163307), + # Miller 2 * special.smirnov(32, 0.5) + (32, 0.5, False, 6.368366937916623e-08), + + # Check some other paths + (32, 1.0 / 8, True, 0.34624229979775223), + (32, 1.0 / 4, True, 0.9699508336558085), + (1600, 0.49, False, 0.0), + # 2 * special.smirnov(1600, 1/16.0) + (1600, 1 / 16.0, False, 7.0837876229702195e-06), + # _kolmogn_DMTW + (1600, 14 / 1600, False, 0.99962357317602), + # _kolmogn_PelzGood + (1600, 1 / 32, False, 0.08603386296651416), + ]) + FuncData(kolmogn, dataset, (0, 1, 2), 3).check(dtypes=[int, float, bool]) + + @pytest.mark.parametrize("ksfunc", [stats.kstest, stats.ks_1samp]) + @pytest.mark.parametrize("alternative, x6val, ref_location, ref_sign", + [('greater', 6, 6, +1), + ('less', 7, 7, -1), + ('two-sided', 6, 6, +1), + ('two-sided', 7, 7, -1)]) + def test_location_sign(self, ksfunc, alternative, + x6val, ref_location, ref_sign): + # Test that location and sign corresponding with statistic are as + # expected. (Test is designed to be easy to predict.) 
+ x = np.arange(10) + 0.5 + x[6] = x6val + cdf = stats.uniform(scale=10).cdf + res = ksfunc(x, cdf, alternative=alternative) + assert_allclose(res.statistic, 0.1, rtol=1e-15) + assert res.statistic_location == ref_location + assert res.statistic_sign == ref_sign + + # missing: no test that uses *args + + +class TestKSTwoSamples: + """Tests 2-samples with K-S various sizes, alternatives, modes.""" + + def _testOne(self, x1, x2, alternative, expected_statistic, expected_prob, + mode='auto'): + result = stats.ks_2samp(x1, x2, alternative, mode=mode) + expected = np.array([expected_statistic, expected_prob]) + assert_array_almost_equal(np.array(result), expected) + + def testSmall(self): + self._testOne([0], [1], 'two-sided', 1.0/1, 1.0) + self._testOne([0], [1], 'greater', 1.0/1, 0.5) + self._testOne([0], [1], 'less', 0.0/1, 1.0) + self._testOne([1], [0], 'two-sided', 1.0/1, 1.0) + self._testOne([1], [0], 'greater', 0.0/1, 1.0) + self._testOne([1], [0], 'less', 1.0/1, 0.5) + + def testTwoVsThree(self): + data1 = np.array([1.0, 2.0]) + data1p = data1 + 0.01 + data1m = data1 - 0.01 + data2 = np.array([1.0, 2.0, 3.0]) + self._testOne(data1p, data2, 'two-sided', 1.0 / 3, 1.0) + self._testOne(data1p, data2, 'greater', 1.0 / 3, 0.7) + self._testOne(data1p, data2, 'less', 1.0 / 3, 0.7) + self._testOne(data1m, data2, 'two-sided', 2.0 / 3, 0.6) + self._testOne(data1m, data2, 'greater', 2.0 / 3, 0.3) + self._testOne(data1m, data2, 'less', 0, 1.0) + + def testTwoVsFour(self): + data1 = np.array([1.0, 2.0]) + data1p = data1 + 0.01 + data1m = data1 - 0.01 + data2 = np.array([1.0, 2.0, 3.0, 4.0]) + self._testOne(data1p, data2, 'two-sided', 2.0 / 4, 14.0/15) + self._testOne(data1p, data2, 'greater', 2.0 / 4, 8.0/15) + self._testOne(data1p, data2, 'less', 1.0 / 4, 12.0/15) + + self._testOne(data1m, data2, 'two-sided', 3.0 / 4, 6.0/15) + self._testOne(data1m, data2, 'greater', 3.0 / 4, 3.0/15) + self._testOne(data1m, data2, 'less', 0, 1.0) + + def test100_100(self): + x100 = np.linspace(1, 100, 100) + x100_2_p1 = x100 + 2 + 0.1 + x100_2_m1 = x100 + 2 - 0.1 + self._testOne(x100, x100_2_p1, 'two-sided', 3.0 / 100, 0.9999999999962055) + self._testOne(x100, x100_2_p1, 'greater', 3.0 / 100, 0.9143290114276248) + self._testOne(x100, x100_2_p1, 'less', 0, 1.0) + self._testOne(x100, x100_2_m1, 'two-sided', 2.0 / 100, 1.0) + self._testOne(x100, x100_2_m1, 'greater', 2.0 / 100, 0.960978450786184) + self._testOne(x100, x100_2_m1, 'less', 0, 1.0) + + def test100_110(self): + x100 = np.linspace(1, 100, 100) + x110 = np.linspace(1, 100, 110) + x110_20_p1 = x110 + 20 + 0.1 + x110_20_m1 = x110 + 20 - 0.1 + # 100, 110 + self._testOne(x100, x110_20_p1, 'two-sided', 232.0 / 1100, 0.015739183865607353) + self._testOne(x100, x110_20_p1, 'greater', 232.0 / 1100, 0.007869594319053203) + self._testOne(x100, x110_20_p1, 'less', 0, 1) + self._testOne(x100, x110_20_m1, 'two-sided', 229.0 / 1100, 0.017803803861026313) + self._testOne(x100, x110_20_m1, 'greater', 229.0 / 1100, 0.008901905958245056) + self._testOne(x100, x110_20_m1, 'less', 0.0, 1.0) + + def testRepeatedValues(self): + x2233 = np.array([2] * 3 + [3] * 4 + [5] * 5 + [6] * 4, dtype=int) + x3344 = x2233 + 1 + x2356 = np.array([2] * 3 + [3] * 4 + [5] * 10 + [6] * 4, dtype=int) + x3467 = np.array([3] * 10 + [4] * 2 + [6] * 10 + [7] * 4, dtype=int) + self._testOne(x2233, x3344, 'two-sided', 5.0/16, 0.4262934613454952) + self._testOne(x2233, x3344, 'greater', 5.0/16, 0.21465428276573786) + self._testOne(x2233, x3344, 'less', 0.0/16, 1.0) + self._testOne(x2356, x3467, 
'two-sided', 190.0/21/26, 0.0919245790168125) + self._testOne(x2356, x3467, 'greater', 190.0/21/26, 0.0459633806858544) + self._testOne(x2356, x3467, 'less', 70.0/21/26, 0.6121593130022775) + + def testEqualSizes(self): + data2 = np.array([1.0, 2.0, 3.0]) + self._testOne(data2, data2+1, 'two-sided', 1.0/3, 1.0) + self._testOne(data2, data2+1, 'greater', 1.0/3, 0.75) + self._testOne(data2, data2+1, 'less', 0.0/3, 1.) + self._testOne(data2, data2+0.5, 'two-sided', 1.0/3, 1.0) + self._testOne(data2, data2+0.5, 'greater', 1.0/3, 0.75) + self._testOne(data2, data2+0.5, 'less', 0.0/3, 1.) + self._testOne(data2, data2-0.5, 'two-sided', 1.0/3, 1.0) + self._testOne(data2, data2-0.5, 'greater', 0.0/3, 1.0) + self._testOne(data2, data2-0.5, 'less', 1.0/3, 0.75) + + @pytest.mark.slow + def testMiddlingBoth(self): + # 500, 600 + n1, n2 = 500, 600 + delta = 1.0/n1/n2/2/2 + x = np.linspace(1, 200, n1) - delta + y = np.linspace(2, 200, n2) + self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0, + mode='auto') + self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0, + mode='asymp') + self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929, + mode='asymp') + self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, + mode='asymp') + with suppress_warnings() as sup: + message = "ks_2samp: Exact calculation unsuccessful." + sup.filter(RuntimeWarning, message) + self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929, + mode='exact') + self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, + mode='exact') + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, + mode='exact') + _check_warnings(w, RuntimeWarning, 1) + + @pytest.mark.slow + def testMediumBoth(self): + # 1000, 1100 + n1, n2 = 1000, 1100 + delta = 1.0/n1/n2/2/2 + x = np.linspace(1, 200, n1) - delta + y = np.linspace(2, 200, n2) + self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0, + mode='asymp') + self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0, + mode='auto') + self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622, + mode='asymp') + self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, + mode='asymp') + + with suppress_warnings() as sup: + message = "ks_2samp: Exact calculation unsuccessful." 
+ sup.filter(RuntimeWarning, message) + self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622, + mode='exact') + self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, + mode='exact') + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, + mode='exact') + _check_warnings(w, RuntimeWarning, 1) + + def testLarge(self): + # 10000, 110 + n1, n2 = 10000, 110 + lcm = n1*11.0 + delta = 1.0/n1/n2/2/2 + x = np.linspace(1, 200, n1) - delta + y = np.linspace(2, 100, n2) + self._testOne(x, y, 'two-sided', 55275.0 / lcm, 4.2188474935755949e-15) + self._testOne(x, y, 'greater', 561.0 / lcm, 0.99115454582047591) + self._testOne(x, y, 'less', 55275.0 / lcm, 3.1317328311518713e-26) + + def test_gh11184(self): + # 3000, 3001, exact two-sided + np.random.seed(123456) + x = np.random.normal(size=3000) + y = np.random.normal(size=3001) * 1.5 + self._testOne(x, y, 'two-sided', 0.11292880151060758, 2.7755575615628914e-15, + mode='asymp') + self._testOne(x, y, 'two-sided', 0.11292880151060758, 2.7755575615628914e-15, + mode='exact') + + @pytest.mark.xslow + def test_gh11184_bigger(self): + # 10000, 10001, exact two-sided + np.random.seed(123456) + x = np.random.normal(size=10000) + y = np.random.normal(size=10001) * 1.5 + self._testOne(x, y, 'two-sided', 0.10597913208679133, 3.3149311398483503e-49, + mode='asymp') + self._testOne(x, y, 'two-sided', 0.10597913208679133, 2.7755575615628914e-15, + mode='exact') + self._testOne(x, y, 'greater', 0.10597913208679133, 2.7947433906389253e-41, + mode='asymp') + self._testOne(x, y, 'less', 0.09658002199780022, 2.7947433906389253e-41, + mode='asymp') + + @pytest.mark.xslow + def test_gh12999(self): + np.random.seed(123456) + for x in range(1000, 12000, 1000): + vals1 = np.random.normal(size=(x)) + vals2 = np.random.normal(size=(x + 10), loc=0.5) + exact = stats.ks_2samp(vals1, vals2, mode='exact').pvalue + asymp = stats.ks_2samp(vals1, vals2, mode='asymp').pvalue + # these two p-values should be in line with each other + assert_array_less(exact, 3 * asymp) + assert_array_less(asymp, 3 * exact) + + @pytest.mark.slow + def testLargeBoth(self): + # 10000, 11000 + n1, n2 = 10000, 11000 + lcm = n1*11.0 + delta = 1.0/n1/n2/2/2 + x = np.linspace(1, 200, n1) - delta + y = np.linspace(2, 200, n2) + self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990660108966576, + mode='asymp') + self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990456491488628, + mode='exact') + self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990660108966576, + mode='auto') + self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673) + self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724) + with suppress_warnings() as sup: + message = "ks_2samp: Exact calculation unsuccessful." 
+            sup.filter(RuntimeWarning, message)
+            self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673,
+                          mode='exact')
+            self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724,
+                          mode='exact')
+
+    def testNamedAttributes(self):
+        # test for namedtuple attribute results
+        attributes = ('statistic', 'pvalue')
+        res = stats.ks_2samp([1, 2], [3])
+        check_named_results(res, attributes)
+
+    @pytest.mark.slow
+    def test_some_code_paths(self):
+        # Check that some code paths are executed
+        from scipy.stats._stats_py import (
+            _count_paths_outside_method,
+            _compute_outer_prob_inside_method
+        )
+
+        _compute_outer_prob_inside_method(1, 1, 1, 1)
+        _count_paths_outside_method(1000, 1, 1, 1001)
+
+        with np.errstate(invalid='raise'):
+            assert_raises(FloatingPointError, _count_paths_outside_method,
+                          1100, 1099, 1, 1)
+            assert_raises(FloatingPointError, _count_paths_outside_method,
+                          2000, 1000, 1, 1)
+
+    def test_argument_checking(self):
+        # Check that an empty array causes a ValueError
+        assert_raises(ValueError, stats.ks_2samp, [], [1])
+        assert_raises(ValueError, stats.ks_2samp, [1], [])
+        assert_raises(ValueError, stats.ks_2samp, [], [])
+
+    @pytest.mark.slow
+    def test_gh12218(self):
+        """Ensure gh-12218 is fixed."""
+        # gh-12218 triggered a TypeError calculating sqrt(n1*n2*(n1+n2));
+        # with n1 and n2 both large integers, the product exceeded 2**64.
+        np.random.seed(12345678)
+        n1 = 2097152  # 2**21
+        rvs1 = stats.uniform.rvs(size=n1, loc=0., scale=1)
+        rvs2 = rvs1 + 1  # Exact value of rvs2 doesn't matter.
+        stats.ks_2samp(rvs1, rvs2, alternative='greater', mode='asymp')
+        stats.ks_2samp(rvs1, rvs2, alternative='less', mode='asymp')
+        stats.ks_2samp(rvs1, rvs2, alternative='two-sided', mode='asymp')
+
+    def test_warnings_gh_14019(self):
+        # Check that RuntimeWarning is raised when method='auto' and exact
+        # p-value calculation fails. See gh-14019.
+        rng = np.random.RandomState(seed=23493549)
+        # random samples of the same size as in the issue
+        data1 = rng.random(size=881) + 0.5
+        data2 = rng.random(size=369)
+        message = "ks_2samp: Exact calculation unsuccessful"
+        with pytest.warns(RuntimeWarning, match=message):
+            res = stats.ks_2samp(data1, data2, alternative='less')
+            assert_allclose(res.pvalue, 0, atol=1e-14)
+
+    @pytest.mark.parametrize("ksfunc", [stats.kstest, stats.ks_2samp])
+    @pytest.mark.parametrize("alternative, x6val, ref_location, ref_sign",
+                             [('greater', 5.9, 5.9, +1),
+                              ('less', 6.1, 6.0, -1),
+                              ('two-sided', 5.9, 5.9, +1),
+                              ('two-sided', 6.1, 6.0, -1)])
+    def test_location_sign(self, ksfunc, alternative,
+                           x6val, ref_location, ref_sign):
+        # Test that the location and sign corresponding to the statistic
+        # are as expected.  (The test is designed to be easy to predict.)
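+        # Two-sample statistic: D = sup_t |ECDF_x(t) - ECDF_y(t)|.  y is an
+        # unperturbed copy of x, so nudging x[6] by -/+ 0.1 makes the two
+        # step functions differ on exactly one short interval, giving
+        # D = 1/10 with the expected location and sign.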
+ x = np.arange(10, dtype=np.float64) + y = x.copy() + x[6] = x6val + res = stats.ks_2samp(x, y, alternative=alternative) + assert res.statistic == 0.1 + assert res.statistic_location == ref_location + assert res.statistic_sign == ref_sign + + +def test_ttest_rel(): + # regression test + tr,pr = 0.81248591389165692, 0.41846234511362157 + tpr = ([tr,-tr],[pr,pr]) + + rvs1 = np.linspace(1,100,100) + rvs2 = np.linspace(1.01,99.989,100) + rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)]) + rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)]) + + t,p = stats.ttest_rel(rvs1, rvs2, axis=0) + assert_array_almost_equal([t,p],(tr,pr)) + t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0) + assert_array_almost_equal([t,p],tpr) + t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1) + assert_array_almost_equal([t,p],tpr) + + # test scalars + with suppress_warnings() as sup, \ + np.errstate(invalid="ignore", divide="ignore"): + sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") + t, p = stats.ttest_rel(4., 3.) + assert_(np.isnan(t)) + assert_(np.isnan(p)) + + # test for namedtuple attribute results + attributes = ('statistic', 'pvalue') + res = stats.ttest_rel(rvs1, rvs2, axis=0) + check_named_results(res, attributes) + + # test on 3 dimensions + rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D]) + rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D]) + t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1) + assert_array_almost_equal(np.abs(t), tr) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (2, 3)) + + t, p = stats.ttest_rel(np.moveaxis(rvs1_3D, 2, 0), + np.moveaxis(rvs2_3D, 2, 0), + axis=2) + assert_array_almost_equal(np.abs(t), tr) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (3, 2)) + + # test alternative parameter + assert_raises(ValueError, stats.ttest_rel, rvs1, rvs2, alternative="error") + + t, p = stats.ttest_rel(rvs1, rvs2, axis=0, alternative="less") + assert_allclose(p, 1 - pr/2) + assert_allclose(t, tr) + + t, p = stats.ttest_rel(rvs1, rvs2, axis=0, alternative="greater") + assert_allclose(p, pr/2) + assert_allclose(t, tr) + + # check nan policy + rng = np.random.RandomState(12345678) + x = stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng) + x[500] = np.nan + y = (stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng) + + stats.norm.rvs(scale=0.2, size=501, random_state=rng)) + y[500] = np.nan + + with np.errstate(invalid="ignore"): + assert_array_equal(stats.ttest_rel(x, x), (np.nan, np.nan)) + + assert_array_almost_equal(stats.ttest_rel(x, y, nan_policy='omit'), + (0.25299925303978066, 0.8003729814201519)) + assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='raise') + assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='foobar') + + # test zero division problem + with pytest.warns(RuntimeWarning, match="Precision loss occurred"): + t, p = stats.ttest_rel([0, 0, 0], [1, 1, 1]) + assert_equal((np.abs(t), p), (np.inf, 0)) + with np.errstate(invalid="ignore"): + assert_equal(stats.ttest_rel([0, 0, 0], [0, 0, 0]), (np.nan, np.nan)) + + # check that nan in input array result in nan output + anan = np.array([[1, np.nan], [-1, 1]]) + assert_equal(stats.ttest_rel(anan, np.zeros((2, 2))), + ([0, np.nan], [1, np.nan])) + + # test incorrect input shape raise an error + x = np.arange(24) + assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)), + x.reshape((2, 3, 4))) + + # Convert from two-sided p-values to one sided using T result data. 
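+    # By the symmetry of the t distribution, the one-sided p-value is p/2
+    # when the statistic points in the tested direction, and 1 - p/2
+    # otherwise -- which is exactly what `convert` implements.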
+ def convert(t, p, alt): + if (t < 0 and alt == "less") or (t > 0 and alt == "greater"): + return p / 2 + return 1 - (p / 2) + converter = np.vectorize(convert) + + rvs1_2D[:, 20:30] = np.nan + rvs2_2D[:, 15:25] = np.nan + + tr, pr = stats.ttest_rel(rvs1_2D, rvs2_2D, 0, nan_policy='omit') + + t, p = stats.ttest_rel(rvs1_2D, rvs2_2D, 0, nan_policy='omit', + alternative='less') + assert_allclose(t, tr, rtol=1e-14) + with np.errstate(invalid='ignore'): + assert_allclose(p, converter(tr, pr, 'less'), rtol=1e-14) + + t, p = stats.ttest_rel(rvs1_2D, rvs2_2D, 0, nan_policy='omit', + alternative='greater') + assert_allclose(t, tr, rtol=1e-14) + with np.errstate(invalid='ignore'): + assert_allclose(p, converter(tr, pr, 'greater'), rtol=1e-14) + + +def test_ttest_rel_nan_2nd_arg(): + # regression test for gh-6134: nans in the second arg were not handled + x = [np.nan, 2.0, 3.0, 4.0] + y = [1.0, 2.0, 1.0, 2.0] + + r1 = stats.ttest_rel(x, y, nan_policy='omit') + r2 = stats.ttest_rel(y, x, nan_policy='omit') + assert_allclose(r2.statistic, -r1.statistic, atol=1e-15) + assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15) + + # NB: arguments are paired when NaNs are dropped + r3 = stats.ttest_rel(y[1:], x[1:]) + assert_allclose(r2, r3, atol=1e-15) + + # .. and this is consistent with R. R code: + # x = c(NA, 2.0, 3.0, 4.0) + # y = c(1.0, 2.0, 1.0, 2.0) + # t.test(x, y, paired=TRUE) + assert_allclose(r2, (-2, 0.1835), atol=1e-4) + + +def test_ttest_rel_empty_1d_returns_nan(): + # Two empty inputs should return a TtestResult containing nan + # for both values. + result = stats.ttest_rel([], []) + assert isinstance(result, stats._stats_py.TtestResult) + assert_equal(result, (np.nan, np.nan)) + + +@pytest.mark.parametrize('b, expected_shape', + [(np.empty((1, 5, 0)), (3, 5)), + (np.empty((1, 0, 0)), (3, 0))]) +def test_ttest_rel_axis_size_zero(b, expected_shape): + # In this test, the length of the axis dimension is zero. + # The results should be arrays containing nan with shape + # given by the broadcast nonaxis dimensions. + a = np.empty((3, 1, 0)) + result = stats.ttest_rel(a, b, axis=-1) + assert isinstance(result, stats._stats_py.TtestResult) + expected_value = np.full(expected_shape, fill_value=np.nan) + assert_equal(result.statistic, expected_value) + assert_equal(result.pvalue, expected_value) + + +def test_ttest_rel_nonaxis_size_zero(): + # In this test, the length of the axis dimension is nonzero, + # but one of the nonaxis dimensions has length 0. Check that + # we still get the correctly broadcast shape, which is (5, 0) + # in this case. 
+ a = np.empty((1, 8, 0)) + b = np.empty((5, 8, 1)) + result = stats.ttest_rel(a, b, axis=1) + assert isinstance(result, stats._stats_py.TtestResult) + assert_equal(result.statistic.shape, (5, 0)) + assert_equal(result.pvalue.shape, (5, 0)) + + +@pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater']) +def test_ttest_rel_ci_1d(alternative): + # test confidence interval method against reference values + rng = np.random.default_rng(3749065329432213059) + n = 10 + x = rng.normal(size=n, loc=1.5, scale=2) + y = rng.normal(size=n, loc=2, scale=2) + # Reference values generated with R t.test: + # options(digits=16) + # x = c(1.22825792, 1.63950485, 4.39025641, 0.68609437, 2.03813481, + # -1.20040109, 1.81997937, 1.86854636, 2.94694282, 3.94291373) + # y = c(3.49961496, 1.53192536, 5.53620083, 2.91687718, 0.04858043, + # 3.78505943, 3.3077496 , 2.30468892, 3.42168074, 0.56797592) + # t.test(x, y, paired=TRUE, conf.level=0.85, alternative='l') + + ref = {'two-sided': [-1.912194489914035, 0.400169725914035], + 'greater': [-1.563944820311475, np.inf], + 'less': [-np.inf, 0.05192005631147523]} + res = stats.ttest_rel(x, y, alternative=alternative) + ci = res.confidence_interval(confidence_level=0.85) + assert_allclose(ci, ref[alternative]) + assert_equal(res.df, n-1) + + +@pytest.mark.parametrize("test_fun, args", + [(stats.ttest_1samp, (np.arange(10), 0)), + (stats.ttest_rel, (np.arange(10), np.arange(10)))]) +def test_ttest_ci_iv(test_fun, args): + # test `confidence_interval` method input validation + res = test_fun(*args) + message = '`confidence_level` must be a number between 0 and 1.' + with pytest.raises(ValueError, match=message): + res.confidence_interval(confidence_level=10) + + +def _desc_stats(x1, x2, axis=0): + def _stats(x, axis=0): + x = np.asarray(x) + mu = np.mean(x, axis=axis) + std = np.std(x, axis=axis, ddof=1) + nobs = x.shape[axis] + return mu, std, nobs + return _stats(x1, axis) + _stats(x2, axis) + + +def test_ttest_ind(): + # regression test + tr = 1.0912746897927283 + pr = 0.27647818616351882 + tpr = ([tr,-tr],[pr,pr]) + + rvs2 = np.linspace(1,100,100) + rvs1 = np.linspace(5,105,100) + rvs1_2D = np.array([rvs1, rvs2]) + rvs2_2D = np.array([rvs2, rvs1]) + + t,p = stats.ttest_ind(rvs1, rvs2, axis=0) + assert_array_almost_equal([t,p],(tr,pr)) + # test from_stats API + assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1, + rvs2)), + [t, p]) + t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0) + assert_array_almost_equal([t,p],tpr) + args = _desc_stats(rvs1_2D.T, rvs2_2D.T) + assert_array_almost_equal(stats.ttest_ind_from_stats(*args), + [t, p]) + t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1) + assert_array_almost_equal([t,p],tpr) + args = _desc_stats(rvs1_2D, rvs2_2D, axis=1) + assert_array_almost_equal(stats.ttest_ind_from_stats(*args), + [t, p]) + + # test scalars + with suppress_warnings() as sup, np.errstate(invalid="ignore"): + sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") + t, p = stats.ttest_ind(4., 3.) 
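+ # (With one observation per sample, df = n1 + n2 - 2 = 0, so the
+ # statistic and p-value are undefined and come back as nan.)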
+ assert_(np.isnan(t)) + assert_(np.isnan(p)) + + # test on 3 dimensions + rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D]) + rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D]) + t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1) + assert_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (2, 3)) + + t, p = stats.ttest_ind(np.moveaxis(rvs1_3D, 2, 0), + np.moveaxis(rvs2_3D, 2, 0), + axis=2) + assert_array_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (3, 2)) + + # test alternative parameter + assert_raises(ValueError, stats.ttest_ind, rvs1, rvs2, alternative="error") + assert_raises(ValueError, stats.ttest_ind_from_stats, + *_desc_stats(rvs1_2D.T, rvs2_2D.T), alternative="error") + + t, p = stats.ttest_ind(rvs1, rvs2, alternative="less") + assert_allclose(p, 1 - (pr/2)) + assert_allclose(t, tr) + + t, p = stats.ttest_ind(rvs1, rvs2, alternative="greater") + assert_allclose(p, pr/2) + assert_allclose(t, tr) + + # Below makes sure ttest_ind_from_stats p-val functions identically to + # ttest_ind + t, p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, alternative="less") + args = _desc_stats(rvs1_2D.T, rvs2_2D.T) + assert_allclose( + stats.ttest_ind_from_stats(*args, alternative="less"), [t, p]) + + t, p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, alternative="greater") + args = _desc_stats(rvs1_2D.T, rvs2_2D.T) + assert_allclose( + stats.ttest_ind_from_stats(*args, alternative="greater"), [t, p]) + + # check nan policy + rng = np.random.RandomState(12345678) + x = stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng) + x[500] = np.nan + y = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng) + + with np.errstate(invalid="ignore"): + assert_array_equal(stats.ttest_ind(x, y), (np.nan, np.nan)) + + assert_array_almost_equal(stats.ttest_ind(x, y, nan_policy='omit'), + (0.24779670949091914, 0.80434267337517906)) + assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='raise') + assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='foobar') + + # test zero division problem + with pytest.warns(RuntimeWarning, match="Precision loss occurred"): + t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1]) + assert_equal((np.abs(t), p), (np.inf, 0)) + + with np.errstate(invalid="ignore"): + assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0]), (np.nan, np.nan)) + + # check that nan in input array result in nan output + anan = np.array([[1, np.nan], [-1, 1]]) + assert_equal(stats.ttest_ind(anan, np.zeros((2, 2))), + ([0, np.nan], [1, np.nan])) + + rvs1_3D[:, :, 10:15] = np.nan + rvs2_3D[:, :, 6:12] = np.nan + + # Convert from two-sided p-values to one sided using T result data. 
+ def convert(t, p, alt): + if (t < 0 and alt == "less") or (t > 0 and alt == "greater"): + return p / 2 + return 1 - (p / 2) + converter = np.vectorize(convert) + + tr, pr = stats.ttest_ind(rvs1_3D, rvs2_3D, 0, nan_policy='omit') + + t, p = stats.ttest_ind(rvs1_3D, rvs2_3D, 0, nan_policy='omit', + alternative='less') + assert_allclose(t, tr, rtol=1e-14) + assert_allclose(p, converter(tr, pr, 'less'), rtol=1e-14) + + t, p = stats.ttest_ind(rvs1_3D, rvs2_3D, 0, nan_policy='omit', + alternative='greater') + assert_allclose(t, tr, rtol=1e-14) + assert_allclose(p, converter(tr, pr, 'greater'), rtol=1e-14) + + +class Test_ttest_ind_permutations: + N = 20 + + # data for most tests + np.random.seed(0) + a = np.vstack((np.arange(3*N//4), np.random.random(3*N//4))) + b = np.vstack((np.arange(N//4) + 100, np.random.random(N//4))) + + # data for equal variance tests + a2 = np.arange(10) + b2 = np.arange(10) + 100 + + # data for exact test + a3 = [1, 2] + b3 = [3, 4] + + # data for bigger test + np.random.seed(0) + rvs1 = stats.norm.rvs(loc=5, scale=10, # type: ignore + size=500).reshape(100, 5).T + rvs2 = stats.norm.rvs(loc=8, scale=20, size=100) # type: ignore + + p_d = [1/1001, (676+1)/1001] # desired pvalues + p_d_gen = [1/1001, (672 + 1)/1001] # desired pvalues for Generator seed + p_d_big = [(993+1)/1001, (685+1)/1001, (840+1)/1001, + (955+1)/1001, (255+1)/1001] + + params = [ + (a, b, {"axis": 1}, p_d), # basic test + (a.T, b.T, {'axis': 0}, p_d), # along axis 0 + (a[0, :], b[0, :], {'axis': None}, p_d[0]), # 1d data + (a[0, :].tolist(), b[0, :].tolist(), {'axis': None}, p_d[0]), + # different seeds + (a, b, {'random_state': 0, "axis": 1}, p_d), + (a, b, {'random_state': np.random.RandomState(0), "axis": 1}, p_d), + (a2, b2, {'equal_var': True}, 1/1001), # equal variances + (rvs1, rvs2, {'axis': -1, 'random_state': 0}, p_d_big), # bigger test + (a3, b3, {}, 1/3), # exact test + (a, b, {'random_state': np.random.default_rng(0), "axis": 1}, p_d_gen), + ] + + @pytest.mark.parametrize("a,b,update,p_d", params) + def test_ttest_ind_permutations(self, a, b, update, p_d): + options_a = {'axis': None, 'equal_var': False} + options_p = {'axis': None, 'equal_var': False, + 'permutations': 1000, 'random_state': 0} + options_a.update(update) + options_p.update(update) + + stat_a, _ = stats.ttest_ind(a, b, **options_a) + stat_p, pvalue = stats.ttest_ind(a, b, **options_p) + assert_array_almost_equal(stat_a, stat_p, 5) + assert_array_almost_equal(pvalue, p_d) + + def test_ttest_ind_exact_alternative(self): + np.random.seed(0) + N = 3 + a = np.random.rand(2, N, 2) + b = np.random.rand(2, N, 2) + + options_p = {'axis': 1, 'permutations': 1000} + + options_p.update(alternative="greater") + res_g_ab = stats.ttest_ind(a, b, **options_p) + res_g_ba = stats.ttest_ind(b, a, **options_p) + + options_p.update(alternative="less") + res_l_ab = stats.ttest_ind(a, b, **options_p) + res_l_ba = stats.ttest_ind(b, a, **options_p) + + options_p.update(alternative="two-sided") + res_2_ab = stats.ttest_ind(a, b, **options_p) + res_2_ba = stats.ttest_ind(b, a, **options_p) + + # Alternative doesn't affect the statistic + assert_equal(res_g_ab.statistic, res_l_ab.statistic) + assert_equal(res_g_ab.statistic, res_2_ab.statistic) + + # Reversing order of inputs negates statistic + assert_equal(res_g_ab.statistic, -res_g_ba.statistic) + assert_equal(res_l_ab.statistic, -res_l_ba.statistic) + assert_equal(res_2_ab.statistic, -res_2_ba.statistic) + + # Reversing order of inputs does not affect p-value of 2-sided test + 
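+ # (The two-sided p-value depends only on |t|, and swapping the inputs
+ # merely flips the sign of t.)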
+ assert_equal(res_2_ab.pvalue, res_2_ba.pvalue)
+
+ # In the exact test, the distribution is perfectly symmetric, so these
+ # identities are exactly satisfied.
+ assert_equal(res_g_ab.pvalue, res_l_ba.pvalue)
+ assert_equal(res_l_ab.pvalue, res_g_ba.pvalue)
+ mask = res_g_ab.pvalue <= 0.5
+ assert_equal(res_g_ab.pvalue[mask] + res_l_ba.pvalue[mask],
+ res_2_ab.pvalue[mask])
+ assert_equal(res_l_ab.pvalue[~mask] + res_g_ba.pvalue[~mask],
+ res_2_ab.pvalue[~mask])
+
+ def test_ttest_ind_exact_selection(self):
+ # test the various ways of activating the exact test
+ np.random.seed(0)
+ N = 3
+ a = np.random.rand(N)
+ b = np.random.rand(N)
+ res0 = stats.ttest_ind(a, b)
+ res1 = stats.ttest_ind(a, b, permutations=1000)
+ res2 = stats.ttest_ind(a, b, permutations=0)
+ res3 = stats.ttest_ind(a, b, permutations=np.inf)
+ assert res1.pvalue != res0.pvalue
+ assert res2.pvalue == res0.pvalue
+ assert res3.pvalue == res1.pvalue
+
+ def test_ttest_ind_exact_distribution(self):
+ # the exact distribution of the test statistic should have
+ # binom(na + nb, na) elements, all unique. This was not always true
+ # in gh-4824; fixed by gh-13661.
+ np.random.seed(0)
+ a = np.random.rand(3)
+ b = np.random.rand(4)
+
+ data = np.concatenate((a, b))
+ na, nb = len(a), len(b)
+
+ permutations = 100000
+ t_stat, _, _ = _permutation_distribution_t(data, permutations, na,
+ True)
+
+ n_unique = len(set(t_stat))
+ assert n_unique == binom(na + nb, na)
+ assert len(t_stat) == n_unique
+
+ def test_ttest_ind_randperm_alternative(self):
+ np.random.seed(0)
+ N = 50
+ a = np.random.rand(2, 3, N)
+ b = np.random.rand(3, N)
+ options_p = {'axis': -1, 'permutations': 1000, "random_state": 0}
+
+ options_p.update(alternative="greater")
+ res_g_ab = stats.ttest_ind(a, b, **options_p)
+ res_g_ba = stats.ttest_ind(b, a, **options_p)
+
+ options_p.update(alternative="less")
+ res_l_ab = stats.ttest_ind(a, b, **options_p)
+ res_l_ba = stats.ttest_ind(b, a, **options_p)
+
+ # Alternative doesn't affect the statistic
+ assert_equal(res_g_ab.statistic, res_l_ab.statistic)
+
+ # Reversing order of inputs negates statistic
+ assert_equal(res_g_ab.statistic, -res_g_ba.statistic)
+ assert_equal(res_l_ab.statistic, -res_l_ba.statistic)
+
+ # For random permutations, the chance of ties between the observed
+ # test statistic and the population is small, so:
+ assert_equal(res_g_ab.pvalue + res_l_ab.pvalue,
+ 1 + 1/(options_p['permutations'] + 1))
+ assert_equal(res_g_ba.pvalue + res_l_ba.pvalue,
+ 1 + 1/(options_p['permutations'] + 1))
+
+ @pytest.mark.slow()
+ def test_ttest_ind_randperm_alternative2(self):
+ np.random.seed(0)
+ N = 50
+ a = np.random.rand(N, 4)
+ b = np.random.rand(N, 4)
+ options_p = {'permutations': 20000, "random_state": 0}
+
+ options_p.update(alternative="greater")
+ res_g_ab = stats.ttest_ind(a, b, **options_p)
+
+ options_p.update(alternative="less")
+ res_l_ab = stats.ttest_ind(a, b, **options_p)
+
+ options_p.update(alternative="two-sided")
+ res_2_ab = stats.ttest_ind(a, b, **options_p)
+
+ # For random permutations, the chance of ties between the observed
+ # test statistic and the population is small, so:
+ assert_equal(res_g_ab.pvalue + res_l_ab.pvalue,
+ 1 + 1/(options_p['permutations'] + 1))
+
+ # For large sample sizes, the distribution should be approximately
+ # symmetric, so these identities should be approximately satisfied
+ mask = res_g_ab.pvalue <= 0.5
+ assert_allclose(2 * res_g_ab.pvalue[mask],
+ res_2_ab.pvalue[mask], atol=2e-2)
+ assert_allclose(2 * (1-res_g_ab.pvalue[~mask]),
+ res_2_ab.pvalue[~mask], atol=2e-2)
+ assert_allclose(2 * res_l_ab.pvalue[~mask],
+ res_2_ab.pvalue[~mask], atol=2e-2)
+ assert_allclose(2 * (1-res_l_ab.pvalue[mask]),
+ res_2_ab.pvalue[mask], atol=2e-2)
+
+ def test_ttest_ind_permutation_nanpolicy(self):
+ np.random.seed(0)
+ N = 50
+ a = np.random.rand(N, 5)
+ b = np.random.rand(N, 5)
+ a[5, 1] = np.nan
+ b[8, 2] = np.nan
+ a[9, 3] = np.nan
+ b[9, 3] = np.nan
+ options_p = {'permutations': 1000, "random_state": 0}
+
+ # Raise
+ options_p.update(nan_policy="raise")
+ with assert_raises(ValueError, match="The input contains nan values"):
+ res = stats.ttest_ind(a, b, **options_p)
+
+ # Propagate
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning, "invalid value*")
+ options_p.update(nan_policy="propagate")
+ res = stats.ttest_ind(a, b, **options_p)
+
+ mask = np.isnan(a).any(axis=0) | np.isnan(b).any(axis=0)
+ res2 = stats.ttest_ind(a[:, ~mask], b[:, ~mask], **options_p)
+
+ assert_equal(res.pvalue[mask], np.nan)
+ assert_equal(res.statistic[mask], np.nan)
+
+ assert_allclose(res.pvalue[~mask], res2.pvalue)
+ assert_allclose(res.statistic[~mask], res2.statistic)
+
+ # Propagate 1d
+ res = stats.ttest_ind(a.ravel(), b.ravel(), **options_p)
+ assert np.isnan(res.pvalue) # assert makes sure it's a scalar
+ assert np.isnan(res.statistic)
+
+ def test_ttest_ind_permutation_check_inputs(self):
+ with assert_raises(ValueError, match="Permutations must be"):
+ stats.ttest_ind(self.a2, self.b2, permutations=-3)
+ with assert_raises(ValueError, match="Permutations must be"):
+ stats.ttest_ind(self.a2, self.b2, permutations=1.5)
+ with assert_raises(ValueError, match="'hello' cannot be used"):
+ stats.ttest_ind(self.a, self.b, permutations=1,
+ random_state='hello', axis=1)
+
+ def test_ttest_ind_permutation_check_p_values(self):
+ # p-values should never be exactly zero
+ N = 10
+ a = np.random.rand(N, 20)
+ b = np.random.rand(N, 20)
+ p_values = stats.ttest_ind(a, b, permutations=1).pvalue
+ assert 0.0 not in p_values
+
+
+class Test_ttest_ind_common:
+ # for tests that are performed on variations of the t-test such as
+ # permutations and trimming
+ @pytest.mark.slow()
+ @pytest.mark.parametrize("kwds", [{'permutations': 200, 'random_state': 0},
+ {'trim': .2}, {}],
+ ids=["permutations", "trim", "basic"])
+ @pytest.mark.parametrize('equal_var', [True, False],
+ ids=['equal_var', 'unequal_var'])
+ def test_ttest_many_dims(self, kwds, equal_var):
+ # Test that the t-test works on many-dimensional arrays
+ np.random.seed(0)
+ a = np.random.rand(5, 4, 4, 7, 1, 6)
+ b = np.random.rand(4, 1, 8, 2, 6)
+ res = stats.ttest_ind(a, b, axis=-3, **kwds)
+
+ # compare fully-vectorized t-test against t-test on smaller slice
+ i, j, k = 2, 3, 1
+ a2 = a[i, :, j, :, 0, :]
+ b2 = b[:, 0, :, k, :]
+ res2 = stats.ttest_ind(a2, b2, axis=-2, **kwds)
+ assert_equal(res.statistic[i, :, j, k, :],
+ res2.statistic)
+ assert_equal(res.pvalue[i, :, j, k, :],
+ res2.pvalue)
+
+ # compare against t-test on one axis-slice at a time
+
+ # manually broadcast with tile; move axis to end to simplify
+ x = np.moveaxis(np.tile(a, (1, 1, 1, 1, 2, 1)), -3, -1)
+ y = np.moveaxis(np.tile(b, (5, 1, 4, 1, 1, 1)), -3, -1)
+ shape = x.shape[:-1]
+ statistics = np.zeros(shape)
+ pvalues = np.zeros(shape)
+ for indices in product(*(range(i) for i in shape)):
+ xi = x[indices] # use tuple to index single axis slice
+ yi = y[indices]
+ res3 = stats.ttest_ind(xi, yi, axis=-1, **kwds)
+ statistics[indices] = res3.statistic
+ pvalues[indices] = res3.pvalue
+
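+ # Each slice-wise statistic and p-value gathered in the loop must agree
+ # elementwise with the fully vectorized result computed above.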
+ assert_allclose(statistics, res.statistic)
+ assert_allclose(pvalues, res.pvalue)
+
+ @pytest.mark.parametrize("kwds", [{'permutations': 200, 'random_state': 0},
+ {'trim': .2}, {}],
+ ids=["permutations", "trim", "basic"])
+ @pytest.mark.parametrize("axis", [-1, 0])
+ def test_nans_on_axis(self, kwds, axis):
+ # confirm that with `nan_policy='propagate'`, NaN results are returned
+ # in the correct locations
+ a = np.random.randint(10, size=(5, 3, 10)).astype('float')
+ b = np.random.randint(10, size=(5, 3, 10)).astype('float')
+ # set some indices in `a` and `b` to be `np.nan`.
+ a[0][2][3] = np.nan
+ b[2][0][6] = np.nan
+
+ # arbitrarily use `np.sum` as a baseline for which indices should be
+ # NaNs
+ expected = np.isnan(np.sum(a + b, axis=axis))
+ # multidimensional inputs to `t.sf(np.abs(t), df)` with NaNs on some
+ # indices throw a warning. See issue gh-13844.
+ with suppress_warnings() as sup, np.errstate(invalid="ignore"):
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in less_equal")
+ sup.filter(RuntimeWarning, "Precision loss occurred")
+ res = stats.ttest_ind(a, b, axis=axis, **kwds)
+ p_nans = np.isnan(res.pvalue)
+ assert_array_equal(p_nans, expected)
+ statistic_nans = np.isnan(res.statistic)
+ assert_array_equal(statistic_nans, expected)
+
+
+class Test_ttest_trim:
+ params = [
+ [[1, 2, 3], [1.1, 2.9, 4.2], 0.53619490753126731, -0.6864951273557258,
+ .2],
+ [[56, 128.6, 12, 123.8, 64.34, 78, 763.3], [1.1, 2.9, 4.2],
+ 0.00998909252078421, 4.591598691181999, .2],
+ [[56, 128.6, 12, 123.8, 64.34, 78, 763.3], [1.1, 2.9, 4.2],
+ 0.10512380092302633, 2.832256715395378, .32],
+ [[2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9],
+ [6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1],
+ 0.002878909511344, -4.2461168970325, .2],
+ [[-0.84504783, 0.13366078, 3.53601757, -0.62908581, 0.54119466,
+ -1.16511574, -0.08836614, 1.18495416, 2.48028757, -1.58925028,
+ -1.6706357, 0.3090472, -2.12258305, 0.3697304, -1.0415207,
+ -0.57783497, -0.90997008, 1.09850192, 0.41270579, -1.4927376],
+ [1.2725522, 1.1657899, 2.7509041, 1.2389013, -0.9490494, -1.0752459,
+ 1.1038576, 2.9912821, 3.5349111, 0.4171922, 1.0168959, -0.7625041,
+ -0.4300008, 3.0431921, 1.6035947, 0.5285634, -0.7649405, 1.5575896,
+ 1.3670797, 1.1726023], 0.005293305834235, -3.0983317739483, .2]]
+
+ @pytest.mark.parametrize("a,b,pr,tr,trim", params)
+ def test_ttest_compare_r(self, a, b, pr, tr, trim):
+ '''
+ Using PairedData's yuen.t.test method. Something to note is that there
+ are at least 3 R packages that come with a trimmed t-test method, and
+ comparisons were made between them. It was found that PairedData's
+ method's results match this method, SAS, and one of the other R
+ methods. A notable discrepancy was the DescTools implementation of the
+ function, which only sometimes agreed with SAS, WRS2, PairedData and
+ this implementation. For this reason, most comparisons in R are made
+ against PairedData's method.
+ + Rather than providing the input and output for all evaluations, here is + a representative example: + > library(PairedData) + > a <- c(1, 2, 3) + > b <- c(1.1, 2.9, 4.2) + > options(digits=16) + > yuen.t.test(a, b, tr=.2) + + Two-sample Yuen test, trim=0.2 + + data: x and y + t = -0.68649512735573, df = 3.4104431643464, p-value = 0.5361949075313 + alternative hypothesis: true difference in trimmed means is not equal + to 0 + 95 percent confidence interval: + -3.912777195645217 2.446110528978550 + sample estimates: + trimmed mean of x trimmed mean of y + 2.000000000000000 2.73333333333333 + ''' + statistic, pvalue = stats.ttest_ind(a, b, trim=trim, equal_var=False) + assert_allclose(statistic, tr, atol=1e-15) + assert_allclose(pvalue, pr, atol=1e-15) + + def test_compare_SAS(self): + # Source of the data used in this test: + # https://support.sas.com/resources/papers/proceedings14/1660-2014.pdf + a = [12, 14, 18, 25, 32, 44, 12, 14, 18, 25, 32, 44] + b = [17, 22, 14, 12, 30, 29, 19, 17, 22, 14, 12, 30, 29, 19] + # In this paper, a trimming percentage of 5% is used. However, + # in their implementation, the number of values trimmed is rounded to + # the nearest whole number. However, consistent with + # `scipy.stats.trimmed_mean`, this test truncates to the lower + # whole number. In this example, the paper notes that 1 value is + # trimmed off of each side. 9% replicates this amount of trimming. + statistic, pvalue = stats.ttest_ind(a, b, trim=.09, equal_var=False) + assert_allclose(pvalue, 0.514522, atol=1e-6) + assert_allclose(statistic, 0.669169, atol=1e-6) + + def test_equal_var(self): + ''' + The PairedData library only supports unequal variances. To compare + samples with equal variances, the multicon library is used. + > library(multicon) + > a <- c(2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9) + > b <- c(6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1) + > dv = c(a,b) + > iv = c(rep('a', length(a)), rep('b', length(b))) + > yuenContrast(dv~ iv, EQVAR = TRUE) + $Ms + N M wgt + a 11 2.442857142857143 1 + b 11 5.385714285714286 -1 + + $test + stat df crit p + results -4.246116897032513 12 2.178812829667228 0.00113508833897713 + ''' + a = [2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9] + b = [6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1] + # `equal_var=True` is default + statistic, pvalue = stats.ttest_ind(a, b, trim=.2) + assert_allclose(pvalue, 0.00113508833897713, atol=1e-10) + assert_allclose(statistic, -4.246116897032513, atol=1e-10) + + @pytest.mark.parametrize('alt,pr,tr', + (('greater', 0.9985605452443, -4.2461168970325), + ('less', 0.001439454755672, -4.2461168970325),), + ) + def test_alternatives(self, alt, pr, tr): + ''' + > library(PairedData) + > a <- c(2.7,2.7,1.1,3.0,1.9,3.0,3.8,3.8,0.3,1.9,1.9) + > b <- c(6.5,5.4,8.1,3.5,0.5,3.8,6.8,4.9,9.5,6.2,4.1) + > options(digits=16) + > yuen.t.test(a, b, alternative = 'greater') + ''' + a = [2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9] + b = [6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1] + + statistic, pvalue = stats.ttest_ind(a, b, trim=.2, equal_var=False, + alternative=alt) + assert_allclose(pvalue, pr, atol=1e-10) + assert_allclose(statistic, tr, atol=1e-10) + + def test_errors_unsupported(self): + # confirm that attempting to trim with NaNs or permutations raises an + # error + match = "Permutations are currently not supported with trimming." 
+ with assert_raises(ValueError, match=match):
+ stats.ttest_ind([1, 2], [2, 3], trim=.2, permutations=2)
+
+ @pytest.mark.parametrize("trim", [-.2, .5, 1])
+ def test_trim_bounds_error(self, trim):
+ match = "Trimming percentage should be 0 <= `trim` < .5."
+ with assert_raises(ValueError, match=match):
+ stats.ttest_ind([1, 2], [2, 1], trim=trim)
+
+
+class Test_ttest_CI:
+ # indices in order [alternative={two-sided, less, greater},
+ # equal_var={False, True}, trim={0, 0.2}]
+ # reference values in order `statistic, df, pvalue, low, high`
+ # equal_var=False reference values computed with R PairedData yuen.t.test:
+ #
+ # library(PairedData)
+ # options(digits=16)
+ # a <- c(0.88236329, 0.97318744, 0.4549262, 0.97893335, 0.0606677,
+ # 0.44013366, 0.55806018, 0.40151434, 0.14453315, 0.25860601,
+ # 0.20202162)
+ # b <- c(0.93455277, 0.42680603, 0.49751939, 0.14152846, 0.711435,
+ # 0.77669667, 0.20507578, 0.78702772, 0.94691855, 0.32464958,
+ # 0.3873582, 0.35187468, 0.21731811)
+ # yuen.t.test(a, b, tr=0, conf.level = 0.9, alternative = 'l')
+ #
+ # equal_var=True reference values computed with R multicon yuenContrast:
+ #
+ # library(multicon)
+ # options(digits=16)
+ # a <- c(0.88236329, 0.97318744, 0.4549262, 0.97893335, 0.0606677,
+ # 0.44013366, 0.55806018, 0.40151434, 0.14453315, 0.25860601,
+ # 0.20202162)
+ # b <- c(0.93455277, 0.42680603, 0.49751939, 0.14152846, 0.711435,
+ # 0.77669667, 0.20507578, 0.78702772, 0.94691855, 0.32464958,
+ # 0.3873582, 0.35187468, 0.21731811)
+ # dv = c(a, b)
+ # iv = c(rep('a', length(a)), rep('b', length(b)))
+ # yuenContrast(dv~iv, EQVAR = TRUE, alternative = 'unequal', tr = 0.2)
+ r = np.empty(shape=(3, 2, 2, 5))
+ r[0, 0, 0] = [-0.2314607, 19.894435, 0.8193209, -0.247220294, 0.188729943]
+ r[1, 0, 0] = [-0.2314607, 19.894435, 0.40966045, -np.inf, 0.1382426469]
+ r[2, 0, 0] = [-0.2314607, 19.894435, 0.5903395, -0.1967329982, np.inf]
+ r[0, 0, 1] = [-0.2452886, 11.427896, 0.8105823, -0.34057446, 0.25847383]
+ r[1, 0, 1] = [-0.2452886, 11.427896, 0.40529115, -np.inf, 0.1865829074]
+ r[2, 0, 1] = [-0.2452886, 11.427896, 0.5947089, -0.268683541, np.inf]
+ # confidence interval not available for equal_var=True
+ r[0, 1, 0] = [-0.2345625322555006, 22, 0.8167175905643815, None, None]
+ r[1, 1, 0] = [-0.2345625322555006, 22, 0.4083587952821908, None, None]
+ r[2, 1, 0] = [-0.2345625322555006, 22, 0.5916412047178092, None, None]
+ r[0, 1, 1] = [-0.2505369406507428, 14, 0.8058115135702835, None, None]
+ r[1, 1, 1] = [-0.2505369406507428, 14, 0.4029057567851417, None, None]
+ r[2, 1, 1] = [-0.2505369406507428, 14, 0.5970942432148583, None, None]
+ @pytest.mark.parametrize('alternative', ['two-sided', 'less', 'greater'])
+ @pytest.mark.parametrize('equal_var', [False, True])
+ @pytest.mark.parametrize('trim', [0, 0.2])
+ def test_confidence_interval(self, alternative, equal_var, trim):
+ if equal_var and trim:
+ pytest.xfail('Discrepancy in `main`; needs further investigation.')
+
+ rng = np.random.default_rng(3810954496107292580)
+ x = rng.random(11)
+ y = rng.random(13)
+
+ res = stats.ttest_ind(x, y, alternative=alternative,
+ equal_var=equal_var, trim=trim)
+
+ alternatives = {'two-sided': 0, 'less': 1, 'greater': 2}
+ ref = self.r[alternatives[alternative], int(equal_var), int(np.ceil(trim))]
+ statistic, df, pvalue, low, high = ref
+ assert_allclose(res.statistic, statistic)
+ assert_allclose(res.df, df)
+ assert_allclose(res.pvalue, pvalue)
+ if not equal_var:  # CI not available when `equal_var is True`
+ ci = res.confidence_interval(0.9)
+
assert_allclose(ci.low, low) + assert_allclose(ci.high, high) + + +def test__broadcast_concatenate(): + # test that _broadcast_concatenate properly broadcasts arrays along all + # axes except `axis`, then concatenates along axis + np.random.seed(0) + a = np.random.rand(5, 4, 4, 3, 1, 6) + b = np.random.rand(4, 1, 8, 2, 6) + c = _broadcast_concatenate((a, b), axis=-3) + # broadcast manually as an independent check + a = np.tile(a, (1, 1, 1, 1, 2, 1)) + b = np.tile(b[None, ...], (5, 1, 4, 1, 1, 1)) + for index in product(*(range(i) for i in c.shape)): + i, j, k, l, m, n = index + if l < a.shape[-3]: + assert a[i, j, k, l, m, n] == c[i, j, k, l, m, n] + else: + assert b[i, j, k, l - a.shape[-3], m, n] == c[i, j, k, l, m, n] + + +def test_ttest_ind_with_uneq_var(): + # check vs. R + a = (1, 2, 3) + b = (1.1, 2.9, 4.2) + pr = 0.53619490753126731 + tr = -0.68649512735572582 + t, p = stats.ttest_ind(a, b, equal_var=False) + assert_array_almost_equal([t,p], [tr, pr]) + # test from desc stats API + assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b), + equal_var=False), + [t, p]) + + a = (1, 2, 3, 4) + pr = 0.84354139131608286 + tr = -0.2108663315950719 + t, p = stats.ttest_ind(a, b, equal_var=False) + assert_array_almost_equal([t,p], [tr, pr]) + assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b), + equal_var=False), + [t, p]) + + # regression test + tr = 1.0912746897927283 + tr_uneq_n = 0.66745638708050492 + pr = 0.27647831993021388 + pr_uneq_n = 0.50873585065616544 + tpr = ([tr,-tr],[pr,pr]) + + rvs3 = np.linspace(1,100, 25) + rvs2 = np.linspace(1,100,100) + rvs1 = np.linspace(5,105,100) + rvs1_2D = np.array([rvs1, rvs2]) + + rvs2_2D = np.array([rvs2, rvs1]) + + t,p = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False) + assert_array_almost_equal([t,p],(tr,pr)) + assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1, + rvs2), + equal_var=False), + (t, p)) + + t,p = stats.ttest_ind(rvs1, rvs3, axis=0, equal_var=False) + assert_array_almost_equal([t,p], (tr_uneq_n, pr_uneq_n)) + assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1, + rvs3), + equal_var=False), + (t, p)) + + t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, equal_var=False) + assert_array_almost_equal([t,p],tpr) + args = _desc_stats(rvs1_2D.T, rvs2_2D.T) + assert_array_almost_equal(stats.ttest_ind_from_stats(*args, + equal_var=False), + (t, p)) + + t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1, equal_var=False) + assert_array_almost_equal([t,p],tpr) + args = _desc_stats(rvs1_2D, rvs2_2D, axis=1) + assert_array_almost_equal(stats.ttest_ind_from_stats(*args, + equal_var=False), + (t, p)) + + # test for namedtuple attribute results + attributes = ('statistic', 'pvalue') + res = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False) + check_named_results(res, attributes) + + # test on 3 dimensions + rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D]) + rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D]) + t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1, equal_var=False) + assert_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (2, 3)) + args = _desc_stats(rvs1_3D, rvs2_3D, axis=1) + t, p = stats.ttest_ind_from_stats(*args, equal_var=False) + assert_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (2, 3)) + + t, p = stats.ttest_ind(np.moveaxis(rvs1_3D, 2, 0), + np.moveaxis(rvs2_3D, 2, 0), + axis=2, equal_var=False) + assert_array_almost_equal(np.abs(t), np.abs(tr)) 
+ assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (3, 2)) + args = _desc_stats(np.moveaxis(rvs1_3D, 2, 0), + np.moveaxis(rvs2_3D, 2, 0), axis=2) + t, p = stats.ttest_ind_from_stats(*args, equal_var=False) + assert_array_almost_equal(np.abs(t), np.abs(tr)) + assert_array_almost_equal(np.abs(p), pr) + assert_equal(t.shape, (3, 2)) + + # test zero division problem + with pytest.warns(RuntimeWarning, match="Precision loss occurred"): + t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False) + assert_equal((np.abs(t), p), (np.inf, 0)) + with np.errstate(all='ignore'): + assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0], equal_var=False), + (np.nan, np.nan)) + + # check that nan in input array result in nan output + anan = np.array([[1, np.nan], [-1, 1]]) + assert_equal(stats.ttest_ind(anan, np.zeros((2, 2)), equal_var=False), + ([0, np.nan], [1, np.nan])) + + +def test_ttest_ind_nan_2nd_arg(): + # regression test for gh-6134: nans in the second arg were not handled + x = [np.nan, 2.0, 3.0, 4.0] + y = [1.0, 2.0, 1.0, 2.0] + + r1 = stats.ttest_ind(x, y, nan_policy='omit') + r2 = stats.ttest_ind(y, x, nan_policy='omit') + assert_allclose(r2.statistic, -r1.statistic, atol=1e-15) + assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15) + + # NB: arguments are not paired when NaNs are dropped + r3 = stats.ttest_ind(y, x[1:]) + assert_allclose(r2, r3, atol=1e-15) + + # .. and this is consistent with R. R code: + # x = c(NA, 2.0, 3.0, 4.0) + # y = c(1.0, 2.0, 1.0, 2.0) + # t.test(x, y, var.equal=TRUE) + assert_allclose(r2, (-2.5354627641855498, 0.052181400457057901), + atol=1e-15) + + +def test_ttest_ind_empty_1d_returns_nan(): + # Two empty inputs should return a TtestResult containing nan + # for both values. + result = stats.ttest_ind([], []) + assert isinstance(result, stats._stats_py.TtestResult) + assert_equal(result, (np.nan, np.nan)) + + +@pytest.mark.parametrize('b, expected_shape', + [(np.empty((1, 5, 0)), (3, 5)), + (np.empty((1, 0, 0)), (3, 0))]) +def test_ttest_ind_axis_size_zero(b, expected_shape): + # In this test, the length of the axis dimension is zero. + # The results should be arrays containing nan with shape + # given by the broadcast nonaxis dimensions. + a = np.empty((3, 1, 0)) + result = stats.ttest_ind(a, b, axis=-1) + assert isinstance(result, stats._stats_py.TtestResult) + expected_value = np.full(expected_shape, fill_value=np.nan) + assert_equal(result.statistic, expected_value) + assert_equal(result.pvalue, expected_value) + + +def test_ttest_ind_nonaxis_size_zero(): + # In this test, the length of the axis dimension is nonzero, + # but one of the nonaxis dimensions has length 0. Check that + # we still get the correctly broadcast shape, which is (5, 0) + # in this case. + a = np.empty((1, 8, 0)) + b = np.empty((5, 8, 1)) + result = stats.ttest_ind(a, b, axis=1) + assert isinstance(result, stats._stats_py.TtestResult) + assert_equal(result.statistic.shape, (5, 0)) + assert_equal(result.pvalue.shape, (5, 0)) + + +def test_ttest_ind_nonaxis_size_zero_different_lengths(): + # In this test, the length of the axis dimension is nonzero, + # and that size is different in the two inputs, + # and one of the nonaxis dimensions has length 0. Check that + # we still get the correctly broadcast shape, which is (5, 0) + # in this case. 
+ a = np.empty((1, 7, 0)) + b = np.empty((5, 8, 1)) + result = stats.ttest_ind(a, b, axis=1) + assert isinstance(result, stats._stats_py.TtestResult) + assert_equal(result.statistic.shape, (5, 0)) + assert_equal(result.pvalue.shape, (5, 0)) + + +def test_gh5686(): + mean1, mean2 = np.array([1, 2]), np.array([3, 4]) + std1, std2 = np.array([5, 3]), np.array([4, 5]) + nobs1, nobs2 = np.array([130, 140]), np.array([100, 150]) + # This will raise a TypeError unless gh-5686 is fixed. + stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2) + + +def test_ttest_ind_from_stats_inputs_zero(): + # Regression test for gh-6409. + result = stats.ttest_ind_from_stats(0, 0, 6, 0, 0, 6, equal_var=False) + assert_equal(result, [np.nan, np.nan]) + + +def test_ttest_single_observation(): + # test that p-values are uniformly distributed under the null hypothesis + rng = np.random.default_rng(246834602926842) + x = rng.normal(size=(10000, 2)) + y = rng.normal(size=(10000, 1)) + q = rng.uniform(size=100) + + res = stats.ttest_ind(x, y, equal_var=True, axis=-1) + assert stats.ks_1samp(res.pvalue, stats.uniform().cdf).pvalue > 0.1 + assert_allclose(np.percentile(res.pvalue, q*100), q, atol=1e-2) + + res = stats.ttest_ind(y, x, equal_var=True, axis=-1) + assert stats.ks_1samp(res.pvalue, stats.uniform().cdf).pvalue > 0.1 + assert_allclose(np.percentile(res.pvalue, q*100), q, atol=1e-2) + + # reference values from R: + # options(digits=16) + # t.test(c(2, 3, 5), c(1.5), var.equal=TRUE) + res = stats.ttest_ind([2, 3, 5], [1.5], equal_var=True) + assert_allclose(res, (1.0394023007754, 0.407779907736), rtol=1e-10) + + +def test_ttest_1samp_new(): + n1, n2, n3 = (10,15,20) + rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3)) + + # check multidimensional array and correct axis handling + # deterministic rvn1 and rvn2 would be better as in test_ttest_rel + t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0) + t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0) + t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1) + assert_array_almost_equal(t1,t2, decimal=14) + assert_almost_equal(t1[0,0],t3, decimal=14) + assert_equal(t1.shape, (n2,n3)) + + t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1, 1, n3)),axis=1) + t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1) + t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1) + assert_array_almost_equal(t1,t2, decimal=14) + assert_almost_equal(t1[0,0],t3, decimal=14) + assert_equal(t1.shape, (n1,n3)) + + t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2,1)),axis=2) + t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2) + t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1) + assert_array_almost_equal(t1,t2, decimal=14) + assert_almost_equal(t1[0,0],t3, decimal=14) + assert_equal(t1.shape, (n1,n2)) + + # test zero division problem + t, p = stats.ttest_1samp([0, 0, 0], 1) + assert_equal((np.abs(t), p), (np.inf, 0)) + + # test alternative parameter + # Convert from two-sided p-values to one sided using T result data. 
+ def convert(t, p, alt): + if (t < 0 and alt == "less") or (t > 0 and alt == "greater"): + return p / 2 + return 1 - (p / 2) + converter = np.vectorize(convert) + tr, pr = stats.ttest_1samp(rvn1[:, :, :], 1) + + t, p = stats.ttest_1samp(rvn1[:, :, :], 1, alternative="greater") + pc = converter(tr, pr, "greater") + assert_allclose(p, pc) + assert_allclose(t, tr) + + t, p = stats.ttest_1samp(rvn1[:, :, :], 1, alternative="less") + pc = converter(tr, pr, "less") + assert_allclose(p, pc) + assert_allclose(t, tr) + + with np.errstate(all='ignore'): + assert_equal(stats.ttest_1samp([0, 0, 0], 0), (np.nan, np.nan)) + + # check that nan in input array result in nan output + anan = np.array([[1, np.nan],[-1, 1]]) + assert_equal(stats.ttest_1samp(anan, 0), ([0, np.nan], [1, np.nan])) + + rvn1[0:2, 1:3, 4:8] = np.nan + + tr, pr = stats.ttest_1samp(rvn1[:, :, :], 1, nan_policy='omit') + + t, p = stats.ttest_1samp(rvn1[:, :, :], 1, nan_policy='omit', + alternative="greater") + pc = converter(tr, pr, "greater") + assert_allclose(p, pc) + assert_allclose(t, tr) + + t, p = stats.ttest_1samp(rvn1[:, :, :], 1, nan_policy='omit', + alternative="less") + pc = converter(tr, pr, "less") + assert_allclose(p, pc) + assert_allclose(t, tr) + + +def test_ttest_1samp_popmean_array(): + # when popmean.shape[axis] != 1, raise an error + # if the user wants to test multiple null hypotheses simultaneously, + # use standard broadcasting rules + rng = np.random.default_rng(2913300596553337193) + x = rng.random(size=(1, 15, 20)) + + message = r"`popmean.shape\[axis\]` must equal 1." + popmean = rng.random(size=(5, 2, 20)) + with pytest.raises(ValueError, match=message): + stats.ttest_1samp(x, popmean=popmean, axis=-2) + + popmean = rng.random(size=(5, 1, 20)) + res = stats.ttest_1samp(x, popmean=popmean, axis=-2) + assert res.statistic.shape == (5, 20) + + ci = np.expand_dims(res.confidence_interval(), axis=-2) + res = stats.ttest_1samp(x, popmean=ci, axis=-2) + assert_allclose(res.pvalue, 0.05) + + +class TestDescribe: + def test_describe_scalar(self): + with suppress_warnings() as sup, \ + np.errstate(invalid="ignore", divide="ignore"): + sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") + n, mm, m, v, sk, kurt = stats.describe(4.) + assert_equal(n, 1) + assert_equal(mm, (4.0, 4.0)) + assert_equal(m, 4.0) + assert np.isnan(v) + assert np.isnan(sk) + assert np.isnan(kurt) + + def test_describe_numbers(self): + x = np.vstack((np.ones((3,4)), np.full((2, 4), 2))) + nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.])) + mc = np.array([1.4, 1.4, 1.4, 1.4]) + vc = np.array([0.3, 0.3, 0.3, 0.3]) + skc = [0.40824829046386357] * 4 + kurtc = [-1.833333333333333] * 4 + n, mm, m, v, sk, kurt = stats.describe(x) + assert_equal(n, nc) + assert_equal(mm, mmc) + assert_equal(m, mc) + assert_equal(v, vc) + assert_array_almost_equal(sk, skc, decimal=13) + assert_array_almost_equal(kurt, kurtc, decimal=13) + n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1) + assert_equal(n, nc) + assert_equal(mm, mmc) + assert_equal(m, mc) + assert_equal(v, vc) + assert_array_almost_equal(sk, skc, decimal=13) + assert_array_almost_equal(kurt, kurtc, decimal=13) + + x = np.arange(10.) 
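+ # Setting the last element to NaN below: with nan_policy='omit',
+ # describe() then sees the values 0..8, so by hand: nobs = 9,
+ # minmax = (0, 8), mean = 4.0, and variance (ddof=1)
+ # = sum((i - 4)**2 for i in range(9)) / 8 = 60 / 8 = 7.5.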
+ x[9] = np.nan + + nc, mmc = (9, (0.0, 8.0)) + mc = 4.0 + vc = 7.5 + skc = 0.0 + kurtc = -1.2300000000000002 + n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='omit') + assert_equal(n, nc) + assert_equal(mm, mmc) + assert_equal(m, mc) + assert_equal(v, vc) + assert_array_almost_equal(sk, skc) + assert_array_almost_equal(kurt, kurtc, decimal=13) + + assert_raises(ValueError, stats.describe, x, nan_policy='raise') + assert_raises(ValueError, stats.describe, x, nan_policy='foobar') + + def test_describe_result_attributes(self): + actual = stats.describe(np.arange(5)) + attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness', + 'kurtosis') + check_named_results(actual, attributes) + + def test_describe_ddof(self): + x = np.vstack((np.ones((3, 4)), np.full((2, 4), 2))) + nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.])) + mc = np.array([1.4, 1.4, 1.4, 1.4]) + vc = np.array([0.24, 0.24, 0.24, 0.24]) + skc = [0.40824829046386357] * 4 + kurtc = [-1.833333333333333] * 4 + n, mm, m, v, sk, kurt = stats.describe(x, ddof=0) + assert_equal(n, nc) + assert_allclose(mm, mmc, rtol=1e-15) + assert_allclose(m, mc, rtol=1e-15) + assert_allclose(v, vc, rtol=1e-15) + assert_array_almost_equal(sk, skc, decimal=13) + assert_array_almost_equal(kurt, kurtc, decimal=13) + + def test_describe_axis_none(self): + x = np.vstack((np.ones((3, 4)), np.full((2, 4), 2))) + + # expected values + e_nobs, e_minmax = (20, (1.0, 2.0)) + e_mean = 1.3999999999999999 + e_var = 0.25263157894736848 + e_skew = 0.4082482904638634 + e_kurt = -1.8333333333333333 + + # actual values + a = stats.describe(x, axis=None) + + assert_equal(a.nobs, e_nobs) + assert_almost_equal(a.minmax, e_minmax) + assert_almost_equal(a.mean, e_mean) + assert_almost_equal(a.variance, e_var) + assert_array_almost_equal(a.skewness, e_skew, decimal=13) + assert_array_almost_equal(a.kurtosis, e_kurt, decimal=13) + + def test_describe_empty(self): + assert_raises(ValueError, stats.describe, []) + + +def test_normalitytests(): + assert_raises(ValueError, stats.skewtest, 4.) + assert_raises(ValueError, stats.kurtosistest, 4.) + assert_raises(ValueError, stats.normaltest, 4.) + + # numbers verified with R: dagoTest in package fBasics + st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734) + pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019) + pv_skew_less, pv_kurt_less = 1 - pv_skew / 2, pv_kurt / 2 + pv_skew_greater, pv_kurt_greater = pv_skew / 2, 1 - pv_kurt / 2 + x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 + attributes = ('statistic', 'pvalue') + + assert_array_almost_equal(stats.normaltest(x), (st_normal, pv_normal)) + check_named_results(stats.normaltest(x), attributes) + assert_array_almost_equal(stats.skewtest(x), (st_skew, pv_skew)) + assert_array_almost_equal(stats.skewtest(x, alternative='less'), + (st_skew, pv_skew_less)) + assert_array_almost_equal(stats.skewtest(x, alternative='greater'), + (st_skew, pv_skew_greater)) + check_named_results(stats.skewtest(x), attributes) + assert_array_almost_equal(stats.kurtosistest(x), (st_kurt, pv_kurt)) + assert_array_almost_equal(stats.kurtosistest(x, alternative='less'), + (st_kurt, pv_kurt_less)) + assert_array_almost_equal(stats.kurtosistest(x, alternative='greater'), + (st_kurt, pv_kurt_greater)) + check_named_results(stats.kurtosistest(x), attributes) + + # some more intuitive tests for kurtosistest and skewtest. + # see gh-13549. 
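+ # The idea: sample a distribution whose skewness (or excess kurtosis)
+ # is known to be positive, then check that the one-sided test with
+ # alternative='greater' rejects decisively (p-value near 0) on a
+ # large sample.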
+ # skew parameter is 1 > 0 + a1 = stats.skewnorm.rvs(a=1, size=10000, random_state=123) + pval = stats.skewtest(a1, alternative='greater').pvalue + assert_almost_equal(pval, 0.0, decimal=5) + # excess kurtosis of laplace is 3 > 0 + a2 = stats.laplace.rvs(size=10000, random_state=123) + pval = stats.kurtosistest(a2, alternative='greater').pvalue + assert_almost_equal(pval, 0.0) + + # Test axis=None (equal to axis=0 for 1-D input) + assert_array_almost_equal(stats.normaltest(x, axis=None), + (st_normal, pv_normal)) + assert_array_almost_equal(stats.skewtest(x, axis=None), + (st_skew, pv_skew)) + assert_array_almost_equal(stats.kurtosistest(x, axis=None), + (st_kurt, pv_kurt)) + + x = np.arange(10.) + x[9] = np.nan + with np.errstate(invalid="ignore"): + assert_array_equal(stats.skewtest(x), (np.nan, np.nan)) + + expected = (1.0184643553962129, 0.30845733195153502) + assert_array_almost_equal(stats.skewtest(x, nan_policy='omit'), expected) + + # test alternative with nan_policy='omit' + a1[10:100] = np.nan + z, p = stats.skewtest(a1, nan_policy='omit') + zl, pl = stats.skewtest(a1, nan_policy='omit', alternative='less') + zg, pg = stats.skewtest(a1, nan_policy='omit', alternative='greater') + assert_allclose(zl, z, atol=1e-15) + assert_allclose(zg, z, atol=1e-15) + assert_allclose(pl, 1 - p/2, atol=1e-15) + assert_allclose(pg, p/2, atol=1e-15) + + with np.errstate(all='ignore'): + assert_raises(ValueError, stats.skewtest, x, nan_policy='raise') + assert_raises(ValueError, stats.skewtest, x, nan_policy='foobar') + assert_raises(ValueError, stats.skewtest, list(range(8)), + alternative='foobar') + + x = np.arange(30.) + x[29] = np.nan + with np.errstate(all='ignore'): + assert_array_equal(stats.kurtosistest(x), (np.nan, np.nan)) + + expected = (-2.2683547379505273, 0.023307594135872967) + assert_array_almost_equal(stats.kurtosistest(x, nan_policy='omit'), + expected) + + # test alternative with nan_policy='omit' + a2[10:20] = np.nan + z, p = stats.kurtosistest(a2[:100], nan_policy='omit') + zl, pl = stats.kurtosistest(a2[:100], nan_policy='omit', + alternative='less') + zg, pg = stats.kurtosistest(a2[:100], nan_policy='omit', + alternative='greater') + assert_allclose(zl, z, atol=1e-15) + assert_allclose(zg, z, atol=1e-15) + assert_allclose(pl, 1 - p/2, atol=1e-15) + assert_allclose(pg, p/2, atol=1e-15) + + assert_raises(ValueError, stats.kurtosistest, x, nan_policy='raise') + assert_raises(ValueError, stats.kurtosistest, x, nan_policy='foobar') + assert_raises(ValueError, stats.kurtosistest, list(range(20)), + alternative='foobar') + + with np.errstate(all='ignore'): + assert_array_equal(stats.normaltest(x), (np.nan, np.nan)) + + expected = (6.2260409514287449, 0.04446644248650191) + assert_array_almost_equal(stats.normaltest(x, nan_policy='omit'), expected) + + assert_raises(ValueError, stats.normaltest, x, nan_policy='raise') + assert_raises(ValueError, stats.normaltest, x, nan_policy='foobar') + + # regression test for issue gh-9033: x clearly non-normal but power of + # negative denom needs to be handled correctly to reject normality + counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167] + x = np.hstack([np.full(c, i) for i, c in enumerate(counts)]) + assert_equal(stats.kurtosistest(x)[1] < 0.01, True) + + +class TestRankSums: + + np.random.seed(0) + x, y = np.random.rand(2, 10) + + @pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided']) + def test_ranksums_result_attributes(self, alternative): + # ranksums pval = mannwhitneyu pval w/out continuity or tie correction + res1 = 
stats.ranksums(self.x, self.y, + alternative=alternative).pvalue + res2 = stats.mannwhitneyu(self.x, self.y, use_continuity=False, + alternative=alternative).pvalue + assert_allclose(res1, res2) + + def test_ranksums_named_results(self): + res = stats.ranksums(self.x, self.y) + check_named_results(res, ('statistic', 'pvalue')) + + def test_input_validation(self): + with assert_raises(ValueError, match="`alternative` must be 'less'"): + stats.ranksums(self.x, self.y, alternative='foobar') + + +class TestJarqueBera: + def test_jarque_bera_stats(self): + np.random.seed(987654321) + x = np.random.normal(0, 1, 100000) + y = np.random.chisquare(10000, 100000) + z = np.random.rayleigh(1, 100000) + + assert_equal(stats.jarque_bera(x)[0], stats.jarque_bera(x).statistic) + assert_equal(stats.jarque_bera(x)[1], stats.jarque_bera(x).pvalue) + + assert_equal(stats.jarque_bera(y)[0], stats.jarque_bera(y).statistic) + assert_equal(stats.jarque_bera(y)[1], stats.jarque_bera(y).pvalue) + + assert_equal(stats.jarque_bera(z)[0], stats.jarque_bera(z).statistic) + assert_equal(stats.jarque_bera(z)[1], stats.jarque_bera(z).pvalue) + + assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(y)[1]) + assert_(stats.jarque_bera(x).pvalue > stats.jarque_bera(y).pvalue) + + assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(z)[1]) + assert_(stats.jarque_bera(x).pvalue > stats.jarque_bera(z).pvalue) + + assert_(stats.jarque_bera(y)[1] > stats.jarque_bera(z)[1]) + assert_(stats.jarque_bera(y).pvalue > stats.jarque_bera(z).pvalue) + + def test_jarque_bera_array_like(self): + np.random.seed(987654321) + x = np.random.normal(0, 1, 100000) + + jb_test1 = JB1, p1 = stats.jarque_bera(list(x)) + jb_test2 = JB2, p2 = stats.jarque_bera(tuple(x)) + jb_test3 = JB3, p3 = stats.jarque_bera(x.reshape(2, 50000)) + + assert JB1 == JB2 == JB3 == jb_test1.statistic == jb_test2.statistic == jb_test3.statistic # noqa: E501 + assert p1 == p2 == p3 == jb_test1.pvalue == jb_test2.pvalue == jb_test3.pvalue + + def test_jarque_bera_size(self): + assert_raises(ValueError, stats.jarque_bera, []) + + def test_axis(self): + rng = np.random.RandomState(seed=122398129) + x = rng.random(size=(2, 45)) + + assert_equal(stats.jarque_bera(x, axis=None), + stats.jarque_bera(x.ravel())) + + res = stats.jarque_bera(x, axis=1) + s0, p0 = stats.jarque_bera(x[0, :]) + s1, p1 = stats.jarque_bera(x[1, :]) + assert_allclose(res.statistic, [s0, s1]) + assert_allclose(res.pvalue, [p0, p1]) + + resT = stats.jarque_bera(x.T, axis=0) + assert_allclose(res, resT) + + +def test_skewtest_too_few_samples(): + # Regression test for ticket #1492. + # skewtest requires at least 8 samples; 7 should raise a ValueError. + x = np.arange(7.0) + assert_raises(ValueError, stats.skewtest, x) + + +def test_kurtosistest_too_few_samples(): + # Regression test for ticket #1425. + # kurtosistest requires at least 5 samples; 4 should raise a ValueError. 
+ x = np.arange(4.0) + assert_raises(ValueError, stats.kurtosistest, x) + + +class TestMannWhitneyU: + X = [19.8958398126694, 19.5452691647182, 19.0577309166425, 21.716543054589, + 20.3269502208702, 20.0009273294025, 19.3440043632957, 20.4216806548105, + 19.0649894736528, 18.7808043120398, 19.3680942943298, 19.4848044069953, + 20.7514611265663, 19.0894948874598, 19.4975522356628, 18.9971170734274, + 20.3239606288208, 20.6921298083835, 19.0724259532507, 18.9825187935021, + 19.5144462609601, 19.8256857844223, 20.5174677102032, 21.1122407995892, + 17.9490854922535, 18.2847521114727, 20.1072217648826, 18.6439891962179, + 20.4970638083542, 19.5567594734914] + + Y = [19.2790668029091, 16.993808441865, 18.5416338448258, 17.2634018833575, + 19.1577183624616, 18.5119655377495, 18.6068455037221, 18.8358343362655, + 19.0366413269742, 18.1135025515417, 19.2201873866958, 17.8344909022841, + 18.2894380745856, 18.6661374133922, 19.9688601693252, 16.0672254617636, + 19.00596360572, 19.201561539032, 19.0487501090183, 19.0847908674356] + + significant = 14 + + def test_mannwhitneyu_one_sided(self): + u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='less') + u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='greater') + u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative='greater') + u4, p4 = stats.mannwhitneyu(self.Y, self.X, alternative='less') + + assert_equal(p1, p2) + assert_equal(p3, p4) + assert_(p1 != p3) + assert_equal(u1, 498) + assert_equal(u2, 102) + assert_equal(u3, 498) + assert_equal(u4, 102) + assert_approx_equal(p1, 0.999957683256589, significant=self.significant) + assert_approx_equal(p3, 4.5941632666275e-05, significant=self.significant) + + def test_mannwhitneyu_two_sided(self): + u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='two-sided') + u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='two-sided') + + assert_equal(p1, p2) + assert_equal(u1, 498) + assert_equal(u2, 102) + assert_approx_equal(p1, 9.188326533255e-05, + significant=self.significant) + + def test_mannwhitneyu_no_correct_one_sided(self): + u1, p1 = stats.mannwhitneyu(self.X, self.Y, False, + alternative='less') + u2, p2 = stats.mannwhitneyu(self.Y, self.X, False, + alternative='greater') + u3, p3 = stats.mannwhitneyu(self.X, self.Y, False, + alternative='greater') + u4, p4 = stats.mannwhitneyu(self.Y, self.X, False, + alternative='less') + + assert_equal(p1, p2) + assert_equal(p3, p4) + assert_(p1 != p3) + assert_equal(u1, 498) + assert_equal(u2, 102) + assert_equal(u3, 498) + assert_equal(u4, 102) + assert_approx_equal(p1, 0.999955905990004, significant=self.significant) + assert_approx_equal(p3, 4.40940099958089e-05, significant=self.significant) + + def test_mannwhitneyu_no_correct_two_sided(self): + u1, p1 = stats.mannwhitneyu(self.X, self.Y, False, + alternative='two-sided') + u2, p2 = stats.mannwhitneyu(self.Y, self.X, False, + alternative='two-sided') + + assert_equal(p1, p2) + assert_equal(u1, 498) + assert_equal(u2, 102) + assert_approx_equal(p1, 8.81880199916178e-05, + significant=self.significant) + + def test_mannwhitneyu_ones(self): + # test for gh-1428 + x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., + 1., 1., 
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1.]) + + y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1., + 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., + 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., + 2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2., + 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., + 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., + 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., + 1., 1., 1., 1.]) + + # checked against R wilcox.test + assert_allclose(stats.mannwhitneyu(x, y, alternative='less'), + (16980.5, 2.8214327656317373e-005)) + # p-value from R, e.g. wilcox.test(x, y, alternative="g") + assert_allclose(stats.mannwhitneyu(x, y, alternative='greater'), + (16980.5, 0.9999719954296)) + assert_allclose(stats.mannwhitneyu(x, y, alternative='two-sided'), + (16980.5, 5.642865531266e-05)) + + def test_mannwhitneyu_result_attributes(self): + # test for namedtuple attribute results + attributes = ('statistic', 'pvalue') + res = stats.mannwhitneyu(self.X, self.Y, alternative="less") + check_named_results(res, attributes) + + +def test_pointbiserial(): + # same as mstats test except for the nan + # Test data: https://web.archive.org/web/20060504220742/https://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output + x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0, + 0,0,0,0,1] + y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0, + 2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1, + 0.8,0.7,0.6,0.5,0.2,0.2,0.1] + assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5) + + # test for namedtuple attribute results + attributes = ('correlation', 'pvalue') + res = stats.pointbiserialr(x, y) + check_named_results(res, attributes) + assert_equal(res.correlation, res.statistic) + + +def test_obrientransform(): + # A couple tests calculated by hand. + x1 = np.array([0, 2, 4]) + t1 = stats.obrientransform(x1) + expected = [7, -2, 7] + assert_allclose(t1[0], expected) + + x2 = np.array([0, 3, 6, 9]) + t2 = stats.obrientransform(x2) + expected = np.array([30, 0, 0, 30]) + assert_allclose(t2[0], expected) + + # Test two arguments. + a, b = stats.obrientransform(x1, x2) + assert_equal(a, t1[0]) + assert_equal(b, t2[0]) + + # Test three arguments. + a, b, c = stats.obrientransform(x1, x2, x1) + assert_equal(a, t1[0]) + assert_equal(b, t2[0]) + assert_equal(c, t1[0]) + + # This is a regression test to check np.var replacement. + # The author of this test didn't separately verify the numbers. 
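+ # As a by-hand cross-check of the expected values above: with
+ # n = len(x), m = mean(x) and v = var(x, ddof=1), O'Brien's transform is
+ #   r_i = ((n - 1.5) * n * (x_i - m)**2 - 0.5 * v * (n - 1))
+ #         / ((n - 1) * (n - 2))
+ # so x1 = [0, 2, 4] (n=3, m=2, v=4) gives r = [7, -2, 7], as asserted.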
+ x1 = np.arange(5) + result = np.array( + [[5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667], + [21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]]) + assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8) + + # Example from "O'Brien Test for Homogeneity of Variance" + # by Herve Abdi. + values = range(5, 11) + reps = np.array([5, 11, 9, 3, 2, 2]) + data = np.repeat(values, reps) + transformed_values = np.array([3.1828, 0.5591, 0.0344, + 1.6086, 5.2817, 11.0538]) + expected = np.repeat(transformed_values, reps) + result = stats.obrientransform(data) + assert_array_almost_equal(result[0], expected, decimal=4) + + +def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7, + weights=None): + # Note this doesn't test when axis is not specified + x = stats.gmean(array_like, axis=axis, dtype=dtype, weights=weights) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) + + +def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7, + weights=None): + x = stats.hmean(array_like, axis=axis, dtype=dtype, weights=weights) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) + + +def check_equal_pmean(array_like, exp, desired, axis=None, dtype=None, + rtol=1e-7, weights=None): + x = stats.pmean(array_like, exp, axis=axis, dtype=dtype, weights=weights) + assert_allclose(x, desired, rtol=rtol) + assert_equal(x.dtype, dtype) + + +class TestHarMean: + def test_0(self): + a = [1, 0, 2] + desired = 0 + check_equal_hmean(a, desired) + + def test_1d_list(self): + # Test a 1d list + a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] + desired = 34.1417152147 + check_equal_hmean(a, desired) + + a = [1, 2, 3, 4] + desired = 4. / (1. / 1 + 1. / 2 + 1. / 3 + 1. 
/ 4) + check_equal_hmean(a, desired) + + def test_1d_array(self): + # Test a 1d array + a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) + desired = 34.1417152147 + check_equal_hmean(a, desired) + + def test_1d_array_with_zero(self): + a = np.array([1, 0]) + desired = 0.0 + assert_equal(stats.hmean(a), desired) + + def test_1d_array_with_negative_value(self): + a = np.array([1, 0, -1]) + assert_raises(ValueError, stats.hmean, a) + + # Note the next tests use axis=None as default, not axis=0 + def test_2d_list(self): + # Test a 2d list + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = 38.6696271841 + check_equal_hmean(a, desired) + + def test_2d_array(self): + # Test a 2d array + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = 38.6696271841 + check_equal_hmean(np.array(a), desired) + + def test_2d_axis0(self): + # Test a 2d list with axis=0 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545]) + check_equal_hmean(a, desired, axis=0) + + def test_2d_axis0_with_zero(self): + a = [[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = np.array([22.88135593, 0.0, 52.90076336, 65.45454545]) + assert_allclose(stats.hmean(a, axis=0), desired) + + def test_2d_axis1(self): + # Test a 2d list with axis=1 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = np.array([19.2, 63.03939962, 103.80078637]) + check_equal_hmean(a, desired, axis=1) + + def test_2d_axis1_with_zero(self): + a = [[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = np.array([0.0, 63.03939962, 103.80078637]) + assert_allclose(stats.hmean(a, axis=1), desired) + + def test_weights_1d_list(self): + # Desired result from: + # https://www.hackmath.net/en/math-problem/35871 + a = [2, 10, 6] + weights = [10, 5, 3] + desired = 3 + check_equal_hmean(a, desired, weights=weights, rtol=1e-5) + + def test_weights_2d_array_axis0(self): + # Desired result from: + # https://www.hackmath.net/en/math-problem/35871 + a = np.array([[2, 5], [10, 5], [6, 5]]) + weights = np.array([[10, 1], [5, 1], [3, 1]]) + desired = np.array([3, 5]) + check_equal_hmean(a, desired, axis=0, weights=weights, rtol=1e-5) + + def test_weights_2d_array_axis1(self): + # Desired result from: + # https://www.hackmath.net/en/math-problem/35871 + a = np.array([[2, 10, 6], [7, 7, 7]]) + weights = np.array([[10, 5, 3], [1, 1, 1]]) + desired = np.array([3, 7]) + check_equal_hmean(a, desired, axis=1, weights=weights, rtol=1e-5) + + def test_weights_masked_1d_array(self): + # Desired result from: + # https://www.hackmath.net/en/math-problem/35871 + a = np.array([2, 10, 6, 42]) + weights = np.ma.array([10, 5, 3, 42], mask=[0, 0, 0, 1]) + desired = 3 + check_equal_hmean(a, desired, weights=weights, rtol=1e-5) + + +class TestGeoMean: + def test_0(self): + a = [1, 0, 2] + desired = 0 + check_equal_gmean(a, desired) + + def test_1d_list(self): + # Test a 1d list + a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] + desired = 45.2872868812 + check_equal_gmean(a, desired) + + a = [1, 2, 3, 4] + desired = power(1 * 2 * 3 * 4, 1. / 4.) + check_equal_gmean(a, desired, rtol=1e-14) + + def test_1d_array(self): + # Test a 1d array + a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) + desired = 45.2872868812 + check_equal_gmean(a, desired) + + a = array([1, 2, 3, 4], float32) + desired = power(1 * 2 * 3 * 4, 1. / 4.) 
+ check_equal_gmean(a, desired, dtype=float32) + + # Note the next tests use axis=None as default, not axis=0 + def test_2d_list(self): + # Test a 2d list + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = 52.8885199 + check_equal_gmean(a, desired) + + def test_2d_array(self): + # Test a 2d array + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = 52.8885199 + check_equal_gmean(array(a), desired) + + def test_2d_axis0(self): + # Test a 2d list with axis=0 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371]) + check_equal_gmean(a, desired, axis=0) + + a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + desired = array([1, 2, 3, 4]) + check_equal_gmean(a, desired, axis=0, rtol=1e-14) + + def test_2d_axis1(self): + # Test a 2d list with axis=1 + a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] + desired = np.array([22.13363839, 64.02171746, 104.40086817]) + check_equal_gmean(a, desired, axis=1) + + a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + v = power(1 * 2 * 3 * 4, 1. / 4.) + desired = array([v, v, v]) + check_equal_gmean(a, desired, axis=1, rtol=1e-14) + + def test_large_values(self): + a = array([1e100, 1e200, 1e300]) + desired = 1e200 + check_equal_gmean(a, desired, rtol=1e-13) + + def test_1d_list0(self): + # Test a 1d list with zero element + a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0] + desired = 0.0 # due to exp(-inf)=0 + with np.errstate(all='ignore'): + check_equal_gmean(a, desired) + + def test_1d_array0(self): + # Test a 1d array with zero element + a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) + desired = 0.0 # due to exp(-inf)=0 + with np.errstate(divide='ignore'): + check_equal_gmean(a, desired) + + def test_1d_list_neg(self): + # Test a 1d list with negative element + a = [10, 20, 30, 40, 50, 60, 70, 80, 90, -1] + desired = np.nan # due to log(-1) = nan + with np.errstate(invalid='ignore'): + check_equal_gmean(a, desired) + + def test_weights_1d_list(self): + # Desired result from: + # https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/ + a = [1, 2, 3, 4, 5] + weights = [2, 5, 6, 4, 3] + desired = 2.77748 + check_equal_gmean(a, desired, weights=weights, rtol=1e-5) + + def test_weights_1d_array(self): + # Desired result from: + # https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/ + a = np.array([1, 2, 3, 4, 5]) + weights = np.array([2, 5, 6, 4, 3]) + desired = 2.77748 + check_equal_gmean(a, desired, weights=weights, rtol=1e-5) + + def test_weights_masked_1d_array(self): + # Desired result from: + # https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/ + a = np.array([1, 2, 3, 4, 5, 6]) + weights = np.ma.array([2, 5, 6, 4, 3, 5], mask=[0, 0, 0, 0, 0, 1]) + desired = 2.77748 + check_equal_gmean(a, desired, weights=weights, rtol=1e-5) + + +class TestPowMean: + + def pmean_reference(a, p): + return (np.sum(a**p) / a.size)**(1/p) + + def wpmean_reference(a, p, weights): + return (np.sum(weights * a**p) / np.sum(weights))**(1/p) + + def test_bad_exponent(self): + with pytest.raises(ValueError, match='Power mean only defined for'): + stats.pmean([1, 2, 3], [0]) + with pytest.raises(ValueError, match='Power mean only defined for'): + stats.pmean([1, 2, 3], np.array([0])) + + def test_1d_list(self): + a, p = [10, 20, 30, 40, 50, 60, 70, 
80, 90, 100], 3.5 + desired = TestPowMean.pmean_reference(np.array(a), p) + check_equal_pmean(a, p, desired) + + a, p = [1, 2, 3, 4], 2 + desired = np.sqrt((1**2 + 2**2 + 3**2 + 4**2) / 4) + check_equal_pmean(a, p, desired) + + def test_1d_array(self): + a, p = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]), -2.5 + desired = TestPowMean.pmean_reference(a, p) + check_equal_pmean(a, p, desired) + + def test_1d_array_with_zero(self): + a, p = np.array([1, 0]), -1 + desired = 0.0 + assert_equal(stats.pmean(a, p), desired) + + def test_1d_array_with_negative_value(self): + a, p = np.array([1, 0, -1]), 1.23 + with pytest.raises(ValueError, match='Power mean only defined if all'): + stats.pmean(a, p) + + @pytest.mark.parametrize( + ("a", "p"), + [([[10, 20], [50, 60], [90, 100]], -0.5), + (np.array([[10, 20], [50, 60], [90, 100]]), 0.5)] + ) + def test_2d_axisnone(self, a, p): + desired = TestPowMean.pmean_reference(np.array(a), p) + check_equal_pmean(a, p, desired) + + @pytest.mark.parametrize( + ("a", "p"), + [([[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], -0.5), + ([[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], 0.5)] + ) + def test_2d_list_axis0(self, a, p): + desired = [ + TestPowMean.pmean_reference( + np.array([a[i][j] for i in range(len(a))]), p + ) + for j in range(len(a[0])) + ] + check_equal_pmean(a, p, desired, axis=0) + + @pytest.mark.parametrize( + ("a", "p"), + [([[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], -0.5), + ([[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], 0.5)] + ) + def test_2d_list_axis1(self, a, p): + desired = [TestPowMean.pmean_reference(np.array(a_), p) for a_ in a] + check_equal_pmean(a, p, desired, axis=1) + + def test_weights_1d_list(self): + a, p = [2, 10, 6], -1.23456789 + weights = [10, 5, 3] + desired = TestPowMean.wpmean_reference(np.array(a), p, weights) + check_equal_pmean(a, p, desired, weights=weights, rtol=1e-5) + + def test_weights_masked_1d_array(self): + a, p = np.array([2, 10, 6, 42]), 1 + weights = np.ma.array([10, 5, 3, 42], mask=[0, 0, 0, 1]) + desired = np.average(a, weights=weights) + check_equal_pmean(a, p, desired, weights=weights, rtol=1e-5) + + @pytest.mark.parametrize( + ("axis", "fun_name", "p"), + [(None, "wpmean_reference", 9.87654321), + (0, "gmean", 0), + (1, "hmean", -1)] + ) + def test_weights_2d_array(self, axis, fun_name, p): + if fun_name == 'wpmean_reference': + def fun(a, axis, weights): + return TestPowMean.wpmean_reference(a, p, weights) + else: + fun = getattr(stats, fun_name) + a = np.array([[2, 5], [10, 5], [6, 5]]) + weights = np.array([[10, 1], [5, 1], [3, 1]]) + desired = fun(a, axis=axis, weights=weights) + check_equal_pmean(a, p, desired, axis=axis, weights=weights, rtol=1e-5) + + +class TestGeometricStandardDeviation: + # must add 1 as `gstd` is only defined for positive values + array_1d = np.arange(2 * 3 * 4) + 1 + gstd_array_1d = 2.294407613602 + array_3d = array_1d.reshape(2, 3, 4) + + def test_1d_array(self): + gstd_actual = stats.gstd(self.array_1d) + assert_allclose(gstd_actual, self.gstd_array_1d) + + def test_1d_numeric_array_like_input(self): + gstd_actual = stats.gstd(tuple(self.array_1d)) + assert_allclose(gstd_actual, self.gstd_array_1d) + + def test_raises_value_error_non_array_like_input(self): + with pytest.raises(ValueError, match='Invalid array input'): + stats.gstd('This should fail as it can not be cast to an array.') + + def test_raises_value_error_zero_entry(self): + with pytest.raises(ValueError, match='Non positive value'): + 
stats.gstd(np.append(self.array_1d, [0])) + + def test_raises_value_error_negative_entry(self): + with pytest.raises(ValueError, match='Non positive value'): + stats.gstd(np.append(self.array_1d, [-1])) + + def test_raises_value_error_inf_entry(self): + with pytest.raises(ValueError, match='Infinite value'): + stats.gstd(np.append(self.array_1d, [np.inf])) + + def test_propagates_nan_values(self): + a = array([[1, 1, 1, 16], [np.nan, 1, 2, 3]]) + gstd_actual = stats.gstd(a, axis=1) + assert_allclose(gstd_actual, np.array([4, np.nan])) + + def test_ddof_equal_to_number_of_observations(self): + with pytest.raises(ValueError, match='Degrees of freedom <= 0'): + stats.gstd(self.array_1d, ddof=self.array_1d.size) + + def test_3d_array(self): + gstd_actual = stats.gstd(self.array_3d, axis=None) + assert_allclose(gstd_actual, self.gstd_array_1d) + + def test_3d_array_axis_type_tuple(self): + gstd_actual = stats.gstd(self.array_3d, axis=(1, 2)) + assert_allclose(gstd_actual, [2.12939215, 1.22120169]) + + def test_3d_array_axis_0(self): + gstd_actual = stats.gstd(self.array_3d, axis=0) + gstd_desired = np.array([ + [6.1330555493918, 3.958900210120, 3.1206598248344, 2.6651441426902], + [2.3758135028411, 2.174581428192, 2.0260062829505, 1.9115518327308], + [1.8205343606803, 1.746342404566, 1.6846557065742, 1.6325269194382] + ]) + assert_allclose(gstd_actual, gstd_desired) + + def test_3d_array_axis_1(self): + gstd_actual = stats.gstd(self.array_3d, axis=1) + gstd_desired = np.array([ + [3.118993630946, 2.275985934063, 1.933995977619, 1.742896469724], + [1.271693593916, 1.254158641801, 1.238774141609, 1.225164057869] + ]) + assert_allclose(gstd_actual, gstd_desired) + + def test_3d_array_axis_2(self): + gstd_actual = stats.gstd(self.array_3d, axis=2) + gstd_desired = np.array([ + [1.8242475707664, 1.2243686572447, 1.1318311657788], + [1.0934830582351, 1.0724479791887, 1.0591498540749] + ]) + assert_allclose(gstd_actual, gstd_desired) + + def test_masked_3d_array(self): + ma = np.ma.masked_where(self.array_3d > 16, self.array_3d) + gstd_actual = stats.gstd(ma, axis=2) + gstd_desired = stats.gstd(self.array_3d, axis=2) + mask = [[0, 0, 0], [0, 1, 1]] + assert_allclose(gstd_actual, gstd_desired) + assert_equal(gstd_actual.mask, mask) + + +def test_binomtest(): + # precision tests compared to R for ticket:986 + pp = np.concatenate((np.linspace(0.1, 0.2, 5), + np.linspace(0.45, 0.65, 5), + np.linspace(0.85, 0.95, 5))) + n = 501 + x = 450 + results = [0.0, 0.0, 1.0159969301994141e-304, + 2.9752418572150531e-275, 7.7668382922535275e-250, + 2.3381250925167094e-099, 7.8284591587323951e-081, + 9.9155947819961383e-065, 2.8729390725176308e-050, + 1.7175066298388421e-037, 0.0021070691951093692, + 0.12044570587262322, 0.88154763174802508, 0.027120993063129286, + 2.6102587134694721e-006] + + for p, res in zip(pp, results): + assert_approx_equal(stats.binomtest(x, n, p).pvalue, res, + significant=12, err_msg='fail for p=%f' % p) + assert_approx_equal(stats.binomtest(50, 100, 0.1).pvalue, + 5.8320387857343647e-024, + significant=12) + + +def test_binomtest2(): + # test added for issue #2384 + res2 = [ + [1.0, 1.0], + [0.5, 1.0, 0.5], + [0.25, 1.00, 1.00, 0.25], + [0.125, 0.625, 1.000, 0.625, 0.125], + [0.0625, 0.3750, 1.0000, 1.0000, 0.3750, 0.0625], + [0.03125, 0.21875, 0.68750, 1.00000, 0.68750, 0.21875, 0.03125], + [0.015625, 0.125000, 0.453125, 1.000000, 1.000000, 0.453125, 0.125000, + 0.015625], + [0.0078125, 0.0703125, 0.2890625, 0.7265625, 1.0000000, 0.7265625, + 0.2890625, 0.0703125, 0.0078125], + [0.00390625,
0.03906250, 0.17968750, 0.50781250, 1.00000000, + 1.00000000, 0.50781250, 0.17968750, 0.03906250, 0.00390625], + [0.001953125, 0.021484375, 0.109375000, 0.343750000, 0.753906250, + 1.000000000, 0.753906250, 0.343750000, 0.109375000, 0.021484375, + 0.001953125] + ] + for k in range(1, 11): + res1 = [stats.binomtest(v, k, 0.5).pvalue for v in range(k + 1)] + assert_almost_equal(res1, res2[k-1], decimal=10) + + +def test_binomtest3(): + # test added for issue #2384 + # test when x == n*p and neighbors + res3 = [stats.binomtest(v, v*k, 1./k).pvalue + for v in range(1, 11) for k in range(2, 11)] + assert_equal(res3, np.ones(len(res3), int)) + + # > bt=c() + # > for(i in as.single(1:10)) { + # + for(k in as.single(2:10)) { + # + bt = c(bt, binom.test(i-1, k*i,(1/k))$p.value); + # + print(c(i+1, k*i,(1/k))) + # + } + # + } + binom_testm1 = np.array([ + 0.5, 0.5555555555555556, 0.578125, 0.5904000000000003, + 0.5981224279835393, 0.603430543396034, 0.607304096221924, + 0.610255656871054, 0.612579511000001, 0.625, 0.670781893004115, + 0.68853759765625, 0.6980101120000006, 0.703906431368616, + 0.70793209416498, 0.7108561134173507, 0.713076544331419, + 0.714820192935702, 0.6875, 0.7268709038256367, 0.7418963909149174, + 0.74986110468096, 0.7548015520398076, 0.7581671424768577, + 0.760607984787832, 0.762459425024199, 0.7639120677676575, 0.7265625, + 0.761553963657302, 0.774800934828818, 0.7818005980538996, + 0.78613491480358, 0.789084353140195, 0.7912217659828884, + 0.79284214559524, 0.794112956558801, 0.75390625, 0.7856929451142176, + 0.7976688481430754, 0.8039848974727624, 0.807891868948366, + 0.8105487660137676, 0.812473307174702, 0.8139318233591120, + 0.815075399104785, 0.7744140625, 0.8037322594985427, + 0.814742863657656, 0.8205425178645808, 0.8241275984172285, + 0.8265645374416, 0.8283292196088257, 0.829666291102775, + 0.8307144686362666, 0.7905273437499996, 0.8178712053954738, + 0.828116983756619, 0.833508948940494, 0.8368403871552892, + 0.839104213210105, 0.840743186196171, 0.84198481438049, + 0.8429580531563676, 0.803619384765625, 0.829338573944648, + 0.8389591907548646, 0.84401876783902, 0.84714369697889, + 0.8492667010581667, 0.850803474598719, 0.851967542858308, + 0.8528799045949524, 0.8145294189453126, 0.838881732845347, + 0.847979024541911, 0.852760894015685, 0.8557134656773457, + 0.8577190131799202, 0.85917058278431, 0.860270010472127, + 0.861131648404582, 0.823802947998047, 0.846984756807511, + 0.855635653643743, 0.860180994825685, 0.86298688573253, + 0.864892525675245, 0.866271647085603, 0.867316125625004, + 0.8681346531755114 + ]) + + # > bt=c() + # > for(i in as.single(1:10)) { + # + for(k in as.single(2:10)) { + # + bt = c(bt, binom.test(i+1, k*i,(1/k))$p.value); + # + print(c(i+1, k*i,(1/k))) + # + } + # + } + + binom_testp1 = np.array([ + 0.5, 0.259259259259259, 0.26171875, 0.26272, 0.2632244513031551, + 0.2635138663069203, 0.2636951804161073, 0.2638162407564354, + 0.2639010709000002, 0.625, 0.4074074074074074, 0.42156982421875, + 0.4295746560000003, 0.43473045988554, 0.4383309503172684, + 0.4409884859402103, 0.4430309389962837, 0.444649849401104, 0.6875, + 0.4927602499618962, 0.5096031427383425, 0.5189636628480, + 0.5249280070771274, 0.5290623300865124, 0.5320974248125793, + 0.5344204730474308, 0.536255847400756, 0.7265625, 0.5496019313526808, + 0.5669248746708034, 0.576436455045805, 0.5824538812831795, + 0.5866053321547824, 0.589642781414643, 0.5919618019300193, + 0.593790427805202, 0.75390625, 0.590868349763505, 0.607983393277209, + 0.617303847446822, 0.623172512167948, 
0.627208862156123, + 0.6301556891501057, 0.632401894928977, 0.6341708982290303, + 0.7744140625, 0.622562037497196, 0.639236102912278, 0.648263335014579, + 0.65392850011132, 0.657816519817211, 0.660650782947676, + 0.662808780346311, 0.6645068560246006, 0.7905273437499996, + 0.6478843304312477, 0.6640468318879372, 0.6727589686071775, + 0.6782129857784873, 0.681950188903695, 0.684671508668418, + 0.686741824999918, 0.688369886732168, 0.803619384765625, + 0.668716055304315, 0.684360013879534, 0.6927642396829181, + 0.6980155964704895, 0.701609591890657, 0.7042244320992127, + 0.7062125081341817, 0.707775152962577, 0.8145294189453126, + 0.686243374488305, 0.7013873696358975, 0.709501223328243, + 0.714563595144314, 0.718024953392931, 0.7205416252126137, + 0.722454130389843, 0.723956813292035, 0.823802947998047, + 0.701255953767043, 0.715928221686075, 0.723772209289768, + 0.7286603031173616, 0.7319999279787631, 0.7344267920995765, + 0.736270323773157, 0.737718376096348 + ]) + + res4_p1 = [stats.binomtest(v+1, v*k, 1./k).pvalue + for v in range(1, 11) for k in range(2, 11)] + res4_m1 = [stats.binomtest(v-1, v*k, 1./k).pvalue + for v in range(1, 11) for k in range(2, 11)] + + assert_almost_equal(res4_p1, binom_testp1, decimal=13) + assert_almost_equal(res4_m1, binom_testm1, decimal=13) + + +class TestTrim: + # test trim functions + def test_trim1(self): + a = np.arange(11) + assert_equal(np.sort(stats.trim1(a, 0.1)), np.arange(10)) + assert_equal(np.sort(stats.trim1(a, 0.2)), np.arange(9)) + assert_equal(np.sort(stats.trim1(a, 0.2, tail='left')), + np.arange(2, 11)) + assert_equal(np.sort(stats.trim1(a, 3/11., tail='left')), + np.arange(3, 11)) + assert_equal(stats.trim1(a, 1.0), []) + assert_equal(stats.trim1(a, 1.0, tail='left'), []) + + # empty input + assert_equal(stats.trim1([], 0.1), []) + assert_equal(stats.trim1([], 3/11., tail='left'), []) + assert_equal(stats.trim1([], 4/6.), []) + + # test axis + a = np.arange(24).reshape(6, 4) + ref = np.arange(4, 24).reshape(5, 4) # first row trimmed + + axis = 0 + trimmed = stats.trim1(a, 0.2, tail='left', axis=axis) + assert_equal(np.sort(trimmed, axis=axis), ref) + + axis = 1 + trimmed = stats.trim1(a.T, 0.2, tail='left', axis=axis) + assert_equal(np.sort(trimmed, axis=axis), ref.T) + + def test_trimboth(self): + a = np.arange(11) + assert_equal(np.sort(stats.trimboth(a, 3/11.)), np.arange(3, 8)) + assert_equal(np.sort(stats.trimboth(a, 0.2)), + np.array([2, 3, 4, 5, 6, 7, 8])) + assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(6, 4), 0.2)), + np.arange(4, 20).reshape(4, 4)) + assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(4, 6).T, + 2/6.)), + np.array([[2, 8, 14, 20], [3, 9, 15, 21]])) + assert_raises(ValueError, stats.trimboth, + np.arange(24).reshape(4, 6).T, 4/6.) 
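+        # (Editor's note: proportiontocut=4/6 would drop int(6 * 4/6) = 4 rows
+        # from each tail of the 6-row axis, more than the array holds, hence
+        # the ValueError checked above.)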
+ + # empty input + assert_equal(stats.trimboth([], 0.1), []) + assert_equal(stats.trimboth([], 3/11.), []) + assert_equal(stats.trimboth([], 4/6.), []) + + def test_trim_mean(self): + # don't use pre-sorted arrays + a = np.array([4, 8, 2, 0, 9, 5, 10, 1, 7, 3, 6]) + idx = np.array([3, 5, 0, 1, 2, 4]) + a2 = np.arange(24).reshape(6, 4)[idx, :] + a3 = np.arange(24).reshape(6, 4, order='F')[idx, :] + assert_equal(stats.trim_mean(a3, 2/6.), + np.array([2.5, 8.5, 14.5, 20.5])) + assert_equal(stats.trim_mean(a2, 2/6.), + np.array([10., 11., 12., 13.])) + idx4 = np.array([1, 0, 3, 2]) + a4 = np.arange(24).reshape(4, 6)[idx4, :] + assert_equal(stats.trim_mean(a4, 2/6.), + np.array([9., 10., 11., 12., 13., 14.])) + # shuffled arange(24) as array_like + a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23, + 20, 2, 14, 4, 13, 8, 3] + assert_equal(stats.trim_mean(a, 2/6.), 11.5) + assert_equal(stats.trim_mean([5,4,3,1,2,0], 2/6.), 2.5) + + # check axis argument + np.random.seed(1234) + a = np.random.randint(20, size=(5, 6, 4, 7)) + for axis in [0, 1, 2, 3, -1]: + res1 = stats.trim_mean(a, 2/6., axis=axis) + res2 = stats.trim_mean(np.moveaxis(a, axis, 0), 2/6.) + assert_equal(res1, res2) + + res1 = stats.trim_mean(a, 2/6., axis=None) + res2 = stats.trim_mean(a.ravel(), 2/6.) + assert_equal(res1, res2) + + assert_raises(ValueError, stats.trim_mean, a, 0.6) + + # empty input + assert_equal(stats.trim_mean([], 0.0), np.nan) + assert_equal(stats.trim_mean([], 0.6), np.nan) + + +class TestSigmaClip: + def test_sigmaclip1(self): + a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5))) + fact = 4 # default + c, low, upp = stats.sigmaclip(a) + assert_(c.min() > low) + assert_(c.max() < upp) + assert_equal(low, c.mean() - fact*c.std()) + assert_equal(upp, c.mean() + fact*c.std()) + assert_equal(c.size, a.size) + + def test_sigmaclip2(self): + a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5))) + fact = 1.5 + c, low, upp = stats.sigmaclip(a, fact, fact) + assert_(c.min() > low) + assert_(c.max() < upp) + assert_equal(low, c.mean() - fact*c.std()) + assert_equal(upp, c.mean() + fact*c.std()) + assert_equal(c.size, 4) + assert_equal(a.size, 36) # check original array unchanged + + def test_sigmaclip3(self): + a = np.concatenate((np.linspace(9.5, 10.5, 11), + np.linspace(-100, -50, 3))) + fact = 1.8 + c, low, upp = stats.sigmaclip(a, fact, fact) + assert_(c.min() > low) + assert_(c.max() < upp) + assert_equal(low, c.mean() - fact*c.std()) + assert_equal(upp, c.mean() + fact*c.std()) + assert_equal(c, np.linspace(9.5, 10.5, 11)) + + def test_sigmaclip_result_attributes(self): + a = np.concatenate((np.linspace(9.5, 10.5, 11), + np.linspace(-100, -50, 3))) + fact = 1.8 + res = stats.sigmaclip(a, fact, fact) + attributes = ('clipped', 'lower', 'upper') + check_named_results(res, attributes) + + def test_std_zero(self): + # regression test #8632 + x = np.ones(10) + assert_equal(stats.sigmaclip(x)[0], x) + + +class TestAlexanderGovern: + def test_compare_dtypes(self): + args = [[13, 13, 13, 13, 13, 13, 13, 12, 12], + [14, 13, 12, 12, 12, 12, 12, 11, 11], + [14, 14, 13, 13, 13, 13, 13, 12, 12], + [15, 14, 13, 13, 13, 12, 12, 12, 11]] + args_int16 = np.array(args, dtype=np.int16) + args_int32 = np.array(args, dtype=np.int32) + args_uint8 = np.array(args, dtype=np.uint8) + args_float64 = np.array(args, dtype=np.float64) + + res_int16 = stats.alexandergovern(*args_int16) + res_int32 = stats.alexandergovern(*args_int32) + res_unit8 = stats.alexandergovern(*args_uint8) + 
res_float64 = stats.alexandergovern(*args_float64) + + assert (res_int16.pvalue == res_int32.pvalue == + res_unit8.pvalue == res_float64.pvalue) + assert (res_int16.statistic == res_int32.statistic == + res_unit8.statistic == res_float64.statistic) + + def test_bad_inputs(self): + # input array is of size zero + with assert_raises(ValueError, match="Input sample size must be" + " greater than one."): + stats.alexandergovern([1, 2], []) + # input is a singular non list element + with assert_raises(ValueError, match="Input sample size must be" + " greater than one."): + stats.alexandergovern([1, 2], 2) + # input list is of size 1 + with assert_raises(ValueError, match="Input sample size must be" + " greater than one."): + stats.alexandergovern([1, 2], [2]) + # inputs are not finite (infinity) + with assert_raises(ValueError, match="Input samples must be finite."): + stats.alexandergovern([1, 2], [np.inf, np.inf]) + + def test_compare_r(self): + ''' + Data generated in R with + > set.seed(1) + > library("onewaytests") + > library("tibble") + > y <- c(rnorm(40, sd=10), + + rnorm(30, sd=15), + + rnorm(20, sd=20)) + > x <- c(rep("one", times=40), + + rep("two", times=30), + + rep("eight", times=20)) + > x <- factor(x) + > ag.test(y ~ x, tibble(y,x)) + + Alexander-Govern Test (alpha = 0.05) + ------------------------------------------------------------- + data : y and x + + statistic : 1.359941 + parameter : 2 + p.value : 0.5066321 + + Result : Difference is not statistically significant. + ------------------------------------------------------------- + Example adapted from: + https://eval-serv2.metpsy.uni-jena.de/wiki-metheval-hp/index.php/R_FUN_Alexander-Govern + + ''' + one = [-6.264538107423324, 1.8364332422208225, -8.356286124100471, + 15.952808021377916, 3.295077718153605, -8.204683841180152, + 4.874290524284853, 7.383247051292173, 5.757813516534923, + -3.0538838715635603, 15.11781168450848, 3.898432364114311, + -6.2124058054180376, -22.146998871774997, 11.249309181431082, + -0.4493360901523085, -0.16190263098946087, 9.438362106852992, + 8.212211950980885, 5.939013212175088, 9.189773716082183, + 7.821363007310671, 0.745649833651906, -19.89351695863373, + 6.198257478947102, -0.5612873952900078, -1.557955067053293, + -14.707523838992744, -4.781500551086204, 4.179415601997024, + 13.58679551529044, -1.0278772734299553, 3.876716115593691, + -0.5380504058290512, -13.770595568286065, -4.149945632996798, + -3.942899537103493, -0.5931339671118566, 11.000253719838831, + 7.631757484575442] + + two = [-2.4678539438038034, -3.8004252020476135, 10.454450631071062, + 8.34994798010486, -10.331335418242798, -10.612427354431794, + 5.468729432052455, 11.527993867731237, -1.6851931822534207, + 13.216615896813222, 5.971588205506021, -9.180395898761569, + 5.116795371366372, -16.94044644121189, 21.495355525515556, + 29.7059984775879, -5.508322146997636, -15.662019394747961, + 8.545794411636193, -2.0258190582123654, 36.024266407571645, + -0.5886000409975387, 10.346090436761651, 0.4200323817099909, + -11.14909813323608, 2.8318844927151434, -27.074379433365568, + 21.98332292344329, 2.2988000731784655, 32.58917505543229] + + eight = [9.510190577993251, -14.198928618436291, 12.214527069781099, + -18.68195263288503, -25.07266800478204, 5.828924710349257, + -8.86583746436866, 0.02210703263248262, 1.4868264830332811, + -11.79041892376144, -11.37337465637004, -2.7035723024766414, + 23.56173993146409, -30.47133600859524, 11.878923752568431, + 6.659007424270365, 21.261996745527256, -6.083678472686013, + 7.400376198325763, 
5.341975815444621] + soln = stats.alexandergovern(one, two, eight) + assert_allclose(soln.statistic, 1.3599405447999450836) + assert_allclose(soln.pvalue, 0.50663205309676440091) + + def test_compare_scholar(self): + ''' + Data taken from 'The Modification and Evaluation of the + Alexander-Govern Test in Terms of Power' by Kingsley Ochuko, T., + Abdullah, S., Binti Zain, Z., & Soaad Syed Yahaya, S. (2015). + ''' + young = [482.43, 484.36, 488.84, 495.15, 495.24, 502.69, 504.62, + 518.29, 519.1, 524.1, 524.12, 531.18, 548.42, 572.1, 584.68, + 609.09, 609.53, 666.63, 676.4] + middle = [335.59, 338.43, 353.54, 404.27, 437.5, 469.01, 485.85, + 487.3, 493.08, 494.31, 499.1, 886.41] + old = [519.01, 528.5, 530.23, 536.03, 538.56, 538.83, 557.24, 558.61, + 558.95, 565.43, 586.39, 594.69, 629.22, 645.69, 691.84] + soln = stats.alexandergovern(young, middle, old) + assert_allclose(soln.statistic, 5.3237, atol=1e-3) + assert_allclose(soln.pvalue, 0.06982, atol=1e-4) + + # verify with ag.test in r + ''' + > library("onewaytests") + > library("tibble") + > young <- c(482.43, 484.36, 488.84, 495.15, 495.24, 502.69, 504.62, + + 518.29, 519.1, 524.1, 524.12, 531.18, 548.42, 572.1, + + 584.68, 609.09, 609.53, 666.63, 676.4) + > middle <- c(335.59, 338.43, 353.54, 404.27, 437.5, 469.01, 485.85, + + 487.3, 493.08, 494.31, 499.1, 886.41) + > old <- c(519.01, 528.5, 530.23, 536.03, 538.56, 538.83, 557.24, + + 558.61, 558.95, 565.43, 586.39, 594.69, 629.22, + + 645.69, 691.84) + > young_fct <- c(rep("young", times=19)) + > middle_fct <-c(rep("middle", times=12)) + > old_fct <- c(rep("old", times=15)) + > ag.test(a ~ b, tibble(a=c(young, middle, old), b=factor(c(young_fct, + + middle_fct, old_fct)))) + + Alexander-Govern Test (alpha = 0.05) + ------------------------------------------------------------- + data : a and b + + statistic : 5.324629 + parameter : 2 + p.value : 0.06978651 + + Result : Difference is not statistically significant. + ------------------------------------------------------------- + + ''' + assert_allclose(soln.statistic, 5.324629) + assert_allclose(soln.pvalue, 0.06978651) + + def test_compare_scholar3(self): + ''' + Data taken from 'Robustness And Comparative Power Of WelchAspin, + Alexander-Govern And Yuen Tests Under Non-Normality And Variance + Heteroscedasticity', by Ayed A. Almoied. 2017. Page 34-37. 
+ https://digitalcommons.wayne.edu/cgi/viewcontent.cgi?article=2775&context=oa_dissertations + ''' + x1 = [-1.77559, -1.4113, -0.69457, -0.54148, -0.18808, -0.07152, + 0.04696, 0.051183, 0.148695, 0.168052, 0.422561, 0.458555, + 0.616123, 0.709968, 0.839956, 0.857226, 0.929159, 0.981442, + 0.999554, 1.642958] + x2 = [-1.47973, -1.2722, -0.91914, -0.80916, -0.75977, -0.72253, + -0.3601, -0.33273, -0.28859, -0.09637, -0.08969, -0.01824, + 0.260131, 0.289278, 0.518254, 0.683003, 0.877618, 1.172475, + 1.33964, 1.576766] + soln = stats.alexandergovern(x1, x2) + assert_allclose(soln.statistic, 0.713526, atol=1e-5) + assert_allclose(soln.pvalue, 0.398276, atol=1e-5) + + ''' + tested in ag.test in R: + > library("onewaytests") + > library("tibble") + > x1 <- c(-1.77559, -1.4113, -0.69457, -0.54148, -0.18808, -0.07152, + + 0.04696, 0.051183, 0.148695, 0.168052, 0.422561, 0.458555, + + 0.616123, 0.709968, 0.839956, 0.857226, 0.929159, 0.981442, + + 0.999554, 1.642958) + > x2 <- c(-1.47973, -1.2722, -0.91914, -0.80916, -0.75977, -0.72253, + + -0.3601, -0.33273, -0.28859, -0.09637, -0.08969, -0.01824, + + 0.260131, 0.289278, 0.518254, 0.683003, 0.877618, 1.172475, + + 1.33964, 1.576766) + > x1_fact <- c(rep("x1", times=20)) + > x2_fact <- c(rep("x2", times=20)) + > a <- c(x1, x2) + > b <- factor(c(x1_fact, x2_fact)) + > ag.test(a ~ b, tibble(a, b)) + Alexander-Govern Test (alpha = 0.05) + ------------------------------------------------------------- + data : a and b + + statistic : 0.7135182 + parameter : 1 + p.value : 0.3982783 + + Result : Difference is not statistically significant. + ------------------------------------------------------------- + ''' + assert_allclose(soln.statistic, 0.7135182) + assert_allclose(soln.pvalue, 0.3982783) + + def test_nan_policy_propagate(self): + args = [[1, 2, 3, 4], [1, np.nan]] + # default nan_policy is 'propagate' + res = stats.alexandergovern(*args) + assert_equal(res.pvalue, np.nan) + assert_equal(res.statistic, np.nan) + + def test_nan_policy_raise(self): + args = [[1, 2, 3, 4], [1, np.nan]] + with assert_raises(ValueError, match="The input contains nan values"): + stats.alexandergovern(*args, nan_policy='raise') + + def test_nan_policy_omit(self): + args_nan = [[1, 2, 3, np.nan, 4], [1, np.nan, 19, 25]] + args_no_nan = [[1, 2, 3, 4], [1, 19, 25]] + res_nan = stats.alexandergovern(*args_nan, nan_policy='omit') + res_no_nan = stats.alexandergovern(*args_no_nan) + assert_equal(res_nan.pvalue, res_no_nan.pvalue) + assert_equal(res_nan.statistic, res_no_nan.statistic) + + def test_constant_input(self): + # Zero variance input, consistent with `stats.pearsonr` + msg = "An input array is constant; the statistic is not defined." + with pytest.warns(stats.ConstantInputWarning, match=msg): + res = stats.alexandergovern([0.667, 0.667, 0.667], + [0.123, 0.456, 0.789]) + assert_equal(res.statistic, np.nan) + assert_equal(res.pvalue, np.nan) + + +class TestFOneWay: + + def test_trivial(self): + # A trivial test of stats.f_oneway, with F=0. + F, p = stats.f_oneway([0, 2], [0, 2]) + assert_equal(F, 0.0) + assert_equal(p, 1.0) + + def test_basic(self): + # Despite being a floating point calculation, this data should + # result in F being exactly 2.0. + F, p = stats.f_oneway([0, 2], [2, 4]) + assert_equal(F, 2.0) + assert_allclose(p, 1 - np.sqrt(0.5), rtol=1e-14) + + def test_known_exact(self): + # Another trivial dataset for which the exact F and p can be + # calculated.
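+        # Editor's hand check: the grand mean is 13/5 = 2.6, so
+        # SSB = 2*(2 - 2.6)**2 + 3*(3 - 2.6)**2 = 1.2 on 2 df and
+        # SSW = (2-3)**2 + (3-3)**2 + (4-3)**2 = 2 on 2 df, giving
+        # F = (1.2/2)/(2/2) = 3/5; the F(2, 2) survival function is
+        # 1/(1 + x), so p = 1/(1 + 3/5) = 5/8.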
+ F, p = stats.f_oneway([2], [2], [2, 3, 4]) + # The use of assert_equal might be too optimistic, but the calculation + # in this case is trivial enough that it is likely to go through with + # no loss of precision. + assert_equal(F, 3/5) + assert_equal(p, 5/8) + + def test_large_integer_array(self): + a = np.array([655, 788], dtype=np.uint16) + b = np.array([789, 772], dtype=np.uint16) + F, p = stats.f_oneway(a, b) + # The expected value was verified by computing it with mpmath with + # 40 digits of precision. + assert_allclose(F, 0.77450216931805540, rtol=1e-14) + + def test_result_attributes(self): + a = np.array([655, 788], dtype=np.uint16) + b = np.array([789, 772], dtype=np.uint16) + res = stats.f_oneway(a, b) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + def test_nist(self): + # These are the nist ANOVA files. They can be found at: + # https://www.itl.nist.gov/div898/strd/anova/anova.html + filenames = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat', + 'AtmWtAg.dat', 'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat', + 'SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat'] + + for test_case in filenames: + rtol = 1e-7 + fname = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'data/nist_anova', test_case)) + with open(fname) as f: + content = f.read().split('\n') + certified = [line.split() for line in content[40:48] + if line.strip()] + dataf = np.loadtxt(fname, skiprows=60) + y, x = dataf.T + y = y.astype(int) + caty = np.unique(y) + f = float(certified[0][-1]) + + xlist = [x[y == i] for i in caty] + res = stats.f_oneway(*xlist) + + # With the hard test cases we relax the tolerance a bit. + hard_tc = ('SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat') + if test_case in hard_tc: + rtol = 1e-4 + + assert_allclose(res[0], f, rtol=rtol, + err_msg='Failing testcase: %s' % test_case) + + @pytest.mark.parametrize("a, b, expected", [ + (np.array([42, 42, 42]), np.array([7, 7, 7]), (np.inf, 0)), + (np.array([42, 42, 42]), np.array([42, 42, 42]), (np.nan, np.nan)) + ]) + def test_constant_input(self, a, b, expected): + # For more details, see https://github.com/scipy/scipy/issues/11669 + msg = "Each of the input arrays is constant;" + with pytest.warns(stats.ConstantInputWarning, match=msg): + f, p = stats.f_oneway(a, b) + assert_equal((f, p), expected) + + @pytest.mark.parametrize('axis', [-2, -1, 0, 1]) + def test_2d_inputs(self, axis): + a = np.array([[1, 4, 3, 3], + [2, 5, 3, 3], + [3, 6, 3, 3], + [2, 3, 3, 3], + [1, 4, 3, 3]]) + b = np.array([[3, 1, 5, 3], + [4, 6, 5, 3], + [4, 3, 5, 3], + [1, 5, 5, 3], + [5, 5, 5, 3], + [2, 3, 5, 3], + [8, 2, 5, 3], + [2, 2, 5, 3]]) + c = np.array([[4, 3, 4, 3], + [4, 2, 4, 3], + [5, 4, 4, 3], + [5, 4, 4, 3]]) + + if axis in [-1, 1]: + a = a.T + b = b.T + c = c.T + take_axis = 0 + else: + take_axis = 1 + + warn_msg = "Each of the input arrays is constant;" + with pytest.warns(stats.ConstantInputWarning, match=warn_msg): + f, p = stats.f_oneway(a, b, c, axis=axis) + + # Verify that the result computed with the 2d arrays matches + # the result of calling f_oneway individually on each slice.
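+        # (Columns 2 and 3 are constant within every group, so those slices
+        # trigger the ConstantInputWarning themselves; they are checked in the
+        # second loop below.)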
+ for j in [0, 1]: + fj, pj = stats.f_oneway(np.take(a, j, take_axis), + np.take(b, j, take_axis), + np.take(c, j, take_axis)) + assert_allclose(f[j], fj, rtol=1e-14) + assert_allclose(p[j], pj, rtol=1e-14) + for j in [2, 3]: + with pytest.warns(stats.ConstantInputWarning, match=warn_msg): + fj, pj = stats.f_oneway(np.take(a, j, take_axis), + np.take(b, j, take_axis), + np.take(c, j, take_axis)) + assert_equal(f[j], fj) + assert_equal(p[j], pj) + + def test_3d_inputs(self): + # Some 3-d arrays. (There is nothing special about the values.) + a = 1/np.arange(1.0, 4*5*7 + 1).reshape(4, 5, 7) + b = 2/np.arange(1.0, 4*8*7 + 1).reshape(4, 8, 7) + c = np.cos(1/np.arange(1.0, 4*4*7 + 1).reshape(4, 4, 7)) + + f, p = stats.f_oneway(a, b, c, axis=1) + + assert f.shape == (4, 7) + assert p.shape == (4, 7) + + for i in range(a.shape[0]): + for j in range(a.shape[2]): + fij, pij = stats.f_oneway(a[i, :, j], b[i, :, j], c[i, :, j]) + assert_allclose(fij, f[i, j]) + assert_allclose(pij, p[i, j]) + + def test_length0_1d_error(self): + # Require at least one value in each group. + msg = 'at least one input has length 0' + with pytest.warns(stats.DegenerateDataWarning, match=msg): + result = stats.f_oneway([1, 2, 3], [], [4, 5, 6, 7]) + assert_equal(result, (np.nan, np.nan)) + + def test_length0_2d_error(self): + msg = 'at least one input has length 0' + with pytest.warns(stats.DegenerateDataWarning, match=msg): + ncols = 3 + a = np.ones((4, ncols)) + b = np.ones((0, ncols)) + c = np.ones((5, ncols)) + f, p = stats.f_oneway(a, b, c) + nans = np.full((ncols,), fill_value=np.nan) + assert_equal(f, nans) + assert_equal(p, nans) + + def test_all_length_one(self): + msg = 'all input arrays have length 1.' + with pytest.warns(stats.DegenerateDataWarning, match=msg): + result = stats.f_oneway([10], [11], [12], [13]) + assert_equal(result, (np.nan, np.nan)) + + @pytest.mark.parametrize('args', [(), ([1, 2, 3],)]) + def test_too_few_inputs(self, args): + with assert_raises(TypeError): + stats.f_oneway(*args) + + def test_axis_error(self): + a = np.ones((3, 4)) + b = np.ones((5, 4)) + with assert_raises(AxisError): + stats.f_oneway(a, b, axis=2) + + def test_bad_shapes(self): + a = np.ones((3, 4)) + b = np.ones((5, 4)) + with assert_raises(ValueError): + stats.f_oneway(a, b, axis=1) + + +class TestKruskal: + def test_simple(self): + x = [1] + y = [2] + h, p = stats.kruskal(x, y) + assert_equal(h, 1.0) + assert_approx_equal(p, stats.distributions.chi2.sf(h, 1)) + h, p = stats.kruskal(np.array(x), np.array(y)) + assert_equal(h, 1.0) + assert_approx_equal(p, stats.distributions.chi2.sf(h, 1)) + + def test_basic(self): + x = [1, 3, 5, 7, 9] + y = [2, 4, 6, 8, 10] + h, p = stats.kruskal(x, y) + assert_approx_equal(h, 3./11, significant=10) + assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1)) + h, p = stats.kruskal(np.array(x), np.array(y)) + assert_approx_equal(h, 3./11, significant=10) + assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1)) + + def test_simple_tie(self): + x = [1] + y = [1, 2] + h_uncorr = 1.5**2 + 2*2.25**2 - 12 + corr = 0.75 + expected = h_uncorr / corr # 0.5 + h, p = stats.kruskal(x, y) + # Since the expression is simple and the exact answer is 0.5, it + # should be safe to use assert_equal(). + assert_equal(h, expected) + + def test_another_tie(self): + x = [1, 1, 1, 2] + y = [2, 2, 2, 2] + h_uncorr = (12. / 8. / 9.) 
* 4 * (3**2 + 6**2) - 3 * 9 + corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8) + expected = h_uncorr / corr + h, p = stats.kruskal(x, y) + assert_approx_equal(h, expected) + + def test_three_groups(self): + # A test of stats.kruskal with three groups, with ties. + x = [1, 1, 1] + y = [2, 2, 2] + z = [2, 2] + h_uncorr = (12. / 8. / 9.) * (3*2**2 + 3*6**2 + 2*6**2) - 3 * 9 # 5.0 + corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8) + expected = h_uncorr / corr # 7.0 + h, p = stats.kruskal(x, y, z) + assert_approx_equal(h, expected) + assert_approx_equal(p, stats.distributions.chi2.sf(h, 2)) + + def test_empty(self): + # A test of stats.kruskal where one of the groups is empty. + x = [1, 1, 1] + y = [2, 2, 2] + z = [] + assert_equal(stats.kruskal(x, y, z), (np.nan, np.nan)) + + def test_kruskal_result_attributes(self): + x = [1, 3, 5, 7, 9] + y = [2, 4, 6, 8, 10] + res = stats.kruskal(x, y) + attributes = ('statistic', 'pvalue') + check_named_results(res, attributes) + + def test_nan_policy(self): + x = np.arange(10.) + x[9] = np.nan + assert_equal(stats.kruskal(x, x), (np.nan, np.nan)) + assert_almost_equal(stats.kruskal(x, x, nan_policy='omit'), (0.0, 1.0)) + assert_raises(ValueError, stats.kruskal, x, x, nan_policy='raise') + assert_raises(ValueError, stats.kruskal, x, x, nan_policy='foobar') + + def test_large_no_samples(self): + # Test to see if large samples are handled correctly. + n = 50000 + x = np.random.randn(n) + y = np.random.randn(n) + 50 + h, p = stats.kruskal(x, y) + expected = 0 + assert_approx_equal(p, expected) + + +class TestCombinePvalues: + + def test_fisher(self): + # Example taken from https://en.wikipedia.org/wiki/Fisher%27s_method#Example + xsq, p = stats.combine_pvalues([.01, .2, .3], method='fisher') + assert_approx_equal(p, 0.02156, significant=4) + + def test_stouffer(self): + Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer') + assert_approx_equal(p, 0.01651, significant=4) + + def test_stouffer2(self): + Z, p = stats.combine_pvalues([.5, .5, .5], method='stouffer') + assert_approx_equal(p, 0.5, significant=4) + + def test_weighted_stouffer(self): + Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer', + weights=np.ones(3)) + assert_approx_equal(p, 0.01651, significant=4) + + def test_weighted_stouffer2(self): + Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer', + weights=np.array((1, 4, 9))) + assert_approx_equal(p, 0.1464, significant=4) + + def test_pearson(self): + Z, p = stats.combine_pvalues([.01, .2, .3], method='pearson') + assert_approx_equal(p, 0.02213, significant=4) + + def test_tippett(self): + Z, p = stats.combine_pvalues([.01, .2, .3], method='tippett') + assert_approx_equal(p, 0.0297, significant=4) + + def test_mudholkar_george(self): + Z, p = stats.combine_pvalues([.1, .1, .1], method='mudholkar_george') + assert_approx_equal(p, 0.019462, significant=4) + + def test_mudholkar_george_equal_fisher_pearson_average(self): + Z, p = stats.combine_pvalues([.01, .2, .3], method='mudholkar_george') + Z_f, p_f = stats.combine_pvalues([.01, .2, .3], method='fisher') + Z_p, p_p = stats.combine_pvalues([.01, .2, .3], method='pearson') + assert_approx_equal(0.5 * (Z_f+Z_p), Z, significant=4) + + methods = ["fisher", "pearson", "tippett", "stouffer", "mudholkar_george"] + + @pytest.mark.parametrize("variant", ["single", "all", "random"]) + @pytest.mark.parametrize("method", methods) + def test_monotonicity(self, variant, method): + # Test that result increases monotonically with respect to input.
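+        # (Rationale, editor's note: each combining statistic here, e.g.
+        # Fisher's -2*sum(log(p)), is monotone in every input p-value, so the
+        # combined p-value should never decrease when a single input
+        # p-value increases.)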
+ m, n = 10, 7 + rng = np.random.default_rng(278448169958891062669391462690811630763) + + # `pvaluess` is an m × n array of p values. Each row corresponds to + # a set of p values to be combined with p values increasing + # monotonically down one column (single), simultaneously down each + # column (all), or independently down each column (random). + if variant == "single": + pvaluess = np.full((m, n), rng.random(n)) + pvaluess[:, 0] = np.linspace(0.1, 0.9, m) + elif variant == "all": + pvaluess = np.full((n, m), np.linspace(0.1, 0.9, m)).T + elif variant == "random": + pvaluess = np.sort(rng.uniform(0, 1, size=(m, n)), axis=0) + + combined_pvalues = [ + stats.combine_pvalues(pvalues, method=method)[1] + for pvalues in pvaluess + ] + assert np.all(np.diff(combined_pvalues) >= 0) + + @pytest.mark.parametrize("method", methods) + def test_result(self, method): + res = stats.combine_pvalues([.01, .2, .3], method=method) + assert_equal((res.statistic, res.pvalue), res) + + +class TestCdfDistanceValidation: + """ + Test that _cdf_distance() (via wasserstein_distance()) raises ValueErrors + for bad inputs. + """ + + def test_distinct_value_and_weight_lengths(self): + # When the number of weights does not match the number of values, + # a ValueError should be raised. + assert_raises(ValueError, stats.wasserstein_distance, + [1], [2], [4], [3, 1]) + assert_raises(ValueError, stats.wasserstein_distance, [1], [2], [1, 0]) + + def test_zero_weight(self): + # When a distribution is given zero weight, a ValueError should be + # raised. + assert_raises(ValueError, stats.wasserstein_distance, + [0, 1], [2], [0, 0]) + assert_raises(ValueError, stats.wasserstein_distance, + [0, 1], [2], [3, 1], [0]) + + def test_negative_weights(self): + # A ValueError should be raised if there are any negative weights. + assert_raises(ValueError, stats.wasserstein_distance, + [0, 1], [2, 2], [1, 1], [3, -1]) + + def test_empty_distribution(self): + # A ValueError should be raised when trying to measure the distance + # between something and nothing. + assert_raises(ValueError, stats.wasserstein_distance, [], [2, 2]) + assert_raises(ValueError, stats.wasserstein_distance, [1], []) + + def test_inf_weight(self): + # An inf weight is not valid. + assert_raises(ValueError, stats.wasserstein_distance, + [1, 2, 1], [1, 1], [1, np.inf, 1], [1, 1]) + + +class TestWassersteinDistanceND: + """ Tests for wasserstein_distance_nd() output values. + """ + + def test_published_values(self): + # Compare against published values and manually computed results. + # The values and computed result are posted at James D. McCaffrey's blog, + # https://jamesmccaffrey.wordpress.com/2018/03/05/earth-mover-distance + # -wasserstein-metric-example-calculation/ + u = [(1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), + (4,2), (6,1), (6,1)] + v = [(2,1), (2,1), (3,2), (3,2), (3,2), (5,1), (5,1), (5,1), (5,1), (5,1), + (5,1), (5,1), (7,1)] + + res = stats.wasserstein_distance_nd(u, v) + # In original post, the author kept two decimal places for ease of calculation. + # This test uses the more precise value of distance to get the precise results. + # For comparison, please see the table and figure in the original blog post. 
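+        # (Editor's note: `flow` below is the mass moved between support
+        # points in the blog's optimal transport plan and `dist` the matching
+        # Euclidean ground distances, so the reference value is the
+        # flow-weighted average distance.)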
+ flow = np.array([2., 3., 5., 1., 1., 1.]) + dist = np.array([1.00, 5**0.5, 4.00, 2**0.5, 1.00, 1.00]) + ref = np.sum(flow * dist)/np.sum(flow) + assert_allclose(res, ref) + + @pytest.mark.parametrize('n_value', (4, 15, 35)) + @pytest.mark.parametrize('ndim', (3, 4, 7)) + @pytest.mark.parametrize('max_repeats', (5, 10)) + def test_same_distribution_nD(self, ndim, n_value, max_repeats): + # Any distribution moved to itself should have a Wasserstein distance + # of zero. + rng = np.random.default_rng(363836384995579937222333) + repeats = rng.integers(1, max_repeats, size=n_value, dtype=int) + + u_values = rng.random(size=(n_value, ndim)) + v_values = np.repeat(u_values, repeats, axis=0) + v_weights = rng.random(np.sum(repeats)) + range_repeat = np.repeat(np.arange(len(repeats)), repeats) + u_weights = np.bincount(range_repeat, weights=v_weights) + index = rng.permutation(len(v_weights)) + v_values, v_weights = v_values[index], v_weights[index] + + res = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights) + assert_allclose(res, 0, atol=1e-15) + + @pytest.mark.parametrize('nu', (8, 9, 38)) + @pytest.mark.parametrize('nv', (8, 12, 17)) + @pytest.mark.parametrize('ndim', (3, 5, 23)) + def test_collapse_nD(self, nu, nv, ndim): + # test collapse for n dimensional values + # Collapsing a n-D distribution to a point distribution at zero + # is equivalent to taking the average of the norm of data. + rng = np.random.default_rng(38573488467338826109) + u_values = rng.random(size=(nu, ndim)) + v_values = np.zeros((nv, ndim)) + u_weights = rng.random(size=nu) + v_weights = rng.random(size=nv) + ref = np.average(np.linalg.norm(u_values, axis=1), weights=u_weights) + res = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights) + assert_allclose(res, ref) + + @pytest.mark.parametrize('nu', (8, 16, 32)) + @pytest.mark.parametrize('nv', (8, 16, 32)) + @pytest.mark.parametrize('ndim', (1, 2, 6)) + def test_zero_weight_nD(self, nu, nv, ndim): + # Values with zero weight have no impact on the Wasserstein distance. + rng = np.random.default_rng(38573488467338826109) + u_values = rng.random(size=(nu, ndim)) + v_values = rng.random(size=(nv, ndim)) + u_weights = rng.random(size=nu) + v_weights = rng.random(size=nv) + ref = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights) + + add_row, nrows = rng.integers(0, nu, size=2) + add_value = rng.random(size=(nrows, ndim)) + u_values = np.insert(u_values, add_row, add_value, axis=0) + u_weights = np.insert(u_weights, add_row, np.zeros(nrows), axis=0) + res = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights) + assert_allclose(res, ref) + + def test_inf_values(self): + # Inf values can lead to an inf distance or trigger a RuntimeWarning + # (and return NaN) if the distance is undefined. 
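+        # (Editor's note: a finite sample against one containing infinities
+        # yields an inf distance, while infinities on both sides give
+        # inf - inf = nan, which is why the second call runs under
+        # np.errstate(invalid='ignore').)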
+ uv, vv, uw = [[1, 1], [2, 1]], [[np.inf, -np.inf]], [1, 1] + distance = stats.wasserstein_distance_nd(uv, vv, uw) + assert_equal(distance, np.inf) + with np.errstate(invalid='ignore'): + uv, vv = [[np.inf, np.inf]], [[np.inf, -np.inf]] + distance = stats.wasserstein_distance_nd(uv, vv) + assert_equal(distance, np.nan) + + @pytest.mark.parametrize('nu', (10, 15, 20)) + @pytest.mark.parametrize('nv', (10, 15, 20)) + @pytest.mark.parametrize('ndim', (1, 3, 5)) + def test_multi_dim_nD(self, nu, nv, ndim): + # Adding an identical constant coordinate to both distributions does + # not affect the result. + rng = np.random.default_rng(2736495738494849509) + u_values = rng.random(size=(nu, ndim)) + v_values = rng.random(size=(nv, ndim)) + u_weights = rng.random(size=nu) + v_weights = rng.random(size=nv) + ref = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights) + + add_dim = rng.integers(0, ndim) + add_value = rng.random() + + u_values = np.insert(u_values, add_dim, add_value, axis=1) + v_values = np.insert(v_values, add_dim, add_value, axis=1) + res = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights) + assert_allclose(res, ref) + + @pytest.mark.parametrize('nu', (7, 13, 19)) + @pytest.mark.parametrize('nv', (7, 13, 19)) + @pytest.mark.parametrize('ndim', (2, 4, 7)) + def test_orthogonal_nD(self, nu, nv, ndim): + # Orthogonal transformations and translations do not affect the + # wasserstein_distance result. + rng = np.random.default_rng(34746837464536) + u_values = rng.random(size=(nu, ndim)) + v_values = rng.random(size=(nv, ndim)) + u_weights = rng.random(size=nu) + v_weights = rng.random(size=nv) + ref = stats.wasserstein_distance_nd(u_values, v_values, u_weights, v_weights) + + dist = stats.ortho_group(ndim) + transform = dist.rvs(random_state=rng) + shift = rng.random(size=ndim) + res = stats.wasserstein_distance_nd(u_values @ transform + shift, + v_values @ transform + shift, + u_weights, v_weights) + assert_allclose(res, ref) + + def test_error_code(self): + rng = np.random.default_rng(52473644737485644836320101) + with pytest.raises(ValueError, match='Invalid input values. The inputs'): + u_values = rng.random(size=(4, 10, 15)) + v_values = rng.random(size=(6, 2, 7)) + _ = stats.wasserstein_distance_nd(u_values, v_values) + with pytest.raises(ValueError, match='Invalid input values. Dimensions'): + u_values = rng.random(size=(15,)) + v_values = rng.random(size=(3, 15)) + _ = stats.wasserstein_distance_nd(u_values, v_values) + with pytest.raises(ValueError, + match='Invalid input values. If two-dimensional'): + u_values = rng.random(size=(2, 10)) + v_values = rng.random(size=(2, 2)) + _ = stats.wasserstein_distance_nd(u_values, v_values) + + @pytest.mark.parametrize('u_size', [1, 10, 300]) + @pytest.mark.parametrize('v_size', [1, 10, 300]) + def test_optimization_vs_analytical(self, u_size, v_size): + rng = np.random.default_rng(45634745675) + # Test when u_weights = None, v_weights = None + u_values = rng.random(size=(u_size, 1)) + v_values = rng.random(size=(v_size, 1)) + u_values_flat = u_values.ravel() + v_values_flat = v_values.ravel() + # These three calculations are done using different backends + # but they must be equal + d1 = stats.wasserstein_distance(u_values_flat, v_values_flat) + d2 = stats.wasserstein_distance_nd(u_values, v_values) + d3 = stats.wasserstein_distance_nd(u_values_flat, v_values_flat) + assert_allclose(d2, d1) + assert_allclose(d3, d1) + # Test with u_weights and v_weights specified.
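+        # (Editor's note: the 1-D distance has a closed form - the integral of
+        # the absolute difference between the two weighted empirical CDFs -
+        # whereas the n-D routine solves a linear program, so agreement here
+        # cross-checks the two backends.)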
+ u_weights = rng.random(size=u_size) + v_weights = rng.random(size=v_size) + d1 = stats.wasserstein_distance(u_values_flat, v_values_flat, + u_weights, v_weights) + d2 = stats.wasserstein_distance_nd(u_values, v_values, + u_weights, v_weights) + d3 = stats.wasserstein_distance_nd(u_values_flat, v_values_flat, + u_weights, v_weights) + assert_allclose(d2, d1) + assert_allclose(d3, d1) + + +class TestWassersteinDistance: + """ Tests for wasserstein_distance() output values. + """ + + def test_simple(self): + # For basic distributions, the value of the Wasserstein distance is + # straightforward. + assert_allclose( + stats.wasserstein_distance([0, 1], [0], [1, 1], [1]), + .5) + assert_allclose(stats.wasserstein_distance( + [0, 1], [0], [3, 1], [1]), + .25) + assert_allclose(stats.wasserstein_distance( + [0, 2], [0], [1, 1], [1]), + 1) + assert_allclose(stats.wasserstein_distance( + [0, 1, 2], [1, 2, 3]), + 1) + + def test_same_distribution(self): + # Any distribution moved to itself should have a Wasserstein distance + # of zero. + assert_equal(stats.wasserstein_distance([1, 2, 3], [2, 1, 3]), 0) + assert_equal( + stats.wasserstein_distance([1, 1, 1, 4], [4, 1], + [1, 1, 1, 1], [1, 3]), + 0) + + def test_shift(self): + # If the whole distribution is shifted by x, then the Wasserstein + # distance should be the norm of x. + assert_allclose(stats.wasserstein_distance([0], [1]), 1) + assert_allclose(stats.wasserstein_distance([-5], [5]), 10) + assert_allclose( + stats.wasserstein_distance([1, 2, 3, 4, 5], [11, 12, 13, 14, 15]), + 10) + assert_allclose( + stats.wasserstein_distance([4.5, 6.7, 2.1], [4.6, 7, 9.2], + [3, 1, 1], [1, 3, 1]), + 2.5) + + def test_combine_weights(self): + # Assigning a weight w to a value is equivalent to including that value + # w times in the value array with weight of 1. + assert_allclose( + stats.wasserstein_distance( + [0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4], + [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]), + stats.wasserstein_distance([5, 0, 1], [0, 4, 3], + [1, 2, 4], [1, 2, 4])) + + def test_collapse(self): + # Collapsing a distribution to a point distribution at zero is + # equivalent to taking the average of the absolute values of the + # values. + u = np.arange(-10, 30, 0.3) + v = np.zeros_like(u) + assert_allclose( + stats.wasserstein_distance(u, v), + np.mean(np.abs(u))) + + u_weights = np.arange(len(u)) + v_weights = u_weights[::-1] + assert_allclose( + stats.wasserstein_distance(u, v, u_weights, v_weights), + np.average(np.abs(u), weights=u_weights)) + + def test_zero_weight(self): + # Values with zero weight have no impact on the Wasserstein distance. + assert_allclose( + stats.wasserstein_distance([1, 2, 100000], [1, 1], + [1, 1, 0], [1, 1]), + stats.wasserstein_distance([1, 2], [1, 1], [1, 1], [1, 1])) + + def test_inf_values(self): + # Inf values can lead to an inf distance or trigger a RuntimeWarning + # (and return NaN) if the distance is undefined. + assert_equal( + stats.wasserstein_distance([1, 2, np.inf], [1, 1]), + np.inf) + assert_equal( + stats.wasserstein_distance([1, 2, np.inf], [-np.inf, 1]), + np.inf) + assert_equal( + stats.wasserstein_distance([1, -np.inf, np.inf], [1, 1]), + np.inf) + with suppress_warnings() as sup: + sup.record(RuntimeWarning, "invalid value*") + assert_equal( + stats.wasserstein_distance([1, 2, np.inf], [np.inf, 1]), + np.nan) + + +class TestEnergyDistance: + """ Tests for energy_distance() output values. 
+ """ + + def test_simple(self): + # For basic distributions, the value of the energy distance is + # straightforward. + assert_almost_equal( + stats.energy_distance([0, 1], [0], [1, 1], [1]), + np.sqrt(2) * .5) + assert_almost_equal(stats.energy_distance( + [0, 1], [0], [3, 1], [1]), + np.sqrt(2) * .25) + assert_almost_equal(stats.energy_distance( + [0, 2], [0], [1, 1], [1]), + 2 * .5) + assert_almost_equal( + stats.energy_distance([0, 1, 2], [1, 2, 3]), + np.sqrt(2) * (3*(1./3**2))**.5) + + def test_same_distribution(self): + # Any distribution moved to itself should have a energy distance of + # zero. + assert_equal(stats.energy_distance([1, 2, 3], [2, 1, 3]), 0) + assert_equal( + stats.energy_distance([1, 1, 1, 4], [4, 1], [1, 1, 1, 1], [1, 3]), + 0) + + def test_shift(self): + # If a single-point distribution is shifted by x, then the energy + # distance should be sqrt(2) * sqrt(x). + assert_almost_equal(stats.energy_distance([0], [1]), np.sqrt(2)) + assert_almost_equal( + stats.energy_distance([-5], [5]), + np.sqrt(2) * 10**.5) + + def test_combine_weights(self): + # Assigning a weight w to a value is equivalent to including that value + # w times in the value array with weight of 1. + assert_almost_equal( + stats.energy_distance([0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4], + [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]), + stats.energy_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4])) + + def test_zero_weight(self): + # Values with zero weight have no impact on the energy distance. + assert_almost_equal( + stats.energy_distance([1, 2, 100000], [1, 1], [1, 1, 0], [1, 1]), + stats.energy_distance([1, 2], [1, 1], [1, 1], [1, 1])) + + def test_inf_values(self): + # Inf values can lead to an inf distance or trigger a RuntimeWarning + # (and return NaN) if the distance is undefined. + assert_equal(stats.energy_distance([1, 2, np.inf], [1, 1]), np.inf) + assert_equal( + stats.energy_distance([1, 2, np.inf], [-np.inf, 1]), + np.inf) + assert_equal( + stats.energy_distance([1, -np.inf, np.inf], [1, 1]), + np.inf) + with suppress_warnings() as sup: + sup.record(RuntimeWarning, "invalid value*") + assert_equal( + stats.energy_distance([1, 2, np.inf], [np.inf, 1]), + np.nan) + + +class TestBrunnerMunzel: + # Data from (Lumley, 1996) + X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1] + Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4] + significant = 13 + + def test_brunnermunzel_one_sided(self): + # Results are compared with R's lawstat package. + u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='less') + u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='greater') + u3, p3 = stats.brunnermunzel(self.X, self.Y, alternative='greater') + u4, p4 = stats.brunnermunzel(self.Y, self.X, alternative='less') + + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(p3, p4, significant=self.significant) + assert_(p1 != p3) + assert_approx_equal(u1, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(u3, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u4, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0028931043330757342, + significant=self.significant) + assert_approx_equal(p3, 0.99710689566692423, + significant=self.significant) + + def test_brunnermunzel_two_sided(self): + # Results are compared with R's lawstat package. 
+ + class TestBrunnerMunzel: + # Data from (Lumley, 1996) + X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1] + Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4] + significant = 13 + + def test_brunnermunzel_one_sided(self): + # Results are compared with R's lawstat package. + u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='less') + u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='greater') + u3, p3 = stats.brunnermunzel(self.X, self.Y, alternative='greater') + u4, p4 = stats.brunnermunzel(self.Y, self.X, alternative='less') + + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(p3, p4, significant=self.significant) + assert_(p1 != p3) + assert_approx_equal(u1, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(u3, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u4, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0028931043330757342, + significant=self.significant) + assert_approx_equal(p3, 0.99710689566692423, + significant=self.significant) + + def test_brunnermunzel_two_sided(self): + # Results are compared with R's lawstat package. + u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='two-sided') + u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='two-sided') + + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(u1, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0057862086661515377, + significant=self.significant) + + def test_brunnermunzel_default(self): + # The default value for alternative is two-sided + u1, p1 = stats.brunnermunzel(self.X, self.Y) + u2, p2 = stats.brunnermunzel(self.Y, self.X) + + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(u1, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0057862086661515377, + significant=self.significant) + + def test_brunnermunzel_alternative_error(self): + alternative = "error" + distribution = "t" + nan_policy = "propagate" + assert_(alternative not in ["two-sided", "greater", "less"]) + assert_raises(ValueError, + stats.brunnermunzel, + self.X, + self.Y, + alternative, + distribution, + nan_policy) + + def test_brunnermunzel_distribution_norm(self): + u1, p1 = stats.brunnermunzel(self.X, self.Y, distribution="normal") + u2, p2 = stats.brunnermunzel(self.Y, self.X, distribution="normal") + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(u1, 3.1374674823029505, + significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0017041417600383024, + significant=self.significant) + + def test_brunnermunzel_distribution_error(self): + alternative = "two-sided" + distribution = "error" + nan_policy = "propagate" + assert_(distribution not in ["t", "normal"]) + assert_raises(ValueError, + stats.brunnermunzel, + self.X, + self.Y, + alternative, + distribution, + nan_policy) + + def test_brunnermunzel_empty_input(self): + u1, p1 = stats.brunnermunzel(self.X, []) + u2, p2 = stats.brunnermunzel([], self.Y) + u3, p3 = stats.brunnermunzel([], []) + + assert_equal(u1, np.nan) + assert_equal(p1, np.nan) + assert_equal(u2, np.nan) + assert_equal(p2, np.nan) + assert_equal(u3, np.nan) + assert_equal(p3, np.nan) + + def test_brunnermunzel_nan_input_propagate(self): + X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan] + Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4] + u1, p1 = stats.brunnermunzel(X, Y, nan_policy="propagate") + u2, p2 = stats.brunnermunzel(Y, X, nan_policy="propagate") + + assert_equal(u1, np.nan) + assert_equal(p1, np.nan) + assert_equal(u2, np.nan) + assert_equal(p2, np.nan) + + def test_brunnermunzel_nan_input_raise(self): + X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan] + Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4] + alternative = "two-sided" + distribution = "t" + nan_policy = "raise" + + assert_raises(ValueError, + stats.brunnermunzel, + X, + Y, + alternative, + distribution, + nan_policy) + assert_raises(ValueError, + stats.brunnermunzel, + Y, + X, + alternative, + distribution, + nan_policy) + + def test_brunnermunzel_nan_input_omit(self): + X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan] + Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4] + u1, p1 = stats.brunnermunzel(X, Y, nan_policy="omit") + u2, p2 = stats.brunnermunzel(Y, X, nan_policy="omit") + + assert_approx_equal(p1, p2, significant=self.significant) + assert_approx_equal(u1, 3.1374674823029505, +
significant=self.significant) + assert_approx_equal(u2, -3.1374674823029505, + significant=self.significant) + assert_approx_equal(p1, 0.0057862086661515377, + significant=self.significant) + + def test_brunnermunzel_return_nan(self): + """ tests that a warning is emitted when p is nan + p-value with t-distributions can be nan (0/0) (see gh-15843) + """ + x = [1, 2, 3] + y = [5, 6, 7, 8, 9] + + msg = "p-value cannot be estimated|divide by zero|invalid value encountered" + with pytest.warns(RuntimeWarning, match=msg): + stats.brunnermunzel(x, y, distribution="t") + + def test_brunnermunzel_normal_dist(self): + """ tests that a p is 0 for datasets that cause p->nan + when t-distribution is used (see gh-15843) + """ + x = [1, 2, 3] + y = [5, 6, 7, 8, 9] + + with pytest.warns(RuntimeWarning, match='divide by zero'): + _, p = stats.brunnermunzel(x, y, distribution="normal") + assert_equal(p, 0) + + +class TestRatioUniforms: + """ Tests for rvs_ratio_uniforms are in test_sampling.py, + as rvs_ratio_uniforms is deprecated and moved to stats.sampling """ + def test_consistency(self): + f = stats.norm.pdf + v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) + umax = np.sqrt(f(0)) + gen = stats.sampling.RatioUniforms(f, umax=umax, vmin=-v, vmax=v, + random_state=12345) + r1 = gen.rvs(10) + deprecation_msg = ("Please use `RatioUniforms` from the " + "`scipy.stats.sampling` namespace.") + with pytest.warns(DeprecationWarning, match=deprecation_msg): + r2 = stats.rvs_ratio_uniforms(f, umax, -v, v, size=10, + random_state=12345) + assert_equal(r1, r2) + + +class TestMGCErrorWarnings: + """ Tests errors and warnings derived from MGC. + """ + def test_error_notndarray(self): + # raises error if x or y is not a ndarray + x = np.arange(20) + y = [5] * 20 + assert_raises(ValueError, stats.multiscale_graphcorr, x, y) + assert_raises(ValueError, stats.multiscale_graphcorr, y, x) + + def test_error_shape(self): + # raises error if number of samples different (n) + x = np.arange(100).reshape(25, 4) + y = x.reshape(10, 10) + assert_raises(ValueError, stats.multiscale_graphcorr, x, y) + + def test_error_lowsamples(self): + # raises error if samples are low (< 3) + x = np.arange(3) + y = np.arange(3) + assert_raises(ValueError, stats.multiscale_graphcorr, x, y) + + def test_error_nans(self): + # raises error if inputs contain NaNs + x = np.arange(20, dtype=float) + x[0] = np.nan + assert_raises(ValueError, stats.multiscale_graphcorr, x, x) + + y = np.arange(20) + assert_raises(ValueError, stats.multiscale_graphcorr, x, y) + + def test_error_wrongdisttype(self): + # raises error if metric is not a function + x = np.arange(20) + compute_distance = 0 + assert_raises(ValueError, stats.multiscale_graphcorr, x, x, + compute_distance=compute_distance) + + @pytest.mark.parametrize("reps", [ + -1, # reps is negative + '1', # reps is not integer + ]) + def test_error_reps(self, reps): + # raises error if reps is negative + x = np.arange(20) + assert_raises(ValueError, stats.multiscale_graphcorr, x, x, reps=reps) + + def test_warns_reps(self): + # raises warning when reps is less than 1000 + x = np.arange(20) + reps = 100 + assert_warns(RuntimeWarning, stats.multiscale_graphcorr, x, x, reps=reps) + + def test_error_infty(self): + # raises error if input contains infinities + x = np.arange(20) + y = np.ones(20) * np.inf + assert_raises(ValueError, stats.multiscale_graphcorr, x, y) + + +class TestMGCStat: + """ Test validity of MGC test statistic + """ + def _simulations(self, samps=100, dims=1, sim_type=""): + # linear simulation + if 
sim_type == "linear": + x = np.random.uniform(-1, 1, size=(samps, 1)) + y = x + 0.3 * np.random.random_sample(size=(x.size, 1)) + + # spiral simulation + elif sim_type == "nonlinear": + unif = np.array(np.random.uniform(0, 5, size=(samps, 1))) + x = unif * np.cos(np.pi * unif) + y = (unif * np.sin(np.pi * unif) + + 0.4*np.random.random_sample(size=(x.size, 1))) + + # independence (tests type I simulation) + elif sim_type == "independence": + u = np.random.normal(0, 1, size=(samps, 1)) + v = np.random.normal(0, 1, size=(samps, 1)) + u_2 = np.random.binomial(1, p=0.5, size=(samps, 1)) + v_2 = np.random.binomial(1, p=0.5, size=(samps, 1)) + x = u/3 + 2*u_2 - 1 + y = v/3 + 2*v_2 - 1 + + # raises error if not approved sim_type + else: + raise ValueError("sim_type must be linear, nonlinear, or " + "independence") + + # add dimensions of noise for higher dimensions + if dims > 1: + dims_noise = np.random.normal(0, 1, size=(samps, dims-1)) + x = np.concatenate((x, dims_noise), axis=1) + + return x, y + + @pytest.mark.slow + @pytest.mark.parametrize("sim_type, obs_stat, obs_pvalue", [ + ("linear", 0.97, 1/1000), # test linear simulation + ("nonlinear", 0.163, 1/1000), # test spiral simulation + ("independence", -0.0094, 0.78) # test independence simulation + ]) + def test_oned(self, sim_type, obs_stat, obs_pvalue): + np.random.seed(12345678) + + # generate x and y + x, y = self._simulations(samps=100, dims=1, sim_type=sim_type) + + # test stat and pvalue + stat, pvalue, _ = stats.multiscale_graphcorr(x, y) + assert_approx_equal(stat, obs_stat, significant=1) + assert_approx_equal(pvalue, obs_pvalue, significant=1) + + @pytest.mark.slow + @pytest.mark.parametrize("sim_type, obs_stat, obs_pvalue", [ + ("linear", 0.184, 1/1000), # test linear simulation + ("nonlinear", 0.0190, 0.117), # test spiral simulation + ]) + def test_fived(self, sim_type, obs_stat, obs_pvalue): + np.random.seed(12345678) + + # generate x and y + x, y = self._simulations(samps=100, dims=5, sim_type=sim_type) + + # test stat and pvalue + stat, pvalue, _ = stats.multiscale_graphcorr(x, y) + assert_approx_equal(stat, obs_stat, significant=1) + assert_approx_equal(pvalue, obs_pvalue, significant=1) + + @pytest.mark.xslow + def test_twosamp(self): + np.random.seed(12345678) + + # generate x and y + x = np.random.binomial(100, 0.5, size=(100, 5)) + y = np.random.normal(0, 1, size=(80, 5)) + + # test stat and pvalue + stat, pvalue, _ = stats.multiscale_graphcorr(x, y) + assert_approx_equal(stat, 1.0, significant=1) + assert_approx_equal(pvalue, 0.001, significant=1) + + # generate x and y + y = np.random.normal(0, 1, size=(100, 5)) + + # test stat and pvalue + stat, pvalue, _ = stats.multiscale_graphcorr(x, y, is_twosamp=True) + assert_approx_equal(stat, 1.0, significant=1) + assert_approx_equal(pvalue, 0.001, significant=1) + + @pytest.mark.slow + def test_workers(self): + np.random.seed(12345678) + + # generate x and y + x, y = self._simulations(samps=100, dims=1, sim_type="linear") + + # test stat and pvalue + stat, pvalue, _ = stats.multiscale_graphcorr(x, y, workers=2) + assert_approx_equal(stat, 0.97, significant=1) + assert_approx_equal(pvalue, 0.001, significant=1) + + @pytest.mark.slow + def test_random_state(self): + # generate x and y + x, y = self._simulations(samps=100, dims=1, sim_type="linear") + + # test stat and pvalue + stat, pvalue, _ = stats.multiscale_graphcorr(x, y, random_state=1) + assert_approx_equal(stat, 0.97, significant=1) + assert_approx_equal(pvalue, 0.001, significant=1) + + @pytest.mark.slow + def 
test_dist_perm(self): + np.random.seed(12345678) + # generate x and y + x, y = self._simulations(samps=100, dims=1, sim_type="nonlinear") + distx = cdist(x, x, metric="euclidean") + disty = cdist(y, y, metric="euclidean") + + stat_dist, pvalue_dist, _ = stats.multiscale_graphcorr(distx, disty, + compute_distance=None, + random_state=1) + assert_approx_equal(stat_dist, 0.163, significant=1) + assert_approx_equal(pvalue_dist, 0.001, significant=1) + + @pytest.mark.slow + def test_pvalue_literature(self): + np.random.seed(12345678) + + # generate x and y + x, y = self._simulations(samps=100, dims=1, sim_type="linear") + + # test stat and pvalue + _, pvalue, _ = stats.multiscale_graphcorr(x, y, random_state=1) + assert_allclose(pvalue, 1/1001) + + @pytest.mark.slow + def test_alias(self): + np.random.seed(12345678) + + # generate x and y + x, y = self._simulations(samps=100, dims=1, sim_type="linear") + + res = stats.multiscale_graphcorr(x, y, random_state=1) + assert_equal(res.stat, res.statistic) + + +class TestQuantileTest: + r""" Test the non-parametric quantile test, + including the computation of confidence intervals + """ + + def test_quantile_test_iv(self): + x = [1, 2, 3] + + message = "`x` must be a one-dimensional array of numbers." + with pytest.raises(ValueError, match=message): + stats.quantile_test([x]) + + message = "`q` must be a scalar." + with pytest.raises(ValueError, match=message): + stats.quantile_test(x, q=[1, 2]) + + message = "`p` must be a float strictly between 0 and 1." + with pytest.raises(ValueError, match=message): + stats.quantile_test(x, p=[0.5, 0.75]) + with pytest.raises(ValueError, match=message): + stats.quantile_test(x, p=2) + with pytest.raises(ValueError, match=message): + stats.quantile_test(x, p=-0.5) + + message = "`alternative` must be one of..." + with pytest.raises(ValueError, match=message): + stats.quantile_test(x, alternative='one-sided') + + message = "`confidence_level` must be a number between 0 and 1." + with pytest.raises(ValueError, match=message): + stats.quantile_test(x).confidence_interval(1) + + @pytest.mark.parametrize( + 'p, alpha, lb, ub, alternative', + [[0.3, 0.95, 1.221402758160170, 1.476980793882643, 'two-sided'], + [0.5, 0.9, 1.506817785112854, 1.803988415397857, 'two-sided'], + [0.25, 0.95, -np.inf, 1.39096812846378, 'less'], + [0.8, 0.9, 2.117000016612675, np.inf, 'greater']] + ) + def test_R_ci_quantile(self, p, alpha, lb, ub, alternative): + # Test against R library `confintr` function `ci_quantile`, e.g. + # library(confintr) + # options(digits=16) + # x <- exp(seq(0, 1, by = 0.01)) + # ci_quantile(x, q = 0.3)$interval + # ci_quantile(x, q = 0.5, probs = c(0.05, 0.95))$interval + # ci_quantile(x, q = 0.25, probs = c(0, 0.95))$interval + # ci_quantile(x, q = 0.8, probs = c(0.1, 1))$interval + x = np.exp(np.arange(0, 1.01, 0.01)) + res = stats.quantile_test(x, p=p, alternative=alternative) + assert_allclose(res.confidence_interval(alpha), [lb, ub], rtol=1e-15) + + @pytest.mark.parametrize( + 'q, p, alternative, ref', + [[1.2, 0.3, 'two-sided', 0.01515567517648], + [1.8, 0.5, 'two-sided', 0.1109183496606]] + ) + def test_R_pvalue(self, q, p, alternative, ref): + # Test against R library `snpar` function `quant.test`, e.g. 
+ # library(snpar) + # options(digits=16) + # x <- exp(seq(0, 1, by = 0.01)) + # quant.test(x, q=1.2, p=0.3, exact=TRUE, alternative='t') + x = np.exp(np.arange(0, 1.01, 0.01)) + res = stats.quantile_test(x, q=q, p=p, alternative=alternative) + assert_allclose(res.pvalue, ref, rtol=1e-12) + + @pytest.mark.parametrize('case', ['continuous', 'discrete']) + @pytest.mark.parametrize('alternative', ['less', 'greater']) + @pytest.mark.parametrize('alpha', [0.9, 0.95]) + def test_pval_ci_match(self, case, alternative, alpha): + # Verify that the following statement holds: + + # The confidence interval at level `alpha` corresponding to + # alternative='less' has -inf as its lower bound, and the upper bound + # `xu` is the greatest element from the sample `x` such that + # `stats.quantile_test(x, q=xu, p=p, alternative='less').pvalue` + # is greater than 1 - alpha (e.g. 5% for a 95% interval). + + # And the corresponding statement holds for the alternative='greater' + # case. + + seed = int((7**len(case) + len(alternative))*alpha) + rng = np.random.default_rng(seed) + if case == 'continuous': + p, q = rng.random(size=2) + rvs = rng.random(size=100) + else: + rvs = rng.integers(1, 11, size=100) + p = rng.random() + q = rng.integers(1, 11) + + res = stats.quantile_test(rvs, q=q, p=p, alternative=alternative) + ci = res.confidence_interval(confidence_level=alpha) + + # select elements inside the confidence interval based on alternative + if alternative == 'less': + i_inside = rvs <= ci.high + else: + i_inside = rvs >= ci.low + + for x in rvs[i_inside]: + res = stats.quantile_test(rvs, q=x, p=p, alternative=alternative) + assert res.pvalue > 1 - alpha + + for x in rvs[~i_inside]: + res = stats.quantile_test(rvs, q=x, p=p, alternative=alternative) + assert res.pvalue < 1 - alpha + + def test_match_conover_examples(self): + # Test against the examples in [1] (Conover Practical Nonparametric + # Statistics Third Edition) pg 139 + + # Example 1 + # Data is [189, 233, 195, 160, 212, 176, 231, 185, 199, 213, 202, 193, + # 174, 166, 248] + # Two-sided test of whether the upper quartile (p=0.75) equals 193 + # (q=193). Conover shows that 7 of the observations are less than or + # equal to 193, and "for the binomial random variable Y, P(Y<=7) = + # 0.0173", so the two-sided p-value is twice that, 0.0346. + x = [189, 233, 195, 160, 212, 176, 231, 185, 199, 213, 202, 193, + 174, 166, 248] + pvalue_expected = 0.0346 + res = stats.quantile_test(x, q=193, p=0.75, alternative='two-sided') + assert_allclose(res.pvalue, pvalue_expected, rtol=1e-5) + + # Example 2 + # Conover doesn't give explicit data, just that 8 out of 112 + # observations are 60 or less. The test is whether the median time is + # equal to 60 against the alternative that the median is greater than + # 60. The p-value is calculated as P(Y<=8), where Y is again a binomial + # distributed random variable, now with p=0.5 and n=112. Conover uses a + # normal approximation, but we can easily calculate the CDF of the + # binomial distribution. + x = [59]*8 + [61]*(112-8) + pvalue_expected = stats.binom(p=0.5, n=112).cdf(k=8) + res = stats.quantile_test(x, q=60, p=0.5, alternative='greater') + assert_allclose(res.pvalue, pvalue_expected, atol=1e-10) + + + class TestPageTrendTest: + # expected statistic and p-values generated using R at + # https://rdrr.io/cran/cultevo/, e.g.
+ # library(cultevo) + # data = rbind(c(72, 47, 73, 35, 47, 96, 30, 59, 41, 36, 56, 49, 81, 43, + # 70, 47, 28, 28, 62, 20, 61, 20, 80, 24, 50), + # c(68, 52, 60, 34, 44, 20, 65, 88, 21, 81, 48, 31, 31, 67, + # 69, 94, 30, 24, 40, 87, 70, 43, 50, 96, 43), + # c(81, 13, 85, 35, 79, 12, 92, 86, 21, 64, 16, 64, 68, 17, + # 16, 89, 71, 43, 43, 36, 54, 13, 66, 51, 55)) + # result = page.test(data, verbose=FALSE) + # Most test cases generated to achieve common critical p-values so that + # results could be checked (to limited precision) against tables in + # scipy.stats.page_trend_test reference [1] + + np.random.seed(0) + data_3_25 = np.random.rand(3, 25) + data_10_26 = np.random.rand(10, 26) + + ts = [ + (12805, 0.3886487053947608, False, 'asymptotic', data_3_25), + (49140, 0.02888978556179862, False, 'asymptotic', data_10_26), + (12332, 0.7722477197436702, False, 'asymptotic', + [[72, 47, 73, 35, 47, 96, 30, 59, 41, 36, 56, 49, 81, + 43, 70, 47, 28, 28, 62, 20, 61, 20, 80, 24, 50], + [68, 52, 60, 34, 44, 20, 65, 88, 21, 81, 48, 31, 31, + 67, 69, 94, 30, 24, 40, 87, 70, 43, 50, 96, 43], + [81, 13, 85, 35, 79, 12, 92, 86, 21, 64, 16, 64, 68, + 17, 16, 89, 71, 43, 43, 36, 54, 13, 66, 51, 55]]), + (266, 4.121656378600823e-05, False, 'exact', + [[1.5, 4., 8.3, 5, 19, 11], + [5, 4, 3.5, 10, 20, 21], + [8.4, 3.2, 10, 12, 14, 15]]), + (332, 0.9566400920502488, True, 'exact', + [[4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1], + [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1], + [3, 4, 1, 2], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], + [1, 2, 3, 4], [1, 2, 3, 4]]), + (241, 0.9622210164861476, True, 'exact', + [[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], + [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], + [3, 2, 1], [2, 1, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], + [1, 2, 3], [1, 2, 3], [1, 2, 3]]), + (197, 0.9619432897162209, True, 'exact', + [[6, 5, 4, 3, 2, 1], [6, 5, 4, 3, 2, 1], [1, 3, 4, 5, 2, 6]]), + (423, 0.9590458306880073, True, 'exact', + [[5, 4, 3, 2, 1], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1], + [5, 4, 3, 2, 1], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1], + [4, 1, 3, 2, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], + [1, 2, 3, 4, 5]]), + (217, 0.9693058575034678, True, 'exact', + [[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], + [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], + [2, 1, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], + [1, 2, 3]]), + (395, 0.991530289351305, True, 'exact', + [[7, 6, 5, 4, 3, 2, 1], [7, 6, 5, 4, 3, 2, 1], + [6, 5, 7, 4, 3, 2, 1], [1, 2, 3, 4, 5, 6, 7]]), + (117, 0.9997817843373017, True, 'exact', + [[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], + [3, 2, 1], [3, 2, 1], [3, 2, 1], [2, 1, 3], [1, 2, 3]]), + ] + + @pytest.mark.parametrize("L, p, ranked, method, data", ts) + def test_accuracy(self, L, p, ranked, method, data): + np.random.seed(42) + res = stats.page_trend_test(data, ranked=ranked, method=method) + assert_equal(L, res.statistic) + assert_allclose(p, res.pvalue) + assert_equal(method, res.method) + + ts2 = [ + (542, 0.9481266260876332, True, 'exact', + [[10, 9, 8, 7, 6, 5, 4, 3, 2, 1], + [1, 8, 4, 7, 6, 5, 9, 3, 2, 10]]), + (1322, 0.9993113928199309, True, 'exact', + [[10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [10, 9, 8, 7, 6, 5, 4, 3, 2, 1], + [10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [9, 2, 8, 7, 6, 5, 4, 3, 10, 1], + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]), + (2286, 0.9908688345484833, True, 'exact', + [[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1], + [8, 7, 6, 
5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1], + [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1], + [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1], + [8, 7, 6, 5, 4, 3, 2, 1], [1, 3, 5, 6, 4, 7, 2, 8], + [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], + [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8], + [1, 2, 3, 4, 5, 6, 7, 8]]), + ] + + # only the first of these appears slow because intermediate data are + # cached and used on the rest + @pytest.mark.parametrize("L, p, ranked, method, data", ts2) + @pytest.mark.slow() + def test_accuracy2(self, L, p, ranked, method, data): + np.random.seed(42) + res = stats.page_trend_test(data, ranked=ranked, method=method) + assert_equal(L, res.statistic) + assert_allclose(p, res.pvalue) + assert_equal(method, res.method) + + def test_options(self): + np.random.seed(42) + m, n = 10, 20 + predicted_ranks = np.arange(1, n+1) + perm = np.random.permutation(np.arange(n)) + data = np.random.rand(m, n) + ranks = stats.rankdata(data, axis=1) + res1 = stats.page_trend_test(ranks) + res2 = stats.page_trend_test(ranks, ranked=True) + res3 = stats.page_trend_test(data, ranked=False) + res4 = stats.page_trend_test(ranks, predicted_ranks=predicted_ranks) + res5 = stats.page_trend_test(ranks[:, perm], + predicted_ranks=predicted_ranks[perm]) + assert_equal(res1.statistic, res2.statistic) + assert_equal(res1.statistic, res3.statistic) + assert_equal(res1.statistic, res4.statistic) + assert_equal(res1.statistic, res5.statistic) + + def test_Ames_assay(self): + # test from _page_trend_test.py [2] page 151; data on page 144 + np.random.seed(42) + + data = [[101, 117, 111], [91, 90, 107], [103, 133, 121], + [136, 140, 144], [190, 161, 201], [146, 120, 116]] + data = np.array(data).T + predicted_ranks = np.arange(1, 7) + + res = stats.page_trend_test(data, ranked=False, + predicted_ranks=predicted_ranks, + method="asymptotic") + assert_equal(res.statistic, 257) + assert_almost_equal(res.pvalue, 0.0035, decimal=4) + + res = stats.page_trend_test(data, ranked=False, + predicted_ranks=predicted_ranks, + method="exact") + assert_equal(res.statistic, 257) + assert_almost_equal(res.pvalue, 0.0023, decimal=4) + + def test_input_validation(self): + # test data not a 2d array + with assert_raises(ValueError, match="`data` must be a 2d array."): + stats.page_trend_test(None) + with assert_raises(ValueError, match="`data` must be a 2d array."): + stats.page_trend_test([]) + with assert_raises(ValueError, match="`data` must be a 2d array."): + stats.page_trend_test([1, 2]) + with assert_raises(ValueError, match="`data` must be a 2d array."): + stats.page_trend_test([[[1]]]) + + # test invalid dimensions + with assert_raises(ValueError, match="Page's L is only appropriate"): + stats.page_trend_test(np.random.rand(1, 3)) + with assert_raises(ValueError, match="Page's L is only appropriate"): + stats.page_trend_test(np.random.rand(2, 2)) + + # predicted ranks must include each integer [1, 2, 3] exactly once + message = "`predicted_ranks` must include each integer" + with assert_raises(ValueError, match=message): + stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]], + predicted_ranks=[0, 1, 2]) + with assert_raises(ValueError, match=message): + stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]], + predicted_ranks=[1.1, 2, 3]) + with assert_raises(ValueError, match=message): + stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]], + predicted_ranks=[1, 2, 3, 3]) + with assert_raises(ValueError, match=message): + stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]], +
predicted_ranks="invalid") + + # test improperly ranked data + with assert_raises(ValueError, match="`data` is not properly ranked"): + stats.page_trend_test([[0, 2, 3], [1, 2, 3]], True) + with assert_raises(ValueError, match="`data` is not properly ranked"): + stats.page_trend_test([[1, 2, 3], [1, 2, 4]], True) + + # various + with assert_raises(ValueError, match="`data` contains NaNs"): + stats.page_trend_test([[1, 2, 3], [1, 2, np.nan]], + ranked=False) + with assert_raises(ValueError, match="`method` must be in"): + stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]], + method="ekki") + with assert_raises(TypeError, match="`ranked` must be boolean."): + stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]], + ranked="ekki") + + +rng = np.random.default_rng(902340982) +x = rng.random(10) +y = rng.random(10) + + +@pytest.mark.parametrize("fun, args", + [(stats.wilcoxon, (x,)), + (stats.ks_1samp, (x, stats.norm.cdf)), # type: ignore[attr-defined] # noqa: E501 + (stats.ks_2samp, (x, y)), + (stats.kstest, (x, y)), + ]) +def test_rename_mode_method(fun, args): + + res = fun(*args, method='exact') + res2 = fun(*args, mode='exact') + assert_equal(res, res2) + + err = rf"{fun.__name__}() got multiple values for argument" + with pytest.raises(TypeError, match=re.escape(err)): + fun(*args, method='exact', mode='exact') + + +class TestExpectile: + def test_same_as_mean(self): + rng = np.random.default_rng(42) + x = rng.random(size=20) + assert_allclose(stats.expectile(x, alpha=0.5), np.mean(x)) + + def test_minimum(self): + rng = np.random.default_rng(42) + x = rng.random(size=20) + assert_allclose(stats.expectile(x, alpha=0), np.amin(x)) + + def test_maximum(self): + rng = np.random.default_rng(42) + x = rng.random(size=20) + assert_allclose(stats.expectile(x, alpha=1), np.amax(x)) + + def test_weights(self): + # expectile should minimize `fun` defined below; see + # F. Sobotka and T. Kneib, "Geoadditive expectile regression", + # Computational Statistics and Data Analysis 56 (2012) 755-767 + # :doi:`10.1016/j.csda.2010.11.015` + rng = np.random.default_rng(1856392524598679138) + + def fun(u, a, alpha, weights): + w = np.full_like(a, fill_value=alpha) + w[a <= u] = 1 - alpha + return np.sum(w * weights * (a - u)**2) + + def expectile2(a, alpha, weights): + bracket = np.min(a), np.max(a) + return optimize.minimize_scalar(fun, bracket=bracket, + args=(a, alpha, weights)).x + + n = 10 + a = rng.random(n) + alpha = rng.random() + weights = rng.random(n) + + res = stats.expectile(a, alpha, weights=weights) + ref = expectile2(a, alpha, weights) + assert_allclose(res, ref) + + @pytest.mark.parametrize( + "alpha", [0.2, 0.5 - 1e-12, 0.5, 0.5 + 1e-12, 0.8] + ) + @pytest.mark.parametrize("n", [20, 2000]) + def test_expectile_properties(self, alpha, n): + """ + See Section 6 of + I. Steinwart, C. Pasin, R.C. Williamson & S. Zhang (2014). + "Elicitation and Identification of Properties". COLT. + http://proceedings.mlr.press/v35/steinwart14.html + + and + + Propositions 5, 6, 7 of + F. Bellini, B. Klar, and A. Müller and E. Rosazza Gianin (2013). + "Generalized Quantiles as Risk Measures" + http://doi.org/10.2139/ssrn.2225751 + """ + rng = np.random.default_rng(42) + x = rng.normal(size=n) + + # 0. definite / constancy + # Let T(X) denote the expectile of rv X ~ F. + # T(c) = c for constant c + for c in [-5, 0, 0.5]: + assert_allclose( + stats.expectile(np.full(shape=n, fill_value=c), alpha=alpha), + c + ) + + # 1. 
translation equivariance + # T(X + c) = T(X) + c + c = rng.exponential() + assert_allclose( + stats.expectile(x + c, alpha=alpha), + stats.expectile(x, alpha=alpha) + c, + ) + assert_allclose( + stats.expectile(x - c, alpha=alpha), + stats.expectile(x, alpha=alpha) - c, + ) + + # 2. positive homogeneity + # T(cX) = c * T(X) for c > 0 + assert_allclose( + stats.expectile(c * x, alpha=alpha), + c * stats.expectile(x, alpha=alpha), + ) + + # 3. subadditivity + # Note that subadditivity holds for alpha >= 0.5. + # T(X + Y) <= T(X) + T(Y) + # For alpha = 0.5, i.e. the mean, strict equality holds. + # For alpha < 0.5, one can use property 6. to show + # T(X + Y) >= T(X) + T(Y) + y = rng.logistic(size=n, loc=10) # different distribution than x + if alpha == 0.5: + def assert_op(a, b): + assert_allclose(a, b) + + elif alpha > 0.5: + def assert_op(a, b): + assert a < b + + else: + def assert_op(a, b): + assert a > b + + assert_op( + stats.expectile(np.r_[x + y], alpha=alpha), + stats.expectile(x, alpha=alpha) + + stats.expectile(y, alpha=alpha) + ) + + # 4. monotonicity with respect to first-order stochastic dominance: + # if Y dominates X, i.e. P(Y <= t) <= P(X <= t) for all t, + # then T(X) <= T(Y) + y = rng.normal(size=n, loc=5) + assert ( + stats.expectile(x, alpha=alpha) <= stats.expectile(y, alpha=alpha) + ) + + # 5. convexity for alpha > 0.5, concavity for alpha < 0.5 + # convexity is + # T((1 - c) X + c Y) <= (1 - c) T(X) + c T(Y) for 0 <= c <= 1 + y = rng.logistic(size=n, loc=10) + for c in [0.1, 0.5, 0.8]: + assert_op( + stats.expectile((1-c)*x + c*y, alpha=alpha), + (1-c) * stats.expectile(x, alpha=alpha) + + c * stats.expectile(y, alpha=alpha) + ) + + # 6. negative argument + # T_{alpha}(-X) = -T_{1-alpha}(X) + assert_allclose( + stats.expectile(-x, alpha=alpha), + -stats.expectile(x, alpha=1-alpha), + ) + + @pytest.mark.parametrize("n", [20, 2000]) + def test_monotonicity_in_alpha(self, n): + rng = np.random.default_rng(42) + x = rng.pareto(a=2, size=n) + e_list = [] + alpha_seq = np.logspace(-15, np.log10(0.5), 100) + # sorted list of unique alpha values in interval [0, 1] + for alpha in np.r_[0, alpha_seq, 1 - alpha_seq[:-1][::-1], 1]: + e_list.append(stats.expectile(x, alpha=alpha)) + assert np.all(np.diff(e_list) > 0) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_survival.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_survival.py new file mode 100644 index 0000000000000000000000000000000000000000..f8360b9139286c130e8a4acd10aa69894c6bf9e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_survival.py @@ -0,0 +1,470 @@ +import pytest +import numpy as np +from numpy.testing import assert_equal, assert_allclose +from scipy import stats +from scipy.stats import _survival + + +def _kaplan_meier_reference(times, censored): + # This is a very straightforward implementation of the Kaplan-Meier + # estimator that does almost everything differently from the implementation + # in stats.ecdf. + + # Begin by sorting the raw data. Note that the order of death and loss + # at a given time matters: death happens first. See [2] page 461: + # "These conventions may be paraphrased by saying that deaths recorded as + # of an age t are treated as if they occurred slightly before t, and losses + # recorded as of an age t are treated as occurring slightly after t." + # We implement this by sorting the data first by time, then by `censored` + # (which is 0 when there is a death and 1 when there is only a loss).
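As an aside before the vectorized reference implementation below, here is a plain loop form of the same product-limit recursion, S(t) = prod over event times t_i <= t of (n_i - d_i)/n_i, checked against stats.ecdf on right-censored data borrowed from the examples further down in this file.

    import numpy as np
    from scipy import stats

    times = np.array([8, 12, 26, 14, 21, 27, 8, 32, 20, 40])
    died = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)

    surv, s = [], 1.0
    for t in np.unique(times):                # unique event times, ascending
        at_risk = np.sum(times >= t)          # under observation just before t
        deaths = np.sum((times == t) & died)  # deaths recorded at t
        s *= (at_risk - deaths) / at_risk     # product-limit update
        surv.append(s)

    sample = stats.CensoredData.right_censored(times, ~died)
    assert np.allclose(surv, stats.ecdf(sample).sf.probabilities)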
+ dtype = [('time', float), ('censored', int)] + data = np.array([(t, d) for t, d in zip(times, censored)], dtype=dtype) + data = np.sort(data, order=('time', 'censored')) + times = data['time'] + died = np.logical_not(data['censored']) + + m = times.size + n = np.arange(m, 0, -1) # number at risk + sf = np.cumprod((n - died) / n) + + # Find the indices of the *last* occurrence of unique times. The + # corresponding entries of `times` and `sf` are what we want. + _, indices = np.unique(times[::-1], return_index=True) + ref_times = times[-indices - 1] + ref_sf = sf[-indices - 1] + return ref_times, ref_sf + + +class TestSurvival: + + @staticmethod + def get_random_sample(rng, n_unique): + # generate random sample + unique_times = rng.random(n_unique) + # convert to `np.int32` to resolve `np.repeat` failure in 32-bit CI + repeats = rng.integers(1, 4, n_unique).astype(np.int32) + times = rng.permuted(np.repeat(unique_times, repeats)) + censored = rng.random(size=times.size) > rng.random() + sample = stats.CensoredData.right_censored(times, censored) + return sample, times, censored + + def test_input_validation(self): + message = '`sample` must be a one-dimensional sequence.' + with pytest.raises(ValueError, match=message): + stats.ecdf([[1]]) + with pytest.raises(ValueError, match=message): + stats.ecdf(1) + + message = '`sample` must not contain nan' + with pytest.raises(ValueError, match=message): + stats.ecdf([np.nan]) + + message = 'Currently, only uncensored and right-censored data...' + with pytest.raises(NotImplementedError, match=message): + stats.ecdf(stats.CensoredData.left_censored([1], censored=[True])) + + message = 'method` must be one of...' + res = stats.ecdf([1, 2, 3]) + with pytest.raises(ValueError, match=message): + res.cdf.confidence_interval(method='ekki-ekki') + with pytest.raises(ValueError, match=message): + res.sf.confidence_interval(method='shrubbery') + + message = 'confidence_level` must be a scalar between 0 and 1' + with pytest.raises(ValueError, match=message): + res.cdf.confidence_interval(-1) + with pytest.raises(ValueError, match=message): + res.sf.confidence_interval([0.5, 0.6]) + + message = 'The confidence interval is undefined at some observations.' + with pytest.warns(RuntimeWarning, match=message): + ci = res.cdf.confidence_interval() + + message = 'Confidence interval bounds do not implement...' + with pytest.raises(NotImplementedError, match=message): + ci.low.confidence_interval() + with pytest.raises(NotImplementedError, match=message): + ci.high.confidence_interval() + + def test_edge_cases(self): + res = stats.ecdf([]) + assert_equal(res.cdf.quantiles, []) + assert_equal(res.cdf.probabilities, []) + + res = stats.ecdf([1]) + assert_equal(res.cdf.quantiles, [1]) + assert_equal(res.cdf.probabilities, [1]) + + def test_unique(self): + # Example with unique observations; `stats.ecdf` ref. [1] page 80 + sample = [6.23, 5.58, 7.06, 6.42, 5.20] + res = stats.ecdf(sample) + ref_x = np.sort(np.unique(sample)) + ref_cdf = np.arange(1, 6) / 5 + ref_sf = 1 - ref_cdf + assert_equal(res.cdf.quantiles, ref_x) + assert_equal(res.cdf.probabilities, ref_cdf) + assert_equal(res.sf.quantiles, ref_x) + assert_equal(res.sf.probabilities, ref_sf) + + def test_nonunique(self): + # Example with non-unique observations; `stats.ecdf` ref. 
[1] page 82 + sample = [0, 2, 1, 2, 3, 4] + res = stats.ecdf(sample) + ref_x = np.sort(np.unique(sample)) + ref_cdf = np.array([1/6, 2/6, 4/6, 5/6, 1]) + ref_sf = 1 - ref_cdf + assert_equal(res.cdf.quantiles, ref_x) + assert_equal(res.cdf.probabilities, ref_cdf) + assert_equal(res.sf.quantiles, ref_x) + assert_equal(res.sf.probabilities, ref_sf) + + def test_evaluate_methods(self): + # Test CDF and SF `evaluate` methods + rng = np.random.default_rng(1162729143302572461) + sample, _, _ = self.get_random_sample(rng, 15) + res = stats.ecdf(sample) + x = res.cdf.quantiles + xr = x + np.diff(x, append=x[-1]+1)/2 # right shifted points + + assert_equal(res.cdf.evaluate(x), res.cdf.probabilities) + assert_equal(res.cdf.evaluate(xr), res.cdf.probabilities) + assert_equal(res.cdf.evaluate(x[0]-1), 0) # CDF starts at 0 + assert_equal(res.cdf.evaluate([-np.inf, np.inf]), [0, 1]) + + assert_equal(res.sf.evaluate(x), res.sf.probabilities) + assert_equal(res.sf.evaluate(xr), res.sf.probabilities) + assert_equal(res.sf.evaluate(x[0]-1), 1) # SF starts at 1 + assert_equal(res.sf.evaluate([-np.inf, np.inf]), [1, 0]) + + # ref. [1] page 91 + t1 = [37, 43, 47, 56, 60, 62, 71, 77, 80, 81] # times + d1 = [0, 0, 1, 1, 0, 0, 0, 1, 1, 1] # 1 means deaths (not censored) + r1 = [1, 1, 0.875, 0.75, 0.75, 0.75, 0.75, 0.5, 0.25, 0] # reference SF + + # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html + t2 = [8, 12, 26, 14, 21, 27, 8, 32, 20, 40] + d2 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0] + r2 = [0.9, 0.788, 0.675, 0.675, 0.54, 0.405, 0.27, 0.27, 0.27] + t3 = [33, 28, 41, 48, 48, 25, 37, 48, 25, 43] + d3 = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0] + r3 = [1, 0.875, 0.75, 0.75, 0.6, 0.6, 0.6] + + # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/bs704_survival4.html + t4 = [24, 3, 11, 19, 24, 13, 14, 2, 18, 17, + 24, 21, 12, 1, 10, 23, 6, 5, 9, 17] + d4 = [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1] + r4 = [0.95, 0.95, 0.897, 0.844, 0.844, 0.844, 0.844, 0.844, 0.844, + 0.844, 0.76, 0.676, 0.676, 0.676, 0.676, 0.507, 0.507] + + # https://www.real-statistics.com/survival-analysis/kaplan-meier-procedure/confidence-interval-for-the-survival-function/ + t5 = [3, 5, 8, 10, 5, 5, 8, 12, 15, 14, 2, 11, 10, 9, 12, 5, 8, 11] + d5 = [1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1] + r5 = [0.944, 0.889, 0.722, 0.542, 0.542, 0.542, 0.361, 0.181, 0.181, 0.181] + + @pytest.mark.parametrize("case", [(t1, d1, r1), (t2, d2, r2), (t3, d3, r3), + (t4, d4, r4), (t5, d5, r5)]) + def test_right_censored_against_examples(self, case): + # test `ecdf` against other implementations on example problems + times, died, ref = case + sample = stats.CensoredData.right_censored(times, np.logical_not(died)) + res = stats.ecdf(sample) + assert_allclose(res.sf.probabilities, ref, atol=1e-3) + assert_equal(res.sf.quantiles, np.sort(np.unique(times))) + + # test reference implementation against other implementations + res = _kaplan_meier_reference(times, np.logical_not(died)) + assert_equal(res[0], np.sort(np.unique(times))) + assert_allclose(res[1], ref, atol=1e-3) + + @pytest.mark.parametrize('seed', [182746786639392128, 737379171436494115, + 576033618403180168, 308115465002673650]) + def test_right_censored_against_reference_implementation(self, seed): + # test `ecdf` against reference implementation on random problems + rng = np.random.default_rng(seed) + n_unique = rng.integers(10, 100) + sample, times, censored = self.get_random_sample(rng, n_unique) + res = stats.ecdf(sample) + ref = 
_kaplan_meier_reference(times, censored) + assert_allclose(res.sf.quantiles, ref[0]) + assert_allclose(res.sf.probabilities, ref[1]) + + # If all observations are uncensored, the KM estimate should match + # the usual estimate for uncensored data + sample = stats.CensoredData(uncensored=times) + res = _survival._ecdf_right_censored(sample) # force Kaplan-Meier + ref = stats.ecdf(times) + assert_equal(res[0], ref.sf.quantiles) + assert_allclose(res[1], ref.cdf.probabilities, rtol=1e-14) + assert_allclose(res[2], ref.sf.probabilities, rtol=1e-14) + + def test_right_censored_ci(self): + # test "greenwood" confidence interval against example 4 (URL above). + times, died = self.t4, self.d4 + sample = stats.CensoredData.right_censored(times, np.logical_not(died)) + res = stats.ecdf(sample) + ref_allowance = [0.096, 0.096, 0.135, 0.162, 0.162, 0.162, 0.162, + 0.162, 0.162, 0.162, 0.214, 0.246, 0.246, 0.246, + 0.246, 0.341, 0.341] + + sf_ci = res.sf.confidence_interval() + cdf_ci = res.cdf.confidence_interval() + allowance = res.sf.probabilities - sf_ci.low.probabilities + + assert_allclose(allowance, ref_allowance, atol=1e-3) + assert_allclose(sf_ci.low.probabilities, + np.clip(res.sf.probabilities - allowance, 0, 1)) + assert_allclose(sf_ci.high.probabilities, + np.clip(res.sf.probabilities + allowance, 0, 1)) + assert_allclose(cdf_ci.low.probabilities, + np.clip(res.cdf.probabilities - allowance, 0, 1)) + assert_allclose(cdf_ci.high.probabilities, + np.clip(res.cdf.probabilities + allowance, 0, 1)) + + # test "log-log" confidence interval against Mathematica + # e = {24, 3, 11, 19, 24, 13, 14, 2, 18, 17, 24, 21, 12, 1, 10, 23, 6, 5, + # 9, 17} + # ci = {1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0} + # R = EventData[e, ci] + # S = SurvivalModelFit[R] + # S["PointwiseIntervals", ConfidenceLevel->0.95, + # ConfidenceTransform->"LogLog"] + + ref_low = [0.694743, 0.694743, 0.647529, 0.591142, 0.591142, 0.591142, + 0.591142, 0.591142, 0.591142, 0.591142, 0.464605, 0.370359, + 0.370359, 0.370359, 0.370359, 0.160489, 0.160489] + ref_high = [0.992802, 0.992802, 0.973299, 0.947073, 0.947073, 0.947073, + 0.947073, 0.947073, 0.947073, 0.947073, 0.906422, 0.856521, + 0.856521, 0.856521, 0.856521, 0.776724, 0.776724] + sf_ci = res.sf.confidence_interval(method='log-log') + assert_allclose(sf_ci.low.probabilities, ref_low, atol=1e-6) + assert_allclose(sf_ci.high.probabilities, ref_high, atol=1e-6) + + def test_right_censored_ci_example_5(self): + # test "exponential greenwood" confidence interval against example 5 + times, died = self.t5, self.d5 + sample = stats.CensoredData.right_censored(times, np.logical_not(died)) + res = stats.ecdf(sample) + lower = np.array([0.66639, 0.624174, 0.456179, 0.287822, 0.287822, + 0.287822, 0.128489, 0.030957, 0.030957, 0.030957]) + upper = np.array([0.991983, 0.970995, 0.87378, 0.739467, 0.739467, + 0.739467, 0.603133, 0.430365, 0.430365, 0.430365]) + + sf_ci = res.sf.confidence_interval(method='log-log') + cdf_ci = res.cdf.confidence_interval(method='log-log') + + assert_allclose(sf_ci.low.probabilities, lower, atol=1e-5) + assert_allclose(sf_ci.high.probabilities, upper, atol=1e-5) + assert_allclose(cdf_ci.low.probabilities, 1-upper, atol=1e-5) + assert_allclose(cdf_ci.high.probabilities, 1-lower, atol=1e-5) + + # Test against R's `survival` library `survfit` function, 90%CI + # library(survival) + # options(digits=16) + # time = c(3, 5, 8, 10, 5, 5, 8, 12, 15, 14, 2, 11, 10, 9, 12, 5, 8, 11) + # status = c(1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 
1) + # res = survfit(Surv(time, status) + # ~1, conf.type = "log-log", conf.int = 0.90) + # res$time; res$lower; res$upper + low = [0.74366748406861172, 0.68582332289196246, 0.50596835651480121, + 0.32913131413336727, 0.32913131413336727, 0.32913131413336727, + 0.15986912028781664, 0.04499539918147757, 0.04499539918147757, + 0.04499539918147757] + high = [0.9890291867238429, 0.9638835422144144, 0.8560366823086629, + 0.7130167643978450, 0.7130167643978450, 0.7130167643978450, + 0.5678602982997164, 0.3887616766886558, 0.3887616766886558, + 0.3887616766886558] + sf_ci = res.sf.confidence_interval(method='log-log', + confidence_level=0.9) + assert_allclose(sf_ci.low.probabilities, low) + assert_allclose(sf_ci.high.probabilities, high) + + # And with conf.type = "plain" + low = [0.8556383113628162, 0.7670478794850761, 0.5485720663578469, + 0.3441515412527123, 0.3441515412527123, 0.3441515412527123, + 0.1449184105424544, 0., 0., 0.] + high = [1., 1., 0.8958723780865975, 0.7391817920806210, + 0.7391817920806210, 0.7391817920806210, 0.5773038116797676, + 0.3642270254596720, 0.3642270254596720, 0.3642270254596720] + sf_ci = res.sf.confidence_interval(confidence_level=0.9) + assert_allclose(sf_ci.low.probabilities, low) + assert_allclose(sf_ci.high.probabilities, high) + + def test_right_censored_ci_nans(self): + # test `ecdf` confidence interval on a problem that results in NaNs + times, died = self.t1, self.d1 + sample = stats.CensoredData.right_censored(times, np.logical_not(died)) + res = stats.ecdf(sample) + + # Reference values generated with Matlab + # format long + # t = [37 43 47 56 60 62 71 77 80 81]; + # d = [0 0 1 1 0 0 0 1 1 1]; + # censored = ~d1; + # [f, x, flo, fup] = ecdf(t, 'Censoring', censored, 'Alpha', 0.05); + x = [37, 47, 56, 77, 80, 81] + flo = [np.nan, 0, 0, 0.052701464070711, 0.337611126231790, np.nan] + fup = [np.nan, 0.35417230377, 0.5500569798, 0.9472985359, 1.0, np.nan] + i = np.searchsorted(res.cdf.quantiles, x) + + message = "The confidence interval is undefined at some observations" + with pytest.warns(RuntimeWarning, match=message): + ci = res.cdf.confidence_interval() + + # Matlab gives NaN as the first element of the CIs. Mathematica agrees, + # but R's survfit does not. It makes some sense, but it's not what the + # formula gives, so skip that element. 
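Before the remaining assertions, a sketch of where such allowances come from, assuming the default ("greenwood") interval follows the textbook Greenwood variance, Var[S(t)] = S(t)^2 * sum over t_i <= t of d_i/(n_i*(n_i - d_i)); the data are the t4/d4 example above.

    import numpy as np
    from scipy import stats

    times = np.array([24, 3, 11, 19, 24, 13, 14, 2, 18, 17,
                      24, 21, 12, 1, 10, 23, 6, 5, 9, 17])
    died = np.array([0, 1, 0, 0, 0, 0, 1, 0, 0, 0,
                     0, 0, 0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)

    res = stats.ecdf(stats.CensoredData.right_censored(times, ~died))
    t = res.sf.quantiles
    n = np.array([np.sum(times >= ti) for ti in t])           # at risk
    d = np.array([np.sum((times == ti) & died) for ti in t])  # deaths
    var = res.sf.probabilities**2 * np.cumsum(d / (n * (n - d)))
    allowance = stats.norm.ppf(0.975) * np.sqrt(var)          # default 95% level

    ci = res.sf.confidence_interval()
    assert np.allclose(ci.low.probabilities,
                       np.clip(res.sf.probabilities - allowance, 0, 1))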
+ assert_allclose(ci.low.probabilities[i][1:], flo[1:]) + assert_allclose(ci.high.probabilities[i][1:], fup[1:]) + + # [f, x, flo, fup] = ecdf(t, 'Censoring', censored, 'Function', + # 'survivor', 'Alpha', 0.05); + flo = [np.nan, 0.64582769623, 0.449943020228, 0.05270146407, 0, np.nan] + fup = [np.nan, 1.0, 1.0, 0.947298535929289, 0.662388873768210, np.nan] + i = np.searchsorted(res.cdf.quantiles, x) + + with pytest.warns(RuntimeWarning, match=message): + ci = res.sf.confidence_interval() + + assert_allclose(ci.low.probabilities[i][1:], flo[1:]) + assert_allclose(ci.high.probabilities[i][1:], fup[1:]) + + # With the same data, R's `survival` library `survfit` function + # doesn't produce the leading NaN + # library(survival) + # options(digits=16) + # time = c(37, 43, 47, 56, 60, 62, 71, 77, 80, 81) + # status = c(0, 0, 1, 1, 0, 0, 0, 1, 1, 1) + # res = survfit(Surv(time, status) + # ~1, conf.type = "plain", conf.int = 0.95) + # res$time + # res$lower + # res$upper + low = [1., 1., 0.64582769623233816, 0.44994302022779326, + 0.44994302022779326, 0.44994302022779326, 0.44994302022779326, + 0.05270146407071086, 0., np.nan] + high = [1., 1., 1., 1., 1., 1., 1., 0.9472985359292891, + 0.6623888737682101, np.nan] + assert_allclose(ci.low.probabilities, low) + assert_allclose(ci.high.probabilities, high) + + # It does with conf.type="log-log", as do we + with pytest.warns(RuntimeWarning, match=message): + ci = res.sf.confidence_interval(method='log-log') + low = [np.nan, np.nan, 0.38700001403202522, 0.31480711370551911, + 0.31480711370551911, 0.31480711370551911, 0.31480711370551911, + 0.08048821148507734, 0.01049958986680601, np.nan] + high = [np.nan, np.nan, 0.9813929658789660, 0.9308983170906275, + 0.9308983170906275, 0.9308983170906275, 0.9308983170906275, + 0.8263946341076415, 0.6558775085110887, np.nan] + assert_allclose(ci.low.probabilities, low) + assert_allclose(ci.high.probabilities, high) + + def test_right_censored_against_uncensored(self): + rng = np.random.default_rng(7463952748044886637) + sample = rng.integers(10, 100, size=1000) + censored = np.zeros_like(sample) + censored[np.argmax(sample)] = True + res = stats.ecdf(sample) + ref = stats.ecdf(stats.CensoredData.right_censored(sample, censored)) + assert_equal(res.sf.quantiles, ref.sf.quantiles) + assert_equal(res.sf._n, ref.sf._n) + assert_equal(res.sf._d[:-1], ref.sf._d[:-1]) # difference @ [-1] + assert_allclose(res.sf._sf[:-1], ref.sf._sf[:-1], rtol=1e-14) + + def test_plot_iv(self): + rng = np.random.default_rng(1769658657308472721) + n_unique = rng.integers(10, 100) + sample, _, _ = self.get_random_sample(rng, n_unique) + res = stats.ecdf(sample) + + try: + import matplotlib.pyplot as plt # noqa: F401 + res.sf.plot() # no other errors occur + except (ModuleNotFoundError, ImportError): + # Avoid trying to call MPL with numpy 2.0-dev, because that fails + # too often due to ABI mismatches and is hard to avoid. This test + # will work fine again once MPL has done a 2.0-compatible release. + if not np.__version__.startswith('2.0.0.dev0'): + message = r"matplotlib must be installed to use method `plot`." 
+ with pytest.raises(ModuleNotFoundError, match=message): + res.sf.plot() + + class TestLogRank: + + @pytest.mark.parametrize( + "x, y, statistic, pvalue", + # Results validated with R + # library(survival) + # options(digits=16) + # + # futime_1 <- c(8, 12, 26, 14, 21, 27, 8, 32, 20, 40) + # fustat_1 <- c(1, 1, 1, 1, 1, 1, 0, 0, 0, 0) + # rx_1 <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + # + # futime_2 <- c(33, 28, 41, 48, 48, 25, 37, 48, 25, 43) + # fustat_2 <- c(1, 1, 1, 0, 0, 0, 0, 0, 0, 0) + # rx_2 <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1) + # + # futime <- c(futime_1, futime_2) + # fustat <- c(fustat_1, fustat_2) + # rx <- c(rx_1, rx_2) + # + # survdiff(formula = Surv(futime, fustat) ~ rx) + # + # Also check against another library which handles alternatives + # library(nph) + # logrank.test(futime, fustat, rx, alternative = "two.sided") + # res["test"] + [( + # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html + # uncensored, censored + [[8, 12, 26, 14, 21, 27], [8, 32, 20, 40]], + [[33, 28, 41], [48, 48, 25, 37, 48, 25, 43]], + # chi2, ["two-sided", "less", "greater"] + 6.91598157449, + [0.008542873404, 0.9957285632979385, 0.004271436702061537] + ), + ( + # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html + [[19, 6, 5, 4], [20, 19, 17, 14]], + [[16, 21, 7], [21, 15, 18, 18, 5]], + 0.835004855038, + [0.3608293039, 0.8195853480676912, 0.1804146519323088] + ), + ( + # Bland, Altman, "The logrank test", BMJ, 2004 + # https://www.bmj.com/content/328/7447/1073.short + [[6, 13, 21, 30, 37, 38, 49, 50, 63, 79, 86, 98, 202, 219], + [31, 47, 80, 82, 82, 149]], + [[10, 10, 12, 13, 14, 15, 16, 17, 18, 20, 24, 24, 25, 28, 30, + 33, 35, 37, 40, 40, 46, 48, 76, 81, 82, 91, 112, 181], + [34, 40, 70]], + 7.49659416854, + [0.006181578637, 0.003090789318730882, 0.9969092106812691] + )] + ) + def test_log_rank(self, x, y, statistic, pvalue): + x = stats.CensoredData(uncensored=x[0], right=x[1]) + y = stats.CensoredData(uncensored=y[0], right=y[1]) + + for i, alternative in enumerate(["two-sided", "less", "greater"]): + res = stats.logrank(x=x, y=y, alternative=alternative) + + # we return z and use the normal distribution while other frameworks + # return z**2. The p-values are directly comparable, but we have to + # square the statistic + assert_allclose(res.statistic**2, statistic, atol=1e-10) + assert_allclose(res.pvalue, pvalue[i], atol=1e-10)
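Between the two tests, a small illustrative sketch of the convention just described: squaring the signed z statistic recovers the chi-squared value that R's survdiff reports, and the two-sided p-value is unchanged either way.

    import numpy as np
    from scipy import stats

    x = stats.CensoredData(uncensored=[8, 12, 26, 14, 21, 27],
                           right=[8, 32, 20, 40])
    y = stats.CensoredData(uncensored=[33, 28, 41],
                           right=[48, 48, 25, 37, 48, 25, 43])

    res = stats.logrank(x=x, y=y)   # two-sided by default
    chi2 = res.statistic**2         # ~6.916, the value R reports
    assert np.isclose(stats.chi2.sf(chi2, df=1), res.pvalue)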
+ + def test_raises(self): + sample = stats.CensoredData([1, 2]) + + msg = r"`y` must be" + with pytest.raises(ValueError, match=msg): + stats.logrank(x=sample, y=[[1, 2]]) + + msg = r"`x` must be" + with pytest.raises(ValueError, match=msg): + stats.logrank(x=[[1, 2]], y=sample) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_tukeylambda_stats.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_tukeylambda_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..8041658599a6ed5b2cd70def1a92bffe0d851792 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_tukeylambda_stats.py @@ -0,0 +1,85 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_equal + +from scipy.stats._tukeylambda_stats import (tukeylambda_variance, + tukeylambda_kurtosis) + + +def test_tukeylambda_stats_known_exact(): + """Compare results with some known exact formulas.""" + # Some exact values of the Tukey Lambda variance and kurtosis: + # lambda var kurtosis + # 0 pi**2/3 6/5 (logistic distribution) + # 0.5 4 - pi (5/3 - pi/2)/(pi/4 - 1)**2 - 3 + # 1 1/3 -6/5 (uniform distribution on (-1,1)) + # 2 1/12 -6/5 (uniform distribution on (-1/2, 1/2)) + + # lambda = 0 + var = tukeylambda_variance(0) + assert_allclose(var, np.pi**2 / 3, atol=1e-12) + kurt = tukeylambda_kurtosis(0) + assert_allclose(kurt, 1.2, atol=1e-10) + + # lambda = 0.5 + var = tukeylambda_variance(0.5) + assert_allclose(var, 4 - np.pi, atol=1e-12) + kurt = tukeylambda_kurtosis(0.5) + desired = (5./3 - np.pi/2) / (np.pi/4 - 1)**2 - 3 + assert_allclose(kurt, desired, atol=1e-10) + + # lambda = 1 + var = tukeylambda_variance(1) + assert_allclose(var, 1.0 / 3, atol=1e-12) + kurt = tukeylambda_kurtosis(1) + assert_allclose(kurt, -1.2, atol=1e-10) + + # lambda = 2 + var = tukeylambda_variance(2) + assert_allclose(var, 1.0 / 12, atol=1e-12) + kurt = tukeylambda_kurtosis(2) + assert_allclose(kurt, -1.2, atol=1e-10) + + +def test_tukeylambda_stats_mpmath(): + """Compare results with some values that were computed using mpmath.""" + a10 = dict(atol=1e-10, rtol=0) + a12 = dict(atol=1e-12, rtol=0) + data = [ + # lambda variance kurtosis + [-0.1, 4.78050217874253547, 3.78559520346454510], + [-0.0649, 4.16428023599895777, 2.52019675947435718], + [-0.05, 3.93672267890775277, 2.13129793057777277], + [-0.001, 3.30128380390964882, 1.21452460083542988], + [0.001, 3.27850775649572176, 1.18560634779287585], + [0.03125, 2.95927803254615800, 0.804487555161819980], + [0.05, 2.78281053405464501, 0.611604043886644327], + [0.0649, 2.65282386754100551, 0.476834119532774540], + [1.2, 0.242153920578588346, -1.23428047169049726], + [10.0, 0.00095237579757703597, 2.37810697355144933], + [20.0, 0.00012195121951131043, 7.37654321002709531], + ] + + for lam, var_expected, kurt_expected in data: + var = tukeylambda_variance(lam) + assert_allclose(var, var_expected, **a12) + kurt = tukeylambda_kurtosis(lam) + assert_allclose(kurt, kurt_expected, **a10) + + # Test with vector arguments (most of the other tests are for single + # values).
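One more aside before the vectorized check below: for lambda > -1/2 and away from 0, the tabulated variances follow the closed form Var = (2/lambda^2) * (1/(1 + 2*lambda) - Gamma(lambda+1)^2 / Gamma(2*lambda+2)), which this sketch verifies against the implementation.

    import numpy as np
    from scipy.special import gamma
    from scipy.stats._tukeylambda_stats import tukeylambda_variance

    for lam in [0.5, 1.0, 2.0, 10.0]:
        closed = (2 / lam**2) * (1 / (1 + 2*lam)
                                 - gamma(lam + 1)**2 / gamma(2*lam + 2))
        # e.g. lam=1 gives 1/3 and lam=2 gives 1/12, matching the tests above
        assert np.isclose(closed, tukeylambda_variance(lam), rtol=1e-10)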
+ lam, var_expected, kurt_expected = zip(*data) + var = tukeylambda_variance(lam) + assert_allclose(var, var_expected, **a12) + kurt = tukeylambda_kurtosis(lam) + assert_allclose(kurt, kurt_expected, **a10) + + +def test_tukeylambda_stats_invalid(): + """Test values of lambda outside the domains of the functions.""" + lam = [-1.0, -0.5] + var = tukeylambda_variance(lam) + assert_equal(var, np.array([np.nan, np.inf])) + + lam = [-1.0, -0.25] + kurt = tukeylambda_kurtosis(lam) + assert_equal(kurt, np.array([np.nan, np.inf])) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/test_variation.py b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_variation.py new file mode 100644 index 0000000000000000000000000000000000000000..133978d681f09cc71e7b8667a8a0bc1b12b3a395 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/test_variation.py @@ -0,0 +1,159 @@ +import numpy as np +from numpy.testing import assert_equal, assert_allclose +import pytest +from scipy.stats import variation +from scipy._lib._util import AxisError + + +class TestVariation: + """ + Test class for scipy.stats.variation + """ + + def test_ddof(self): + x = np.arange(9.0) + assert_allclose(variation(x, ddof=1), np.sqrt(60/8)/4) + + @pytest.mark.parametrize('sgn', [1, -1]) + def test_sign(self, sgn): + x = np.array([1, 2, 3, 4, 5]) + v = variation(sgn*x) + expected = sgn*np.sqrt(2)/3 + assert_allclose(v, expected, rtol=1e-10) + + def test_scalar(self): + # A scalar is treated like a 1-d sequence with length 1. + assert_equal(variation(4.0), 0.0) + + @pytest.mark.parametrize('nan_policy, expected', + [('propagate', np.nan), + ('omit', np.sqrt(20/3)/4)]) + def test_variation_nan(self, nan_policy, expected): + x = np.arange(10.) + x[9] = np.nan + assert_allclose(variation(x, nan_policy=nan_policy), expected) + + def test_nan_policy_raise(self): + x = np.array([1.0, 2.0, np.nan, 3.0]) + with pytest.raises(ValueError, match='input contains nan'): + variation(x, nan_policy='raise') + + def test_bad_nan_policy(self): + with pytest.raises(ValueError, match='must be one of'): + variation([1, 2, 3], nan_policy='foobar') + + def test_keepdims(self): + x = np.arange(10).reshape(2, 5) + y = variation(x, axis=1, keepdims=True) + expected = np.array([[np.sqrt(2)/2], + [np.sqrt(2)/7]]) + assert_allclose(y, expected) + + @pytest.mark.parametrize('axis, expected', + [(0, np.empty((1, 0))), + (1, np.full((5, 1), fill_value=np.nan))]) + def test_keepdims_size0(self, axis, expected): + x = np.zeros((5, 0)) + y = variation(x, axis=axis, keepdims=True) + assert_equal(y, expected) + + @pytest.mark.parametrize('incr, expected_fill', [(0, np.inf), (1, np.nan)]) + def test_keepdims_and_ddof_eq_len_plus_incr(self, incr, expected_fill): + x = np.array([[1, 1, 2, 2], [1, 2, 3, 3]]) + y = variation(x, axis=1, ddof=x.shape[1] + incr, keepdims=True) + assert_equal(y, np.full((2, 1), fill_value=expected_fill)) + + def test_propagate_nan(self): + # Check that the shape of the result is the same for inputs + # with and without nans, cf gh-5817 + a = np.arange(8).reshape(2, -1).astype(float) + a[1, 0] = np.nan + v = variation(a, axis=1, nan_policy="propagate") + assert_allclose(v, [np.sqrt(5/4)/1.5, np.nan], atol=1e-15) + + def test_axis_none(self): + # Check that `variation` computes the result on the flattened + # input when axis is None. + y = variation([[0, 1], [2, 3]], axis=None) + assert_allclose(y, np.sqrt(5/4)/1.5) + + def test_bad_axis(self): + # Check that an invalid axis raises np.exceptions.AxisError. 
+ x = np.array([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(AxisError): + variation(x, axis=10) + + def test_mean_zero(self): + # Check that `variation` returns inf for a sequence that is not + # identically zero but whose mean is zero. + x = np.array([10, -3, 1, -4, -4]) + y = variation(x) + assert_equal(y, np.inf) + + x2 = np.array([x, -10*x]) + y2 = variation(x2, axis=1) + assert_equal(y2, [np.inf, np.inf]) + + @pytest.mark.parametrize('x', [np.zeros(5), [], [1, 2, np.inf, 9]]) + def test_return_nan(self, x): + # Test some cases where `variation` returns nan. + y = variation(x) + assert_equal(y, np.nan) + + @pytest.mark.parametrize('axis, expected', + [(0, []), (1, [np.nan]*3), (None, np.nan)]) + def test_2d_size_zero_with_axis(self, axis, expected): + x = np.empty((3, 0)) + y = variation(x, axis=axis) + assert_equal(y, expected) + + def test_neg_inf(self): + # Edge case that produces -inf: ddof equals the number of non-nan + # values, the values are not constant, and the mean is negative. + x1 = np.array([-3, -5]) + assert_equal(variation(x1, ddof=2), -np.inf) + + x2 = np.array([[np.nan, 1, -10, np.nan], + [-20, -3, np.nan, np.nan]]) + assert_equal(variation(x2, axis=1, ddof=2, nan_policy='omit'), + [-np.inf, -np.inf]) + + @pytest.mark.parametrize("nan_policy", ['propagate', 'omit']) + def test_combined_edge_cases(self, nan_policy): + x = np.array([[0, 10, np.nan, 1], + [0, -5, np.nan, 2], + [0, -5, np.nan, 3]]) + y = variation(x, axis=0, nan_policy=nan_policy) + assert_allclose(y, [np.nan, np.inf, np.nan, np.sqrt(2/3)/2]) + + @pytest.mark.parametrize( + 'ddof, expected', + [(0, [np.sqrt(1/6), np.sqrt(5/8), np.inf, 0, np.nan, 0.0, np.nan]), + (1, [0.5, np.sqrt(5/6), np.inf, 0, np.nan, 0, np.nan]), + (2, [np.sqrt(0.5), np.sqrt(5/4), np.inf, np.nan, np.nan, 0, np.nan])] + ) + def test_more_nan_policy_omit_tests(self, ddof, expected): + # The slightly strange formatting in the following array is my attempt to + # maintain a clean tabular arrangement of the data while satisfying + # the demands of pycodestyle. Currently, E201 and E241 are not + # disabled by the `# noqa` annotation. + nan = np.nan + x = np.array([[1.0, 2.0, nan, 3.0], + [0.0, 4.0, 3.0, 1.0], + [nan, -.5, 0.5, nan], + [nan, 9.0, 9.0, nan], + [nan, nan, nan, nan], + [3.0, 3.0, 3.0, 3.0], + [0.0, 0.0, 0.0, 0.0]]) + v = variation(x, axis=1, ddof=ddof, nan_policy='omit') + assert_allclose(v, expected) + + def test_variation_ddof(self): + # test variation with delta degrees of freedom + # regression test for gh-13341 + a = np.array([1, 2, 3, 4, 5]) + nan_a = np.array([1, 2, 3, np.nan, 4, 5, np.nan]) + y = variation(a, ddof=1) + nan_y = variation(nan_a, nan_policy="omit", ddof=1) + assert_allclose(y, np.sqrt(5/2)/3) + assert y == nan_y
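To close, a compact sketch of the quantity under test: variation() is the coefficient of variation, std/mean, and nan_policy='omit' matches dropping the NaNs by hand; the only names assumed are those used in the tests above.

    import numpy as np
    from scipy.stats import variation

    x = np.arange(9.0)
    # std(ddof=1)/mean reproduces the sqrt(60/8)/4 value asserted in test_ddof
    assert np.isclose(variation(x, ddof=1), np.std(x, ddof=1) / np.mean(x))

    nan_a = np.array([1, 2, 3, np.nan, 4, 5, np.nan])
    by_hand = variation(nan_a[~np.isnan(nan_a)], ddof=1)
    assert np.isclose(variation(nan_a, nan_policy='omit', ddof=1), by_hand)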