diff --git a/ckpts/universal/global_step40/zero/10.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/10.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..2c7662695d4753b753841037f71f68a3884d4abb --- /dev/null +++ b/ckpts/universal/global_step40/zero/10.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ce095ba3f812e1f5c4bd6c5354a43479377a35af095e54f711e27fab55d9eed +size 9293 diff --git a/ckpts/universal/global_step40/zero/24.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/24.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..1813ffbd3515615c90d65c16012537e1bb8ce0c2 --- /dev/null +++ b/ckpts/universal/global_step40/zero/24.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba12dbb337083af28f9fb2481c9fcf4045221c8e23e0f118f3a87dd8d3547d0b +size 16778411 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e39ca4850f71f2fcb8b188a91f839bb742b21a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_axis_nan_policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_axis_nan_policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2a168464f0989feda64b0df3e5d39ecccf30b39 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_axis_nan_policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_binned_statistic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_binned_statistic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1328bd1492950e00745284a3606369908233916 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_binned_statistic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_binomtest.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_binomtest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..faa2008224051a3e281fafbda11af0d1b8c0a2d8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_binomtest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_bws_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_bws_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5f7e5ce562b8b42aa0d2ebfc835e0ff916b17cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_bws_test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_censored_data.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_censored_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9deebe4574d2649007ac074d131eb47cc3adc76d Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_censored_data.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab5f5d45453bbff21da6f6c0120a1994cee9c067 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..551e182d22b61a408da2c9394d113fb0886366fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_covariance.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_covariance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcb98d0ed618dc794f03f9bb2ce19fd61cddb381 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_covariance.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_crosstab.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_crosstab.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4be48f3d95722c585cc982dfb256b29d6804d487 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_crosstab.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_discrete_distns.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_discrete_distns.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f88591b65cf29934ac287db635f8e4d6b55e0c73 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_discrete_distns.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3d3c9c8c136a937e148074ebc9a97f28a014650 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_distr_params.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_distr_params.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9a4b20e8a69c57d3d45e485f1e88aa742ab4bfd Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_distr_params.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_fit.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_fit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca6920c112274bdf8d3e600c3d3151949b52ede4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_fit.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_generate_pyx.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_generate_pyx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0c30c0bb904ffe050d3cf898e7ff7b40971ae3d Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_generate_pyx.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_hypotests.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_hypotests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91d97399c0ac68a7acbac8a029c22889b50c37ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_hypotests.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_kde.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_kde.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9a65a86cc46e4dfe34e84cad710f0a50bca2886 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_kde.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_ksstats.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_ksstats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..694e641ee41c78c1b2778a5b826933a11e27f71b Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_ksstats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_mannwhitneyu.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_mannwhitneyu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0ceb628f4653f8bcae194051e2bbdd20059b27c Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_mannwhitneyu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97c446c86f20dc316b547f6eff1c6c7b42f0f276 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3e04e694aa4470139c1b30cb25ca0392a673319 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_multicomp.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_multicomp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3b7ab982ad935c57eb4cf18766572f416ecbf8e Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_multicomp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_multivariate.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_multivariate.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..44835a7fa5dc8ec4dd06d42bce1856024708698f Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_multivariate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_page_trend_test.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_page_trend_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5160b28a727e166d571f45e33bfe8d15e7ed6174 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_page_trend_test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_qmc.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_qmc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e45ffec7f7035164685039d6d6e586c27ce7b24 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_qmc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_qmvnt.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_qmvnt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1cd7ac42aadd8648c49adf3e665929854bfd8af Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_qmvnt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_relative_risk.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_relative_risk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caf613f4b9f2fea4bd7aa3299ab88eb25044c78f Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_relative_risk.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_resampling.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_resampling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c463bf6849226bdab8f85f6240d3112f0ac9b13 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_resampling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_result_classes.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_result_classes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e52861b71798441c515e53478a2cf88bb249e72 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_result_classes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_rvs_sampling.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_rvs_sampling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..091befe4d2c39c8d661555bad9e9fbb256a367fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_rvs_sampling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_sampling.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_sampling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68c69ba78258dea77efc6bb59916654e03d7503d Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_sampling.cpython-310.pyc 
differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_sensitivity_analysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_sensitivity_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c3abdaffeb4f42a60c419862132b9a722e59d6d Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_sensitivity_analysis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_mstats_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_mstats_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c10eac130ebc0d192cb6ce559a4e6e28ab09193 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_mstats_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_survival.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_survival.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a2cabd50a5ae1dc3393f059f19db0ae1a5ef044 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_survival.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_tukeylambda_stats.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_tukeylambda_stats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70e7cef40af675384ce8c31b9cd65ecb9ca3b5f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_tukeylambda_stats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_variation.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_variation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6f936650594bf4d2fa59aad88cf5b5922490703 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_variation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_warnings_errors.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_warnings_errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eca4bbf7f03800dcd7121d19b5960998bb46d7ea Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_warnings_errors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_wilcoxon.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_wilcoxon.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24023216b1848cf81570ae2093769794b51bd7a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/_wilcoxon.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/biasedurn.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/biasedurn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc3f73c5245f0aff3da409005994afde14779729 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/biasedurn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/contingency.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/contingency.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdcf4dd70565ff9a22937e23606e554d2594071b Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/contingency.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/distributions.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/distributions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1025670dcebdf1ce6be78b5f86a131061ec9e868 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/distributions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/kde.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/kde.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7486784071b1bd153861c042215407bcfec13f2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/kde.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mstats.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mstats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cf2416871a8dcbb6f935cf529da1937730148fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mstats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mstats_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mstats_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3b166a19170db69ef86ce400338e2b3c63058ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mstats_basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mstats_extras.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mstats_extras.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05e4b135cadd8ffc2c7b73d2897c71a746506cac Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mstats_extras.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mvn.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mvn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e9e6a465b914e3c38efd8ceb5eba38dca40565c Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/mvn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/qmc.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/qmc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a902ebdb547744cd5c207a84320ae966c9628b6a Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/qmc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/sampling.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/sampling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a67e7b6caf10d3261341e31a1e2f3b359a9e640c Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/scipy/stats/__pycache__/sampling.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_biasedurn.pxd b/venv/lib/python3.10/site-packages/scipy/stats/_biasedurn.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..92785f08dbec30a4db286fcb85b42d7221e2228e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/_biasedurn.pxd
@@ -0,0 +1,27 @@
+# Declare the class with cdef
+cdef extern from "biasedurn/stocc.h" nogil:
+    cdef cppclass CFishersNCHypergeometric:
+        CFishersNCHypergeometric(int, int, int, double, double) except +
+        int mode()
+        double mean()
+        double variance()
+        double probability(int x)
+        double moments(double * mean, double * var)
+
+    cdef cppclass CWalleniusNCHypergeometric:
+        CWalleniusNCHypergeometric() except +
+        CWalleniusNCHypergeometric(int, int, int, double, double) except +
+        int mode()
+        double mean()
+        double variance()
+        double probability(int x)
+        double moments(double * mean, double * var)
+
+    cdef cppclass StochasticLib3:
+        StochasticLib3(int seed) except +
+        double Random() except +
+        void SetAccuracy(double accur)
+        int FishersNCHyp (int n, int m, int N, double odds) except +
+        int WalleniusNCHyp (int n, int m, int N, double odds) except +
+        double(*next_double)()
+        double(*next_normal)(const double m, const double s)
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py b/venv/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py
new file mode 100644
index 0000000000000000000000000000000000000000..c624bb8c79be23ad515d003649f9efe9ea3e775d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/stats/_binned_statistic.py
@@ -0,0 +1,795 @@
+import builtins
+from warnings import catch_warnings, simplefilter
+import numpy as np
+from operator import index
+from collections import namedtuple
+
+__all__ = ['binned_statistic',
+           'binned_statistic_2d',
+           'binned_statistic_dd']
+
+
+BinnedStatisticResult = namedtuple('BinnedStatisticResult',
+                                   ('statistic', 'bin_edges', 'binnumber'))
+
+
+def binned_statistic(x, values, statistic='mean',
+                     bins=10, range=None):
+    """
+    Compute a binned statistic for one or more sets of data.
+
+    This is a generalization of a histogram function. A histogram divides
+    the space into bins, and returns the count of the number of points in
+    each bin. This function allows the computation of the sum, mean, median,
+    or other statistic of the values (or set of values) within each bin.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        A sequence of values to be binned.
+    values : (N,) array_like or list of (N,) array_like
+        The data on which the statistic will be computed. This must be
+        the same shape as `x`, or a set of sequences - each the same shape as
+        `x`. If `values` is a set of sequences, the statistic will be computed
+        on each independently.
+    statistic : string or callable, optional
+        The statistic to compute (default is 'mean').
+        The following statistics are available:
+
+          * 'mean' : compute the mean of values for points within each bin.
+            Empty bins will be represented by NaN.
+          * 'std' : compute the standard deviation within each bin. This
+            is implicitly calculated with ddof=0.
+          * 'median' : compute the median of values for points within each
+            bin. Empty bins will be represented by NaN.
+          * 'count' : compute the count of points within each bin. This is
+            identical to an unweighted histogram. `values` array is not
+            referenced.
+          * 'sum' : compute the sum of values for points within each bin.
+ This is identical to a weighted histogram. + * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : int or sequence of scalars, optional + If `bins` is an int, it defines the number of equal-width bins in the + given range (10 by default). If `bins` is a sequence, it defines the + bin edges, including the rightmost edge, allowing for non-uniform bin + widths. Values in `x` that are smaller than lowest bin edge are + assigned to bin number 0, values beyond the highest bin are assigned to + ``bins[-1]``. If the bin edges are specified, the number of bins will + be, (nx = len(bins)-1). + range : (float, float) or [(float, float)], optional + The lower and upper range of the bins. If not provided, range + is simply ``(x.min(), x.max())``. Values outside the range are + ignored. + + Returns + ------- + statistic : array + The values of the selected statistic in each bin. + bin_edges : array of dtype float + Return the bin edges ``(length(statistic)+1)``. + binnumber: 1-D ndarray of ints + Indices of the bins (corresponding to `bin_edges`) in which each value + of `x` belongs. Same length as `values`. A binnumber of `i` means the + corresponding value is between (bin_edges[i-1], bin_edges[i]). + + See Also + -------- + numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, if + `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, + but excluding 2) and the second ``[2, 3)``. The last bin, however, is + ``[3, 4]``, which *includes* 4. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + First some basic examples: + + Create two evenly spaced bins in the range of the given sample, and sum the + corresponding values in each of those bins: + + >>> values = [1.0, 1.0, 2.0, 1.5, 3.0] + >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2) + BinnedStatisticResult(statistic=array([4. , 4.5]), + bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2])) + + Multiple arrays of values can also be passed. The statistic is calculated + on each set independently: + + >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]] + >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2) + BinnedStatisticResult(statistic=array([[4. , 4.5], + [8. , 9. ]]), bin_edges=array([1., 4., 7.]), + binnumber=array([1, 1, 1, 2, 2])) + + >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean', + ... bins=3) + BinnedStatisticResult(statistic=array([1., 2., 4.]), + bin_edges=array([1., 2., 3., 4.]), + binnumber=array([1, 2, 1, 2, 3])) + + As a second example, we now generate some random data of sailing boat speed + as a function of wind speed, and then determine how fast our boat is for + certain wind speeds: + + >>> rng = np.random.default_rng() + >>> windspeed = 8 * rng.random(500) + >>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500) + >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed, + ... 
boatspeed, statistic='median', bins=[1,2,3,4,5,6,7]) + >>> plt.figure() + >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data') + >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5, + ... label='binned statistic of data') + >>> plt.legend() + + Now we can use ``binnumber`` to select all datapoints with a windspeed + below 1: + + >>> low_boatspeed = boatspeed[binnumber == 0] + + As a final example, we will use ``bin_edges`` and ``binnumber`` to make a + plot of a distribution that shows the mean and distribution around that + mean per bin, on top of a regular histogram and the probability + distribution function: + + >>> x = np.linspace(0, 5, num=500) + >>> x_pdf = stats.maxwell.pdf(x) + >>> samples = stats.maxwell.rvs(size=10000) + + >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf, + ... statistic='mean', bins=25) + >>> bin_width = (bin_edges[1] - bin_edges[0]) + >>> bin_centers = bin_edges[1:] - bin_width/2 + + >>> plt.figure() + >>> plt.hist(samples, bins=50, density=True, histtype='stepfilled', + ... alpha=0.2, label='histogram of data') + >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf') + >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2, + ... label='binned statistic of data') + >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5) + >>> plt.legend(fontsize=10) + >>> plt.show() + + """ + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1: + bins = [np.asarray(bins, float)] + + if range is not None: + if len(range) == 2: + range = [range] + + medians, edges, binnumbers = binned_statistic_dd( + [x], values, statistic, bins, range) + + return BinnedStatisticResult(medians, edges[0], binnumbers) + + +BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult', + ('statistic', 'x_edge', 'y_edge', + 'binnumber')) + + +def binned_statistic_2d(x, y, values, statistic='mean', + bins=10, range=None, expand_binnumbers=False): + """ + Compute a bidimensional binned statistic for one or more sets of data. + + This is a generalization of a histogram2d function. A histogram divides + the space into bins, and returns the count of the number of points in + each bin. This function allows the computation of the sum, mean, median, + or other statistic of the values (or set of values) within each bin. + + Parameters + ---------- + x : (N,) array_like + A sequence of values to be binned along the first dimension. + y : (N,) array_like + A sequence of values to be binned along the second dimension. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `x`, or a list of sequences - each with the same + shape as `x`. If `values` is such a list, the statistic will be + computed on each independently. + statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'std' : compute the standard deviation within each bin. This + is implicitly calculated with ddof=0. + * 'median' : compute the median of values for points within each + bin. Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. 
+ * 'min' : compute the minimum of values for points within each bin. + Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : int or [int, int] or array_like or [array, array], optional + The bin specification: + + * the number of bins for the two dimensions (nx = ny = bins), + * the number of bins in each dimension (nx, ny = bins), + * the bin edges for the two dimensions (x_edge = y_edge = bins), + * the bin edges in each dimension (x_edge, y_edge = bins). + + If the bin edges are specified, the number of bins will be, + (nx = len(x_edge)-1, ny = len(y_edge)-1). + + range : (2,2) array_like, optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be + considered outliers and not tallied in the histogram. + expand_binnumbers : bool, optional + 'False' (default): the returned `binnumber` is a shape (N,) array of + linearized bin indices. + 'True': the returned `binnumber` is 'unraveled' into a shape (2,N) + ndarray, where each row gives the bin numbers in the corresponding + dimension. + See the `binnumber` returned value, and the `Examples` section. + + .. versionadded:: 0.17.0 + + Returns + ------- + statistic : (nx, ny) ndarray + The values of the selected statistic in each two-dimensional bin. + x_edge : (nx + 1) ndarray + The bin edges along the first dimension. + y_edge : (ny + 1) ndarray + The bin edges along the second dimension. + binnumber : (N,) array of ints or (2,N) ndarray of ints + This assigns to each element of `sample` an integer that represents the + bin in which this observation falls. The representation depends on the + `expand_binnumbers` argument. See `Notes` for details. + + + See Also + -------- + numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd + + Notes + ----- + Binedges: + All but the last (righthand-most) bin is half-open. In other words, if + `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, + but excluding 2) and the second ``[2, 3)``. The last bin, however, is + ``[3, 4]``, which *includes* 4. + + `binnumber`: + This returned argument assigns to each element of `sample` an integer that + represents the bin in which it belongs. The representation depends on the + `expand_binnumbers` argument. If 'False' (default): The returned + `binnumber` is a shape (N,) array of linearized indices mapping each + element of `sample` to its corresponding bin (using row-major ordering). + Note that the returned linearized bin indices are used for an array with + extra bins on the outer binedges to capture values outside of the defined + bin bounds. + If 'True': The returned `binnumber` is a shape (2,N) ndarray where + each row indicates bin placements for each dimension respectively. In each + dimension, a binnumber of `i` means the corresponding value is between + (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'. + + .. 
versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy import stats + + Calculate the counts with explicit bin-edges: + + >>> x = [0.1, 0.1, 0.1, 0.6] + >>> y = [2.1, 2.6, 2.1, 2.1] + >>> binx = [0.0, 0.5, 1.0] + >>> biny = [2.0, 2.5, 3.0] + >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny]) + >>> ret.statistic + array([[2., 1.], + [1., 0.]]) + + The bin in which each sample is placed is given by the `binnumber` + returned parameter. By default, these are the linearized bin indices: + + >>> ret.binnumber + array([5, 6, 5, 9]) + + The bin indices can also be expanded into separate entries for each + dimension using the `expand_binnumbers` parameter: + + >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny], + ... expand_binnumbers=True) + >>> ret.binnumber + array([[1, 1, 1, 2], + [1, 2, 1, 1]]) + + Which shows that the first three elements belong in the xbin 1, and the + fourth into xbin 2; and so on for y. + + """ + + # This code is based on np.histogram2d + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1 and N != 2: + xedges = yedges = np.asarray(bins, float) + bins = [xedges, yedges] + + medians, edges, binnumbers = binned_statistic_dd( + [x, y], values, statistic, bins, range, + expand_binnumbers=expand_binnumbers) + + return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers) + + +BinnedStatisticddResult = namedtuple('BinnedStatisticddResult', + ('statistic', 'bin_edges', + 'binnumber')) + + +def _bincount(x, weights): + if np.iscomplexobj(weights): + a = np.bincount(x, np.real(weights)) + b = np.bincount(x, np.imag(weights)) + z = a + b*1j + + else: + z = np.bincount(x, weights) + return z + + +def binned_statistic_dd(sample, values, statistic='mean', + bins=10, range=None, expand_binnumbers=False, + binned_statistic_result=None): + """ + Compute a multidimensional binned statistic for a set of data. + + This is a generalization of a histogramdd function. A histogram divides + the space into bins, and returns the count of the number of points in + each bin. This function allows the computation of the sum, mean, median, + or other statistic of the values within each bin. + + Parameters + ---------- + sample : array_like + Data to histogram passed as a sequence of N arrays of length D, or + as an (N,D) array. + values : (N,) array_like or list of (N,) array_like + The data on which the statistic will be computed. This must be + the same shape as `sample`, or a list of sequences - each with the + same shape as `sample`. If `values` is such a list, the statistic + will be computed on each independently. + statistic : string or callable, optional + The statistic to compute (default is 'mean'). + The following statistics are available: + + * 'mean' : compute the mean of values for points within each bin. + Empty bins will be represented by NaN. + * 'median' : compute the median of values for points within each + bin. Empty bins will be represented by NaN. + * 'count' : compute the count of points within each bin. This is + identical to an unweighted histogram. `values` array is not + referenced. + * 'sum' : compute the sum of values for points within each bin. + This is identical to a weighted histogram. + * 'std' : compute the standard deviation within each bin. This + is implicitly calculated with ddof=0. If the number of values + within a given bin is 0 or 1, the computed standard deviation value + will be 0 for the bin. + * 'min' : compute the minimum of values for points within each bin. 
+ Empty bins will be represented by NaN. + * 'max' : compute the maximum of values for point within each bin. + Empty bins will be represented by NaN. + * function : a user-defined function which takes a 1D array of + values, and outputs a single numerical statistic. This function + will be called on the values in each bin. Empty bins will be + represented by function([]), or NaN if this returns an error. + + bins : sequence or positive int, optional + The bin specification must be in one of the following forms: + + * A sequence of arrays describing the bin edges along each dimension. + * The number of bins for each dimension (nx, ny, ... = bins). + * The number of bins for all dimensions (nx = ny = ... = bins). + range : sequence, optional + A sequence of lower and upper bin edges to be used if the edges are + not given explicitly in `bins`. Defaults to the minimum and maximum + values along each dimension. + expand_binnumbers : bool, optional + 'False' (default): the returned `binnumber` is a shape (N,) array of + linearized bin indices. + 'True': the returned `binnumber` is 'unraveled' into a shape (D,N) + ndarray, where each row gives the bin numbers in the corresponding + dimension. + See the `binnumber` returned value, and the `Examples` section of + `binned_statistic_2d`. + binned_statistic_result : binnedStatisticddResult + Result of a previous call to the function in order to reuse bin edges + and bin numbers with new values and/or a different statistic. + To reuse bin numbers, `expand_binnumbers` must have been set to False + (the default) + + .. versionadded:: 0.17.0 + + Returns + ------- + statistic : ndarray, shape(nx1, nx2, nx3,...) + The values of the selected statistic in each two-dimensional bin. + bin_edges : list of ndarrays + A list of D arrays describing the (nxi + 1) bin edges for each + dimension. + binnumber : (N,) array of ints or (D,N) ndarray of ints + This assigns to each element of `sample` an integer that represents the + bin in which this observation falls. The representation depends on the + `expand_binnumbers` argument. See `Notes` for details. + + + See Also + -------- + numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d + + Notes + ----- + Binedges: + All but the last (righthand-most) bin is half-open in each dimension. In + other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is + ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The + last bin, however, is ``[3, 4]``, which *includes* 4. + + `binnumber`: + This returned argument assigns to each element of `sample` an integer that + represents the bin in which it belongs. The representation depends on the + `expand_binnumbers` argument. If 'False' (default): The returned + `binnumber` is a shape (N,) array of linearized indices mapping each + element of `sample` to its corresponding bin (using row-major ordering). + If 'True': The returned `binnumber` is a shape (D,N) ndarray where + each row indicates bin placements for each dimension respectively. In each + dimension, a binnumber of `i` means the corresponding value is between + (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.mplot3d import Axes3D + + Take an array of 600 (x, y) coordinates as an example. + `binned_statistic_dd` can handle arrays of higher dimension `D`. But a plot + of dimension `D+1` is required. 
+ + >>> mu = np.array([0., 1.]) + >>> sigma = np.array([[1., -0.5],[-0.5, 1.5]]) + >>> multinormal = stats.multivariate_normal(mu, sigma) + >>> data = multinormal.rvs(size=600, random_state=235412) + >>> data.shape + (600, 2) + + Create bins and count how many arrays fall in each bin: + + >>> N = 60 + >>> x = np.linspace(-3, 3, N) + >>> y = np.linspace(-3, 4, N) + >>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y], + ... statistic='count') + >>> bincounts = ret.statistic + + Set the volume and the location of bars: + + >>> dx = x[1] - x[0] + >>> dy = y[1] - y[0] + >>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2) + >>> z = 0 + + >>> bincounts = bincounts.ravel() + >>> x = x.ravel() + >>> y = y.ravel() + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111, projection='3d') + >>> with np.errstate(divide='ignore'): # silence random axes3d warning + ... ax.bar3d(x, y, z, dx, dy, bincounts) + + Reuse bin numbers and bin edges with new values: + + >>> ret2 = stats.binned_statistic_dd(data, -np.arange(600), + ... binned_statistic_result=ret, + ... statistic='mean') + """ + known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max'] + if not callable(statistic) and statistic not in known_stats: + raise ValueError(f'invalid statistic {statistic!r}') + + try: + bins = index(bins) + except TypeError: + # bins is not an integer + pass + # If bins was an integer-like object, now it is an actual Python int. + + # NOTE: for _bin_edges(), see e.g. gh-11365 + if isinstance(bins, int) and not np.isfinite(sample).all(): + raise ValueError(f'{sample!r} contains non-finite values.') + + # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`) + # `Dlen` is the length of elements along each dimension. + # This code is based on np.histogramdd + try: + # `sample` is an ND-array. + Dlen, Ndim = sample.shape + except (AttributeError, ValueError): + # `sample` is a sequence of 1D arrays. + sample = np.atleast_2d(sample).T + Dlen, Ndim = sample.shape + + # Store initial shape of `values` to preserve it in the output + values = np.asarray(values) + input_shape = list(values.shape) + # Make sure that `values` is 2D to iterate over rows + values = np.atleast_2d(values) + Vdim, Vlen = values.shape + + # Make sure `values` match `sample` + if statistic != 'count' and Vlen != Dlen: + raise AttributeError('The number of `values` elements must match the ' + 'length of each `sample` dimension.') + + try: + M = len(bins) + if M != Ndim: + raise AttributeError('The dimension of bins must be equal ' + 'to the dimension of the sample x.') + except TypeError: + bins = Ndim * [bins] + + if binned_statistic_result is None: + nbin, edges, dedges = _bin_edges(sample, bins, range) + binnumbers = _bin_numbers(sample, nbin, edges, dedges) + else: + edges = binned_statistic_result.bin_edges + nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)]) + # +1 for outlier bins + dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)] + binnumbers = binned_statistic_result.binnumber + + # Avoid overflow with double precision. Complex `values` -> `complex128`. 
+ result_type = np.result_type(values, np.float64) + result = np.empty([Vdim, nbin.prod()], dtype=result_type) + + if statistic in {'mean', np.mean}: + result.fill(np.nan) + flatcount = _bincount(binnumbers, None) + a = flatcount.nonzero() + for vv in builtins.range(Vdim): + flatsum = _bincount(binnumbers, values[vv]) + result[vv, a] = flatsum[a] / flatcount[a] + elif statistic in {'std', np.std}: + result.fill(np.nan) + flatcount = _bincount(binnumbers, None) + a = flatcount.nonzero() + for vv in builtins.range(Vdim): + flatsum = _bincount(binnumbers, values[vv]) + delta = values[vv] - flatsum[binnumbers] / flatcount[binnumbers] + std = np.sqrt( + _bincount(binnumbers, delta*np.conj(delta))[a] / flatcount[a] + ) + result[vv, a] = std + result = np.real(result) + elif statistic == 'count': + result = np.empty([Vdim, nbin.prod()], dtype=np.float64) + result.fill(0) + flatcount = _bincount(binnumbers, None) + a = np.arange(len(flatcount)) + result[:, a] = flatcount[np.newaxis, :] + elif statistic in {'sum', np.sum}: + result.fill(0) + for vv in builtins.range(Vdim): + flatsum = _bincount(binnumbers, values[vv]) + a = np.arange(len(flatsum)) + result[vv, a] = flatsum + elif statistic in {'median', np.median}: + result.fill(np.nan) + for vv in builtins.range(Vdim): + i = np.lexsort((values[vv], binnumbers)) + _, j, counts = np.unique(binnumbers[i], + return_index=True, return_counts=True) + mid = j + (counts - 1) / 2 + mid_a = values[vv, i][np.floor(mid).astype(int)] + mid_b = values[vv, i][np.ceil(mid).astype(int)] + medians = (mid_a + mid_b) / 2 + result[vv, binnumbers[i][j]] = medians + elif statistic in {'min', np.min}: + result.fill(np.nan) + for vv in builtins.range(Vdim): + i = np.argsort(values[vv])[::-1] # Reversed so the min is last + result[vv, binnumbers[i]] = values[vv, i] + elif statistic in {'max', np.max}: + result.fill(np.nan) + for vv in builtins.range(Vdim): + i = np.argsort(values[vv]) + result[vv, binnumbers[i]] = values[vv, i] + elif callable(statistic): + with np.errstate(invalid='ignore'), catch_warnings(): + simplefilter("ignore", RuntimeWarning) + try: + null = statistic([]) + except Exception: + null = np.nan + if np.iscomplexobj(null): + result = result.astype(np.complex128) + result.fill(null) + try: + _calc_binned_statistic( + Vdim, binnumbers, result, values, statistic + ) + except ValueError: + result = result.astype(np.complex128) + _calc_binned_statistic( + Vdim, binnumbers, result, values, statistic + ) + + # Shape into a proper matrix + result = result.reshape(np.append(Vdim, nbin)) + + # Remove outliers (indices 0 and -1 for each bin-dimension). 
+ core = tuple([slice(None)] + Ndim * [slice(1, -1)]) + result = result[core] + + # Unravel binnumbers into an ndarray, each row the bins for each dimension + if expand_binnumbers and Ndim > 1: + binnumbers = np.asarray(np.unravel_index(binnumbers, nbin)) + + if np.any(result.shape[1:] != nbin - 2): + raise RuntimeError('Internal Shape Error') + + # Reshape to have output (`result`) match input (`values`) shape + result = result.reshape(input_shape[:-1] + list(nbin-2)) + + return BinnedStatisticddResult(result, edges, binnumbers) + + +def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func): + unique_bin_numbers = np.unique(bin_numbers) + for vv in builtins.range(Vdim): + bin_map = _create_binned_data(bin_numbers, unique_bin_numbers, + values, vv) + for i in unique_bin_numbers: + stat = stat_func(np.array(bin_map[i])) + if np.iscomplexobj(stat) and not np.iscomplexobj(result): + raise ValueError("The statistic function returns complex ") + result[vv, i] = stat + + +def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv): + """ Create hashmap of bin ids to values in bins + key: bin number + value: list of binned data + """ + bin_map = dict() + for i in unique_bin_numbers: + bin_map[i] = [] + for i in builtins.range(len(bin_numbers)): + bin_map[bin_numbers[i]].append(values[vv, i]) + return bin_map + + +def _bin_edges(sample, bins=None, range=None): + """ Create edge arrays + """ + Dlen, Ndim = sample.shape + + nbin = np.empty(Ndim, int) # Number of bins in each dimension + edges = Ndim * [None] # Bin edges for each dim (will be 2D array) + dedges = Ndim * [None] # Spacing between edges (will be 2D array) + + # Select range for each dimension + # Used only if number of bins is given. + if range is None: + smin = np.atleast_1d(np.array(sample.min(axis=0), float)) + smax = np.atleast_1d(np.array(sample.max(axis=0), float)) + else: + if len(range) != Ndim: + raise ValueError( + f"range given for {len(range)} dimensions; {Ndim} required") + smin = np.empty(Ndim) + smax = np.empty(Ndim) + for i in builtins.range(Ndim): + if range[i][1] < range[i][0]: + raise ValueError( + "In {}range, start must be <= stop".format( + f"dimension {i + 1} of " if Ndim > 1 else "")) + smin[i], smax[i] = range[i] + + # Make sure the bins have a finite width. + for i in builtins.range(len(smin)): + if smin[i] == smax[i]: + smin[i] = smin[i] - .5 + smax[i] = smax[i] + .5 + + # Preserve sample floating point precision in bin edges + edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating) + else float) + + # Create edge arrays + for i in builtins.range(Ndim): + if np.isscalar(bins[i]): + nbin[i] = bins[i] + 2 # +2 for outlier bins + edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1, + dtype=edges_dtype) + else: + edges[i] = np.asarray(bins[i], edges_dtype) + nbin[i] = len(edges[i]) + 1 # +1 for outlier bins + dedges[i] = np.diff(edges[i]) + + nbin = np.asarray(nbin) + + return nbin, edges, dedges + + +def _bin_numbers(sample, nbin, edges, dedges): + """Compute the bin number each sample falls into, in each dimension + """ + Dlen, Ndim = sample.shape + + sampBin = [ + np.digitize(sample[:, i], edges[i]) + for i in range(Ndim) + ] + + # Using `digitize`, values that fall on an edge are put in the right bin. + # For the rightmost bin, we want values equal to the right + # edge to be counted in the last bin, and not as an outlier. 
+ for i in range(Ndim): + # Find the rounding precision + dedges_min = dedges[i].min() + if dedges_min == 0: + raise ValueError('The smallest edge difference is numerically 0.') + decimal = int(-np.log10(dedges_min)) + 6 + # Find which points are on the rightmost edge. + on_edge = np.where((sample[:, i] >= edges[i][-1]) & + (np.around(sample[:, i], decimal) == + np.around(edges[i][-1], decimal)))[0] + # Shift these points one bin to the left. + sampBin[i][on_edge] -= 1 + + # Compute the sample indices in the flattened statistic matrix. + binnumbers = np.ravel_multi_index(sampBin, nbin) + + return binnumbers diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_binomtest.py b/venv/lib/python3.10/site-packages/scipy/stats/_binomtest.py new file mode 100644 index 0000000000000000000000000000000000000000..bdf21117383374e730ab052fcbb0b5b7fca029c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_binomtest.py @@ -0,0 +1,375 @@ +from math import sqrt +import numpy as np +from scipy._lib._util import _validate_int +from scipy.optimize import brentq +from scipy.special import ndtri +from ._discrete_distns import binom +from ._common import ConfidenceInterval + + +class BinomTestResult: + """ + Result of `scipy.stats.binomtest`. + + Attributes + ---------- + k : int + The number of successes (copied from `binomtest` input). + n : int + The number of trials (copied from `binomtest` input). + alternative : str + Indicates the alternative hypothesis specified in the input + to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``, + or ``'less'``. + statistic: float + The estimate of the proportion of successes. + pvalue : float + The p-value of the hypothesis test. + + """ + def __init__(self, k, n, alternative, statistic, pvalue): + self.k = k + self.n = n + self.alternative = alternative + self.statistic = statistic + self.pvalue = pvalue + + # add alias for backward compatibility + self.proportion_estimate = statistic + + def __repr__(self): + s = ("BinomTestResult(" + f"k={self.k}, " + f"n={self.n}, " + f"alternative={self.alternative!r}, " + f"statistic={self.statistic}, " + f"pvalue={self.pvalue})") + return s + + def proportion_ci(self, confidence_level=0.95, method='exact'): + """ + Compute the confidence interval for ``statistic``. + + Parameters + ---------- + confidence_level : float, optional + Confidence level for the computed confidence interval + of the estimated proportion. Default is 0.95. + method : {'exact', 'wilson', 'wilsoncc'}, optional + Selects the method used to compute the confidence interval + for the estimate of the proportion: + + 'exact' : + Use the Clopper-Pearson exact method [1]_. + 'wilson' : + Wilson's method, without continuity correction ([2]_, [3]_). + 'wilsoncc' : + Wilson's method, with continuity correction ([2]_, [3]_). + + Default is ``'exact'``. + + Returns + ------- + ci : ``ConfidenceInterval`` object + The object has attributes ``low`` and ``high`` that hold the + lower and upper bounds of the confidence interval. + + References + ---------- + .. [1] C. J. Clopper and E. S. Pearson, The use of confidence or + fiducial limits illustrated in the case of the binomial, + Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934). + .. [2] E. B. Wilson, Probable inference, the law of succession, and + statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212 + (1927). + .. [3] Robert G. Newcombe, Two-sided confidence intervals for the + single proportion: comparison of seven methods, Statistics + in Medicine, 17, pp 857-872 (1998). 
+ + Examples + -------- + >>> from scipy.stats import binomtest + >>> result = binomtest(k=7, n=50, p=0.1) + >>> result.statistic + 0.14 + >>> result.proportion_ci() + ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846) + """ + if method not in ('exact', 'wilson', 'wilsoncc'): + raise ValueError(f"method ('{method}') must be one of 'exact', " + "'wilson' or 'wilsoncc'.") + if not (0 <= confidence_level <= 1): + raise ValueError(f'confidence_level ({confidence_level}) must be in ' + 'the interval [0, 1].') + if method == 'exact': + low, high = _binom_exact_conf_int(self.k, self.n, + confidence_level, + self.alternative) + else: + # method is 'wilson' or 'wilsoncc' + low, high = _binom_wilson_conf_int(self.k, self.n, + confidence_level, + self.alternative, + correction=method == 'wilsoncc') + return ConfidenceInterval(low=low, high=high) + + +def _findp(func): + try: + p = brentq(func, 0, 1) + except RuntimeError: + raise RuntimeError('numerical solver failed to converge when ' + 'computing the confidence limits') from None + except ValueError as exc: + raise ValueError('brentq raised a ValueError; report this to the ' + 'SciPy developers') from exc + return p + + +def _binom_exact_conf_int(k, n, confidence_level, alternative): + """ + Compute the estimate and confidence interval for the binomial test. + + Returns proportion, prop_low, prop_high + """ + if alternative == 'two-sided': + alpha = (1 - confidence_level) / 2 + if k == 0: + plow = 0.0 + else: + plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha) + if k == n: + phigh = 1.0 + else: + phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha) + elif alternative == 'less': + alpha = 1 - confidence_level + plow = 0.0 + if k == n: + phigh = 1.0 + else: + phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha) + elif alternative == 'greater': + alpha = 1 - confidence_level + if k == 0: + plow = 0.0 + else: + plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha) + phigh = 1.0 + return plow, phigh + + +def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction): + # This function assumes that the arguments have already been validated. + # In particular, `alternative` must be one of 'two-sided', 'less' or + # 'greater'. + p = k / n + if alternative == 'two-sided': + z = ndtri(0.5 + 0.5*confidence_level) + else: + z = ndtri(confidence_level) + + # For reference, the formulas implemented here are from + # Newcombe (1998) (ref. [3] in the proportion_ci docstring). + denom = 2*(n + z**2) + center = (2*n*p + z**2)/denom + q = 1 - p + if correction: + if alternative == 'less' or k == 0: + lo = 0.0 + else: + dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom + lo = center - dlo + if alternative == 'greater' or k == n: + hi = 1.0 + else: + dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom + hi = center + dhi + else: + delta = z/denom * sqrt(4*n*p*q + z**2) + if alternative == 'less' or k == 0: + lo = 0.0 + else: + lo = center - delta + if alternative == 'greater' or k == n: + hi = 1.0 + else: + hi = center + delta + + return lo, hi + + +def binomtest(k, n, p=0.5, alternative='two-sided'): + """ + Perform a test that the probability of success is p. + + The binomial test [1]_ is a test of the null hypothesis that the + probability of success in a Bernoulli experiment is `p`. + + Details of the test can be found in many texts on statistics, such + as section 24.5 of [2]_. + + Parameters + ---------- + k : int + The number of successes. + n : int + The number of trials. 
+ p : float, optional + The hypothesized probability of success, i.e. the expected + proportion of successes. The value must be in the interval + ``0 <= p <= 1``. The default value is ``p = 0.5``. + alternative : {'two-sided', 'greater', 'less'}, optional + Indicates the alternative hypothesis. The default value is + 'two-sided'. + + Returns + ------- + result : `~scipy.stats._result_classes.BinomTestResult` instance + The return value is an object with the following attributes: + + k : int + The number of successes (copied from `binomtest` input). + n : int + The number of trials (copied from `binomtest` input). + alternative : str + Indicates the alternative hypothesis specified in the input + to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``, + or ``'less'``. + statistic : float + The estimate of the proportion of successes. + pvalue : float + The p-value of the hypothesis test. + + The object has the following methods: + + proportion_ci(confidence_level=0.95, method='exact') : + Compute the confidence interval for ``statistic``. + + Notes + ----- + .. versionadded:: 1.7.0 + + References + ---------- + .. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test + .. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition), + Prentice Hall, Upper Saddle River, New Jersey USA (2010) + + Examples + -------- + >>> from scipy.stats import binomtest + + A car manufacturer claims that no more than 10% of their cars are unsafe. + 15 cars are inspected for safety, 3 were found to be unsafe. Test the + manufacturer's claim: + + >>> result = binomtest(3, n=15, p=0.1, alternative='greater') + >>> result.pvalue + 0.18406106910639114 + + The null hypothesis cannot be rejected at the 5% level of significance + because the returned p-value is greater than the critical value of 5%. + + The test statistic is equal to the estimated proportion, which is simply + ``3/15``: + + >>> result.statistic + 0.2 + + We can use the `proportion_ci()` method of the result to compute the + confidence interval of the estimate: + + >>> result.proportion_ci(confidence_level=0.95) + ConfidenceInterval(low=0.05684686759024681, high=1.0) + + """ + k = _validate_int(k, 'k', minimum=0) + n = _validate_int(n, 'n', minimum=1) + if k > n: + raise ValueError(f'k ({k}) must not be greater than n ({n}).') + + if not (0 <= p <= 1): + raise ValueError(f"p ({p}) must be in range [0,1]") + + if alternative not in ('two-sided', 'less', 'greater'): + raise ValueError(f"alternative ('{alternative}') not recognized; \n" + "must be 'two-sided', 'less' or 'greater'") + if alternative == 'less': + pval = binom.cdf(k, n, p) + elif alternative == 'greater': + pval = binom.sf(k-1, n, p) + else: + # alternative is 'two-sided' + d = binom.pmf(k, n, p) + rerr = 1 + 1e-7 + if k == p * n: + # special case as shortcut, would also be handled by `else` below + pval = 1. + elif k < p * n: + ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p), + -d*rerr, np.ceil(p * n), n) + # y is the number of terms between mode and n that are <= d*rerr. + # ix gave us the first term where a(ix) <= d*rerr < a(ix-1) + # if the first equality doesn't hold, y=n-ix. Otherwise, we + # need to include ix as well as the equality holds. Note that + # the equality will hold in very very rare situations due to rerr. 
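The one-sided branches above reduce to plain binomial tail probabilities; the sketch below (arbitrary example values, public API only) spells that out:

from scipy.stats import binom, binomtest

k, n, p = 3, 15, 0.1

res_greater = binomtest(k, n, p, alternative='greater')
print(res_greater.pvalue)       # P(X >= k) under Binomial(n, p)
print(binom.sf(k - 1, n, p))    # the same tail, computed directly

res_less = binomtest(k, n, p, alternative='less')
print(res_less.pvalue, binom.cdf(k, n, p))   # P(X <= k)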
+ y = n - ix + int(d*rerr == binom.pmf(ix, n, p)) + pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p) + else: + ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p), + d*rerr, 0, np.floor(p * n)) + # y is the number of terms between 0 and mode that are <= d*rerr. + # we need to add a 1 to account for the 0 index. + # For comparing this with old behavior, see + # tst_binary_srch_for_binom_tst method in test_morestats. + y = ix + 1 + pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p) + + pval = min(1.0, pval) + + result = BinomTestResult(k=k, n=n, alternative=alternative, + statistic=k/n, pvalue=pval) + return result + + +def _binary_search_for_binom_tst(a, d, lo, hi): + """ + Conducts an implicit binary search on a function specified by `a`. + + Meant to be used on the binomial PMF for the case of two-sided tests + to obtain the value on the other side of the mode where the tail + probability should be computed. The values on either side of + the mode are always in order, meaning binary search is applicable. + + Parameters + ---------- + a : callable + The function over which to perform binary search. Its values + for inputs lo and hi should be in ascending order. + d : float + The value to search. + lo : int + The lower end of range to search. + hi : int + The higher end of the range to search. + + Returns + ------- + int + The index, i between lo and hi + such that a(i)<=d<a(i+1) + """ + while lo < hi: + mid = lo + (hi-lo)//2 + midval = a(mid) + if midval < d: + lo = mid+1 + elif midval > d: + hi = mid-1 + else: + return mid + if a(lo) <= d: + return lo + else: + return lo-1 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py b/venv/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py new file mode 100644 index 0000000000000000000000000000000000000000..3eb1e85be30c7e4ebc67113ede53e687f48d955d --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py @@ -0,0 +1,4098 @@ +# +# Author: Travis Oliphant 2002-2011 with contributions from +# SciPy Developers 2004-2011 +# +from scipy._lib._util import getfullargspec_no_self as _getfullargspec + +import sys +import keyword +import re +import types +import warnings +from itertools import zip_longest + +from scipy._lib import doccer +from ._distr_params import distcont, distdiscrete +from scipy._lib._util import check_random_state + +from scipy.special import comb, entr + + +# for root finding for continuous distribution ppf, and maximum likelihood +# estimation +from scipy import optimize + +# for functions of continuous distributions (e.g. moments, entropy, cdf) +from scipy import integrate + +# to approximate the pdf of a continuous distribution given its cdf +from scipy._lib._finite_differences import _derivative + +# for scipy.stats.entropy. Attempts to import just that function or file +# have caused import problems +from scipy import stats + +from numpy import (arange, putmask, ones, shape, ndarray, zeros, floor, + logical_and, log, sqrt, place, argmax, vectorize, asarray, + nan, inf, isinf, empty) + +import numpy as np +from ._constants import _XMAX, _LOGXMAX +from ._censored_data import CensoredData +from scipy.stats._warnings_errors import FitError + +# These are the docstring parts used for substitution in specific +# distribution docstrings + +docheaders = {'methods': """\nMethods\n-------\n""", + 'notes': """\nNotes\n-----\n""", + 'examples': """\nExamples\n--------\n"""} + +_doc_rvs = """\ +rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None) + Random variates. +""" +_doc_pdf = """\ +pdf(x, %(shapes)s, loc=0, scale=1) + Probability density function.
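The two-sided case assembled above sums the probability of every outcome whose pmf does not exceed pmf(k), up to the 1 + 1e-7 relative slack; the binary search merely locates that set efficiently. A brute-force cross-check (arbitrary example values, public API only):

import numpy as np
from scipy.stats import binom, binomtest

k, n, p = 7, 50, 0.25
d = binom.pmf(k, n, p)

i = np.arange(n + 1)
pmf = binom.pmf(i, n, p)
pval_brute = min(1.0, pmf[pmf <= d * (1 + 1e-7)].sum())

print(pval_brute)
print(binomtest(k, n, p).pvalue)   # should agree closely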
+""" +_doc_logpdf = """\ +logpdf(x, %(shapes)s, loc=0, scale=1) + Log of the probability density function. +""" +_doc_pmf = """\ +pmf(k, %(shapes)s, loc=0, scale=1) + Probability mass function. +""" +_doc_logpmf = """\ +logpmf(k, %(shapes)s, loc=0, scale=1) + Log of the probability mass function. +""" +_doc_cdf = """\ +cdf(x, %(shapes)s, loc=0, scale=1) + Cumulative distribution function. +""" +_doc_logcdf = """\ +logcdf(x, %(shapes)s, loc=0, scale=1) + Log of the cumulative distribution function. +""" +_doc_sf = """\ +sf(x, %(shapes)s, loc=0, scale=1) + Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate). +""" # noqa: E501 +_doc_logsf = """\ +logsf(x, %(shapes)s, loc=0, scale=1) + Log of the survival function. +""" +_doc_ppf = """\ +ppf(q, %(shapes)s, loc=0, scale=1) + Percent point function (inverse of ``cdf`` --- percentiles). +""" +_doc_isf = """\ +isf(q, %(shapes)s, loc=0, scale=1) + Inverse survival function (inverse of ``sf``). +""" +_doc_moment = """\ +moment(order, %(shapes)s, loc=0, scale=1) + Non-central moment of the specified order. +""" +_doc_stats = """\ +stats(%(shapes)s, loc=0, scale=1, moments='mv') + Mean('m'), variance('v'), skew('s'), and/or kurtosis('k'). +""" +_doc_entropy = """\ +entropy(%(shapes)s, loc=0, scale=1) + (Differential) entropy of the RV. +""" +_doc_fit = """\ +fit(data) + Parameter estimates for generic data. + See `scipy.stats.rv_continuous.fit `__ for detailed documentation of the + keyword arguments. +""" # noqa: E501 +_doc_expect = """\ +expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds) + Expected value of a function (of one argument) with respect to the distribution. +""" # noqa: E501 +_doc_expect_discrete = """\ +expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False) + Expected value of a function (of one argument) with respect to the distribution. +""" +_doc_median = """\ +median(%(shapes)s, loc=0, scale=1) + Median of the distribution. +""" +_doc_mean = """\ +mean(%(shapes)s, loc=0, scale=1) + Mean of the distribution. +""" +_doc_var = """\ +var(%(shapes)s, loc=0, scale=1) + Variance of the distribution. +""" +_doc_std = """\ +std(%(shapes)s, loc=0, scale=1) + Standard deviation of the distribution. +""" +_doc_interval = """\ +interval(confidence, %(shapes)s, loc=0, scale=1) + Confidence interval with equal areas around the median. +""" +_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf, + _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf, + _doc_logsf, _doc_ppf, _doc_isf, _doc_moment, + _doc_stats, _doc_entropy, _doc_fit, + _doc_expect, _doc_median, + _doc_mean, _doc_var, _doc_std, _doc_interval]) + +_doc_default_longsummary = """\ +As an instance of the `rv_continuous` class, `%(name)s` object inherits from it +a collection of generic methods (see below for the full list), +and completes them with details specific for this particular distribution. +""" + +_doc_default_frozen_note = """ +Alternatively, the object may be called (as a function) to fix the shape, +location, and scale parameters returning a "frozen" continuous RV object: + +rv = %(name)s(%(shapes)s, loc=0, scale=1) + - Frozen RV object with the same methods but holding the given shape, + location, and scale fixed. 
+""" +_doc_default_example = """\ +Examples +-------- +>>> import numpy as np +>>> from scipy.stats import %(name)s +>>> import matplotlib.pyplot as plt +>>> fig, ax = plt.subplots(1, 1) + +Calculate the first four moments: + +%(set_vals_stmt)s +>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') + +Display the probability density function (``pdf``): + +>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s), +... %(name)s.ppf(0.99, %(shapes)s), 100) +>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s), +... 'r-', lw=5, alpha=0.6, label='%(name)s pdf') + +Alternatively, the distribution object can be called (as a function) +to fix the shape, location and scale parameters. This returns a "frozen" +RV object holding the given parameters fixed. + +Freeze the distribution and display the frozen ``pdf``: + +>>> rv = %(name)s(%(shapes)s) +>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf') + +Check accuracy of ``cdf`` and ``ppf``: + +>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s) +>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s)) +True + +Generate random numbers: + +>>> r = %(name)s.rvs(%(shapes)s, size=1000) + +And compare the histogram: + +>>> ax.hist(r, density=True, bins='auto', histtype='stepfilled', alpha=0.2) +>>> ax.set_xlim([x[0], x[-1]]) +>>> ax.legend(loc='best', frameon=False) +>>> plt.show() + +""" + +_doc_default_locscale = """\ +The probability density above is defined in the "standardized" form. To shift +and/or scale the distribution use the ``loc`` and ``scale`` parameters. +Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically +equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with +``y = (x - loc) / scale``. Note that shifting the location of a distribution +does not make it a "noncentral" distribution; noncentral generalizations of +some distributions are available in separate classes. +""" + +_doc_default = ''.join([_doc_default_longsummary, + _doc_allmethods, + '\n', + _doc_default_example]) + +_doc_default_before_notes = ''.join([_doc_default_longsummary, + _doc_allmethods]) + +docdict = { + 'rvs': _doc_rvs, + 'pdf': _doc_pdf, + 'logpdf': _doc_logpdf, + 'cdf': _doc_cdf, + 'logcdf': _doc_logcdf, + 'sf': _doc_sf, + 'logsf': _doc_logsf, + 'ppf': _doc_ppf, + 'isf': _doc_isf, + 'stats': _doc_stats, + 'entropy': _doc_entropy, + 'fit': _doc_fit, + 'moment': _doc_moment, + 'expect': _doc_expect, + 'interval': _doc_interval, + 'mean': _doc_mean, + 'std': _doc_std, + 'var': _doc_var, + 'median': _doc_median, + 'allmethods': _doc_allmethods, + 'longsummary': _doc_default_longsummary, + 'frozennote': _doc_default_frozen_note, + 'example': _doc_default_example, + 'default': _doc_default, + 'before_notes': _doc_default_before_notes, + 'after_notes': _doc_default_locscale +} + +# Reuse common content between continuous and discrete docs, change some +# minor bits. 
+docdict_discrete = docdict.copy() + +docdict_discrete['pmf'] = _doc_pmf +docdict_discrete['logpmf'] = _doc_logpmf +docdict_discrete['expect'] = _doc_expect_discrete +_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf', + 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median', + 'mean', 'var', 'std', 'interval'] +for obj in _doc_disc_methods: + docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '') + +_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf'] +for obj in _doc_disc_methods_err_varname: + docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ') + +docdict_discrete.pop('pdf') +docdict_discrete.pop('logpdf') + +_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods]) +docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods + +docdict_discrete['longsummary'] = _doc_default_longsummary.replace( + 'rv_continuous', 'rv_discrete') + +_doc_default_frozen_note = """ +Alternatively, the object may be called (as a function) to fix the shape and +location parameters returning a "frozen" discrete RV object: + +rv = %(name)s(%(shapes)s, loc=0) + - Frozen RV object with the same methods but holding the given shape and + location fixed. +""" +docdict_discrete['frozennote'] = _doc_default_frozen_note + +_doc_default_discrete_example = """\ +Examples +-------- +>>> import numpy as np +>>> from scipy.stats import %(name)s +>>> import matplotlib.pyplot as plt +>>> fig, ax = plt.subplots(1, 1) + +Calculate the first four moments: + +%(set_vals_stmt)s +>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') + +Display the probability mass function (``pmf``): + +>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s), +... %(name)s.ppf(0.99, %(shapes)s)) +>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf') +>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5) + +Alternatively, the distribution object can be called (as a function) +to fix the shape and location. This returns a "frozen" RV object holding +the given parameters fixed. + +Freeze the distribution and display the frozen ``pmf``: + +>>> rv = %(name)s(%(shapes)s) +>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1, +... label='frozen pmf') +>>> ax.legend(loc='best', frameon=False) +>>> plt.show() + +Check accuracy of ``cdf`` and ``ppf``: + +>>> prob = %(name)s.cdf(x, %(shapes)s) +>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s)) +True + +Generate random numbers: + +>>> r = %(name)s.rvs(%(shapes)s, size=1000) +""" + + +_doc_default_discrete_locscale = """\ +The probability mass function above is defined in the "standardized" form. +To shift distribution use the ``loc`` parameter. +Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically +equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``. 
+""" + +docdict_discrete['example'] = _doc_default_discrete_example +docdict_discrete['after_notes'] = _doc_default_discrete_locscale + +_doc_default_before_notes = ''.join([docdict_discrete['longsummary'], + docdict_discrete['allmethods']]) +docdict_discrete['before_notes'] = _doc_default_before_notes + +_doc_default_disc = ''.join([docdict_discrete['longsummary'], + docdict_discrete['allmethods'], + docdict_discrete['frozennote'], + docdict_discrete['example']]) +docdict_discrete['default'] = _doc_default_disc + +# clean up all the separate docstring elements, we do not need them anymore +for obj in [s for s in dir() if s.startswith('_doc_')]: + exec('del ' + obj) +del obj + + +def _moment(data, n, mu=None): + if mu is None: + mu = data.mean() + return ((data - mu)**n).mean() + + +def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args): + if (n == 0): + return 1.0 + elif (n == 1): + if mu is None: + val = moment_func(1, *args) + else: + val = mu + elif (n == 2): + if mu2 is None or mu is None: + val = moment_func(2, *args) + else: + val = mu2 + mu*mu + elif (n == 3): + if g1 is None or mu2 is None or mu is None: + val = moment_func(3, *args) + else: + mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment + val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment + elif (n == 4): + if g1 is None or g2 is None or mu2 is None or mu is None: + val = moment_func(4, *args) + else: + mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment + mu3 = g1*np.power(mu2, 1.5) # 3rd central moment + val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu + else: + val = moment_func(n, *args) + + return val + + +def _skew(data): + """ + skew is third central moment / variance**(1.5) + """ + data = np.ravel(data) + mu = data.mean() + m2 = ((data - mu)**2).mean() + m3 = ((data - mu)**3).mean() + return m3 / np.power(m2, 1.5) + + +def _kurtosis(data): + """kurtosis is fourth central moment / variance**2 - 3.""" + data = np.ravel(data) + mu = data.mean() + m2 = ((data - mu)**2).mean() + m4 = ((data - mu)**4).mean() + return m4 / m2**2 - 3 + + +def _fit_determine_optimizer(optimizer): + if not callable(optimizer) and isinstance(optimizer, str): + if not optimizer.startswith('fmin_'): + optimizer = "fmin_"+optimizer + if optimizer == 'fmin_': + optimizer = 'fmin' + try: + optimizer = getattr(optimize, optimizer) + except AttributeError as e: + raise ValueError("%s is not a valid optimizer" % optimizer) from e + return optimizer + + +def _sum_finite(x): + """ + For a 1D array x, return a tuple containing the sum of the + finite values of x and the number of nonfinite values. + + This is a utility function used when evaluating the negative + loglikelihood for a distribution and an array of samples. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import _sum_finite + >>> tot, nbad = _sum_finite(np.array([-2, -np.inf, 5, 1])) + >>> tot + 4.0 + >>> nbad + 1 + """ + finite_x = np.isfinite(x) + bad_count = finite_x.size - np.count_nonzero(finite_x) + return np.sum(x[finite_x]), bad_count + + +# Frozen RV class +class rv_frozen: + + def __init__(self, dist, *args, **kwds): + self.args = args + self.kwds = kwds + + # create a new instance + self.dist = dist.__class__(**dist._updated_ctor_param()) + + shapes, _, _ = self.dist._parse_args(*args, **kwds) + self.a, self.b = self.dist._get_support(*shapes) + + @property + def random_state(self): + return self.dist._random_state + + @random_state.setter + def random_state(self, seed): + self.dist._random_state = check_random_state(seed) + + def cdf(self, x): + return self.dist.cdf(x, *self.args, **self.kwds) + + def logcdf(self, x): + return self.dist.logcdf(x, *self.args, **self.kwds) + + def ppf(self, q): + return self.dist.ppf(q, *self.args, **self.kwds) + + def isf(self, q): + return self.dist.isf(q, *self.args, **self.kwds) + + def rvs(self, size=None, random_state=None): + kwds = self.kwds.copy() + kwds.update({'size': size, 'random_state': random_state}) + return self.dist.rvs(*self.args, **kwds) + + def sf(self, x): + return self.dist.sf(x, *self.args, **self.kwds) + + def logsf(self, x): + return self.dist.logsf(x, *self.args, **self.kwds) + + def stats(self, moments='mv'): + kwds = self.kwds.copy() + kwds.update({'moments': moments}) + return self.dist.stats(*self.args, **kwds) + + def median(self): + return self.dist.median(*self.args, **self.kwds) + + def mean(self): + return self.dist.mean(*self.args, **self.kwds) + + def var(self): + return self.dist.var(*self.args, **self.kwds) + + def std(self): + return self.dist.std(*self.args, **self.kwds) + + def moment(self, order=None): + return self.dist.moment(order, *self.args, **self.kwds) + + def entropy(self): + return self.dist.entropy(*self.args, **self.kwds) + + def interval(self, confidence=None): + return self.dist.interval(confidence, *self.args, **self.kwds) + + def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds): + # expect method only accepts shape parameters as positional args + # hence convert self.args, self.kwds, also loc/scale + # See the .expect method docstrings for the meaning of + # other parameters. + a, loc, scale = self.dist._parse_args(*self.args, **self.kwds) + if isinstance(self.dist, rv_discrete): + return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds) + else: + return self.dist.expect(func, a, loc, scale, lb, ub, + conditional, **kwds) + + def support(self): + return self.dist.support(*self.args, **self.kwds) + + +class rv_discrete_frozen(rv_frozen): + + def pmf(self, k): + return self.dist.pmf(k, *self.args, **self.kwds) + + def logpmf(self, k): # No error + return self.dist.logpmf(k, *self.args, **self.kwds) + + +class rv_continuous_frozen(rv_frozen): + + def pdf(self, x): + return self.dist.pdf(x, *self.args, **self.kwds) + + def logpdf(self, x): + return self.dist.logpdf(x, *self.args, **self.kwds) + + +def argsreduce(cond, *args): + """Clean arguments to: + + 1. Ensure all arguments are iterable (arrays of dimension at least one + 2. If cond != True and size > 1, ravel(args[i]) where ravel(condition) is + True, in 1D. + + Return list of processed arguments. 
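What `rv_frozen` buys in practice: the shape, loc and scale are bound once, and every method of the frozen object forwards to the parent distribution. A short usage sketch with the public API:

import numpy as np
from scipy.stats import gamma

frozen = gamma(a=2.0, scale=3.0)   # an rv_continuous_frozen instance
x = np.linspace(0.1, 10.0, 5)

print(np.allclose(frozen.pdf(x), gamma.pdf(x, a=2.0, scale=3.0)))  # True
print(frozen.mean(), gamma.mean(a=2.0, scale=3.0))                 # both 6.0
print(frozen.support())                                            # (0.0, inf)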
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import argsreduce + >>> rng = np.random.default_rng() + >>> A = rng.random((4, 5)) + >>> B = 2 + >>> C = rng.random((1, 5)) + >>> cond = np.ones(A.shape) + >>> [A1, B1, C1] = argsreduce(cond, A, B, C) + >>> A1.shape + (4, 5) + >>> B1.shape + (1,) + >>> C1.shape + (1, 5) + >>> cond[2,:] = 0 + >>> [A1, B1, C1] = argsreduce(cond, A, B, C) + >>> A1.shape + (15,) + >>> B1.shape + (1,) + >>> C1.shape + (15,) + + """ + # some distributions assume arguments are iterable. + newargs = np.atleast_1d(*args) + + # np.atleast_1d returns an array if only one argument, or a list of arrays + # if more than one argument. + if not isinstance(newargs, (list, tuple)): + newargs = (newargs,) + + if np.all(cond): + # broadcast arrays with cond + *newargs, cond = np.broadcast_arrays(*newargs, cond) + return [arg.ravel() for arg in newargs] + + s = cond.shape + # np.extract returns flattened arrays, which are not broadcastable together + # unless they are either the same size or size == 1. + return [(arg if np.size(arg) == 1 + else np.extract(cond, np.broadcast_to(arg, s))) + for arg in newargs] + + +parse_arg_template = """ +def _parse_args(self, %(shape_arg_str)s %(locscale_in)s): + return (%(shape_arg_str)s), %(locscale_out)s + +def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None): + return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size) + +def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'): + return (%(shape_arg_str)s), %(locscale_out)s, moments +""" + + +class rv_generic: + """Class which encapsulates common functionality between rv_discrete + and rv_continuous. + + """ + + def __init__(self, seed=None): + super().__init__() + + # figure out if _stats signature has 'moments' keyword + sig = _getfullargspec(self._stats) + self._stats_has_moments = ((sig.varkw is not None) or + ('moments' in sig.args) or + ('moments' in sig.kwonlyargs)) + self._random_state = check_random_state(seed) + + @property + def random_state(self): + """Get or set the generator object for generating random variates. + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, that instance is used. + + """ + return self._random_state + + @random_state.setter + def random_state(self, seed): + self._random_state = check_random_state(seed) + + def __setstate__(self, state): + try: + self.__dict__.update(state) + # attaches the dynamically created methods on each instance. + # if a subclass overrides rv_generic.__setstate__, or implements + # it's own _attach_methods, then it must make sure that + # _attach_argparser_methods is called. + self._attach_methods() + except ValueError: + # reconstitute an old pickle scipy<1.6, that contains + # (_ctor_param, random_state) as state + self._ctor_param = state[0] + self._random_state = state[1] + self.__init__() + + def _attach_methods(self): + """Attaches dynamically created methods to the rv_* instance. + + This method must be overridden by subclasses, and must itself call + _attach_argparser_methods. 
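The `random_state` property shown above lets a frozen RV carry its own generator instead of the global NumPy state; the same effect can be had per call by passing `random_state` to `rvs`. A small sketch:

import numpy as np
from scipy.stats import norm

frozen = norm(loc=2.0, scale=0.5)
frozen.random_state = np.random.default_rng(12345)
a = frozen.rvs(size=3)

b = norm.rvs(loc=2.0, scale=0.5, size=3,
             random_state=np.random.default_rng(12345))
print(np.allclose(a, b))   # identically seeded generators, identical draws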
This method is called in __init__ in + subclasses, and in __setstate__ + """ + raise NotImplementedError + + def _attach_argparser_methods(self): + """ + Generates the argument-parsing functions dynamically and attaches + them to the instance. + + Should be called from `_attach_methods`, typically in __init__ and + during unpickling (__setstate__) + """ + ns = {} + exec(self._parse_arg_template, ns) + # NB: attach to the instance, not class + for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']: + setattr(self, name, types.MethodType(ns[name], self)) + + def _construct_argparser( + self, meths_to_inspect, locscale_in, locscale_out): + """Construct the parser string for the shape arguments. + + This method should be called in __init__ of a class for each + distribution. It creates the `_parse_arg_template` attribute that is + then used by `_attach_argparser_methods` to dynamically create and + attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs` + methods to the instance. + + If self.shapes is a non-empty string, interprets it as a + comma-separated list of shape parameters. + + Otherwise inspects the call signatures of `meths_to_inspect` + and constructs the argument-parsing functions from these. + In this case also sets `shapes` and `numargs`. + """ + + if self.shapes: + # sanitize the user-supplied shapes + if not isinstance(self.shapes, str): + raise TypeError('shapes must be a string.') + + shapes = self.shapes.replace(',', ' ').split() + + for field in shapes: + if keyword.iskeyword(field): + raise SyntaxError('keywords cannot be used as shapes.') + if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field): + raise SyntaxError( + 'shapes must be valid python identifiers') + else: + # find out the call signatures (_pdf, _cdf etc), deduce shape + # arguments. Generic methods only have 'self, x', any further args + # are shapes. 
+ shapes_list = [] + for meth in meths_to_inspect: + shapes_args = _getfullargspec(meth) # NB does not contain self + args = shapes_args.args[1:] # peel off 'x', too + + if args: + shapes_list.append(args) + + # *args or **kwargs are not allowed w/automatic shapes + if shapes_args.varargs is not None: + raise TypeError( + '*args are not allowed w/out explicit shapes') + if shapes_args.varkw is not None: + raise TypeError( + '**kwds are not allowed w/out explicit shapes') + if shapes_args.kwonlyargs: + raise TypeError( + 'kwonly args are not allowed w/out explicit shapes') + if shapes_args.defaults is not None: + raise TypeError('defaults are not allowed for shapes') + + if shapes_list: + shapes = shapes_list[0] + + # make sure the signatures are consistent + for item in shapes_list: + if item != shapes: + raise TypeError('Shape arguments are inconsistent.') + else: + shapes = [] + + # have the arguments, construct the method from template + shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None + dct = dict(shape_arg_str=shapes_str, + locscale_in=locscale_in, + locscale_out=locscale_out, + ) + + # this string is used by _attach_argparser_methods + self._parse_arg_template = parse_arg_template % dct + + self.shapes = ', '.join(shapes) if shapes else None + if not hasattr(self, 'numargs'): + # allows more general subclassing with *args + self.numargs = len(shapes) + + def _construct_doc(self, docdict, shapes_vals=None): + """Construct the instance docstring with string substitutions.""" + tempdict = docdict.copy() + tempdict['name'] = self.name or 'distname' + tempdict['shapes'] = self.shapes or '' + + if shapes_vals is None: + shapes_vals = () + vals = ', '.join('%.3g' % val for val in shapes_vals) + tempdict['vals'] = vals + + tempdict['shapes_'] = self.shapes or '' + if self.shapes and self.numargs == 1: + tempdict['shapes_'] += ',' + + if self.shapes: + tempdict['set_vals_stmt'] = f'>>> {self.shapes} = {vals}' + else: + tempdict['set_vals_stmt'] = '' + + if self.shapes is None: + # remove shapes from call parameters if there are none + for item in ['default', 'before_notes']: + tempdict[item] = tempdict[item].replace( + "\n%(shapes)s : array_like\n shape parameters", "") + for i in range(2): + if self.shapes is None: + # necessary because we use %(shapes)s in two forms (w w/o ", ") + self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") + try: + self.__doc__ = doccer.docformat(self.__doc__, tempdict) + except TypeError as e: + raise Exception("Unable to construct docstring for " + f"distribution \"{self.name}\": {repr(e)}") from e + + # correct for empty shapes + self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')') + + def _construct_default_doc(self, longname=None, + docdict=None, discrete='continuous'): + """Construct instance docstring from the default template.""" + if longname is None: + longname = 'A' + self.__doc__ = ''.join([f'{longname} {discrete} random variable.', + '\n\n%(before_notes)s\n', docheaders['notes'], + '\n%(example)s']) + self._construct_doc(docdict) + + def freeze(self, *args, **kwds): + """Freeze the distribution for the given arguments. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution. Should include all + the non-optional arguments, may include ``loc`` and ``scale``. + + Returns + ------- + rv_frozen : rv_frozen instance + The frozen distribution. 
+ + """ + if isinstance(self, rv_continuous): + return rv_continuous_frozen(self, *args, **kwds) + else: + return rv_discrete_frozen(self, *args, **kwds) + + def __call__(self, *args, **kwds): + return self.freeze(*args, **kwds) + __call__.__doc__ = freeze.__doc__ + + # The actual calculation functions (no basic checking need be done) + # If these are defined, the others won't be looked at. + # Otherwise, the other set can be defined. + def _stats(self, *args, **kwds): + return None, None, None, None + + # Noncentral moments (also known as the moment about the origin). + # Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime". + # The primed mu is a widely used notation for the noncentral moment. + def _munp(self, n, *args): + # Silence floating point warnings from integration. + with np.errstate(all='ignore'): + vals = self.generic_moment(n, *args) + return vals + + def _argcheck_rvs(self, *args, **kwargs): + # Handle broadcasting and size validation of the rvs method. + # Subclasses should not have to override this method. + # The rule is that if `size` is not None, then `size` gives the + # shape of the result (integer values of `size` are treated as + # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.) + # + # `args` is expected to contain the shape parameters (if any), the + # location and the scale in a flat tuple (e.g. if there are two + # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`). + # The only keyword argument expected is 'size'. + size = kwargs.get('size', None) + all_bcast = np.broadcast_arrays(*args) + + def squeeze_left(a): + while a.ndim > 0 and a.shape[0] == 1: + a = a[0] + return a + + # Eliminate trivial leading dimensions. In the convention + # used by numpy's random variate generators, trivial leading + # dimensions are effectively ignored. In other words, when `size` + # is given, trivial leading dimensions of the broadcast parameters + # in excess of the number of dimensions in size are ignored, e.g. + # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3) + # array([ 1.00104267, 3.00422496, 4.99799278]) + # If `size` is not given, the exact broadcast shape is preserved: + # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]]) + # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]]) + # + all_bcast = [squeeze_left(a) for a in all_bcast] + bcast_shape = all_bcast[0].shape + bcast_ndim = all_bcast[0].ndim + + if size is None: + size_ = bcast_shape + else: + size_ = tuple(np.atleast_1d(size)) + + # Check compatibility of size_ with the broadcast shape of all + # the parameters. This check is intended to be consistent with + # how the numpy random variate generators (e.g. np.random.normal, + # np.random.beta) handle their arguments. The rule is that, if size + # is given, it determines the shape of the output. Broadcasting + # can't change the output size. + + # This is the standard broadcasting convention of extending the + # shape with fewer dimensions with enough dimensions of length 1 + # so that the two shapes have the same number of dimensions. + ndiff = bcast_ndim - len(size_) + if ndiff < 0: + bcast_shape = (1,)*(-ndiff) + bcast_shape + elif ndiff > 0: + size_ = (1,)*ndiff + size_ + + # This compatibility test is not standard. In "regular" broadcasting, + # two shapes are compatible if for each dimension, the lengths are the + # same or one of the lengths is 1. Here, the length of a dimension in + # size_ must not be less than the corresponding length in bcast_shape. 
+ ok = all([bcdim == 1 or bcdim == szdim + for (bcdim, szdim) in zip(bcast_shape, size_)]) + if not ok: + raise ValueError("size does not match the broadcast shape of " + f"the parameters. {size}, {size_}, {bcast_shape}") + + param_bcast = all_bcast[:-2] + loc_bcast = all_bcast[-2] + scale_bcast = all_bcast[-1] + + return param_bcast, loc_bcast, scale_bcast, size_ + + # These are the methods you must define (standard form functions) + # NB: generic _pdf, _logpdf, _cdf are different for + # rv_continuous and rv_discrete hence are defined in there + def _argcheck(self, *args): + """Default check for correct values on args and keywords. + + Returns condition array of 1's where arguments are correct and + 0's where they are not. + + """ + cond = 1 + for arg in args: + cond = logical_and(cond, (asarray(arg) > 0)) + return cond + + def _get_support(self, *args, **kwargs): + """Return the support of the (unscaled, unshifted) distribution. + + *Must* be overridden by distributions which have support dependent + upon the shape parameters of the distribution. Any such override + *must not* set or change any of the class members, as these members + are shared amongst all instances of the distribution. + + Parameters + ---------- + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + a, b : numeric (float, or int or +/-np.inf) + end-points of the distribution's support for the specified + shape parameters. + """ + return self.a, self.b + + def _support_mask(self, x, *args): + a, b = self._get_support(*args) + with np.errstate(invalid='ignore'): + return (a <= x) & (x <= b) + + def _open_support_mask(self, x, *args): + a, b = self._get_support(*args) + with np.errstate(invalid='ignore'): + return (a < x) & (x < b) + + def _rvs(self, *args, size=None, random_state=None): + # This method must handle size being a tuple, and it must + # properly broadcast *args and size. size might be + # an empty tuple, which means a scalar random variate is to be + # generated. + + # Use basic inverse cdf algorithm for RV generation as default. + U = random_state.uniform(size=size) + Y = self._ppf(U, *args) + return Y + + def _logcdf(self, x, *args): + with np.errstate(divide='ignore'): + return log(self._cdf(x, *args)) + + def _sf(self, x, *args): + return 1.0-self._cdf(x, *args) + + def _logsf(self, x, *args): + with np.errstate(divide='ignore'): + return log(self._sf(x, *args)) + + def _ppf(self, q, *args): + return self._ppfvec(q, *args) + + def _isf(self, q, *args): + return self._ppf(1.0-q, *args) # use correct _ppf for subclasses + + # These are actually called, and should not be overwritten if you + # want to keep error checking. + def rvs(self, *args, **kwds): + """Random variates of given type. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + scale : array_like, optional + Scale parameter (default=1). + size : int or tuple of ints, optional + Defining number of random variates (default is 1). + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is + used, seeded with `random_state`. 
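Seen from the public API, the size/broadcast rule enforced above means `size` fixes the output shape: parameter arrays may broadcast into it, but they may not force a larger one. Example values are arbitrary:

import numpy as np
from scipy.stats import norm

loc = np.array([1.0, 2.0, 3.0])                  # broadcast shape (3,)

ok = norm.rvs(loc=loc, scale=1.0, size=(2, 3))   # (3,) broadcasts into (2, 3)
print(ok.shape)

try:
    norm.rvs(loc=loc, scale=1.0, size=(3, 2))    # trailing dims 3 vs 2 clash
except ValueError as err:
    print('rejected:', err)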
+ If `random_state` is already a ``Generator`` or ``RandomState`` + instance, that instance is used. + + Returns + ------- + rvs : ndarray or scalar + Random variates of given `size`. + + """ + discrete = kwds.pop('discrete', None) + rndm = kwds.pop('random_state', None) + args, loc, scale, size = self._parse_args_rvs(*args, **kwds) + cond = logical_and(self._argcheck(*args), (scale >= 0)) + if not np.all(cond): + message = ("Domain error in arguments. The `scale` parameter must " + "be positive for all distributions, and many " + "distributions have restrictions on shape parameters. " + f"Please see the `scipy.stats.{self.name}` " + "documentation for details.") + raise ValueError(message) + + if np.all(scale == 0): + return loc*ones(size, 'd') + + # extra gymnastics needed for a custom random_state + if rndm is not None: + random_state_saved = self._random_state + random_state = check_random_state(rndm) + else: + random_state = self._random_state + + vals = self._rvs(*args, size=size, random_state=random_state) + + vals = vals * scale + loc + + # do not forget to restore the _random_state + if rndm is not None: + self._random_state = random_state_saved + + # Cast to int if discrete + if discrete and not isinstance(self, rv_sample): + if size == (): + vals = int(vals) + else: + vals = vals.astype(np.int64) + + return vals + + def stats(self, *args, **kwds): + """Some statistics of the given RV. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional (continuous RVs only) + scale parameter (default=1) + moments : str, optional + composed of letters ['mvsk'] defining which moments to compute: + 'm' = mean, + 'v' = variance, + 's' = (Fisher's) skew, + 'k' = (Fisher's) kurtosis. + (default is 'mv') + + Returns + ------- + stats : sequence + of requested moments. 
+ + """ + args, loc, scale, moments = self._parse_args_stats(*args, **kwds) + # scale = 1 by construction for discrete RVs + loc, scale = map(asarray, (loc, scale)) + args = tuple(map(asarray, args)) + cond = self._argcheck(*args) & (scale > 0) & (loc == loc) + output = [] + default = np.full(shape(cond), fill_value=self.badvalue) + + # Use only entries that are valid in calculation + if np.any(cond): + goodargs = argsreduce(cond, *(args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + + if self._stats_has_moments: + mu, mu2, g1, g2 = self._stats(*goodargs, + **{'moments': moments}) + else: + mu, mu2, g1, g2 = self._stats(*goodargs) + + if 'm' in moments: + if mu is None: + mu = self._munp(1, *goodargs) + out0 = default.copy() + place(out0, cond, mu * scale + loc) + output.append(out0) + + if 'v' in moments: + if mu2 is None: + mu2p = self._munp(2, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + # if mean is inf then var is also inf + with np.errstate(invalid='ignore'): + mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf) + out0 = default.copy() + place(out0, cond, mu2 * scale * scale) + output.append(out0) + + if 's' in moments: + if g1 is None: + mu3p = self._munp(3, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + if mu2 is None: + mu2p = self._munp(2, *goodargs) + with np.errstate(invalid='ignore'): + mu2 = mu2p - mu * mu + with np.errstate(invalid='ignore'): + mu3 = (-mu*mu - 3*mu2)*mu + mu3p + g1 = mu3 / np.power(mu2, 1.5) + out0 = default.copy() + place(out0, cond, g1) + output.append(out0) + + if 'k' in moments: + if g2 is None: + mu4p = self._munp(4, *goodargs) + if mu is None: + mu = self._munp(1, *goodargs) + if mu2 is None: + mu2p = self._munp(2, *goodargs) + with np.errstate(invalid='ignore'): + mu2 = mu2p - mu * mu + if g1 is None: + mu3 = None + else: + # (mu2**1.5) breaks down for nan and inf + mu3 = g1 * np.power(mu2, 1.5) + if mu3 is None: + mu3p = self._munp(3, *goodargs) + with np.errstate(invalid='ignore'): + mu3 = (-mu * mu - 3 * mu2) * mu + mu3p + with np.errstate(invalid='ignore'): + mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p + g2 = mu4 / mu2**2.0 - 3.0 + out0 = default.copy() + place(out0, cond, g2) + output.append(out0) + else: # no valid args + output = [default.copy() for _ in moments] + + output = [out[()] for out in output] + if len(output) == 1: + return output[0] + else: + return tuple(output) + + def entropy(self, *args, **kwds): + """Differential entropy of the RV. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + scale : array_like, optional (continuous distributions only). + Scale parameter (default=1). 
+ + Notes + ----- + Entropy is defined base `e`: + + >>> import numpy as np + >>> from scipy.stats._distn_infrastructure import rv_discrete + >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5))) + >>> np.allclose(drv.entropy(), np.log(2.0)) + True + + """ + args, loc, scale = self._parse_args(*args, **kwds) + # NB: for discrete distributions scale=1 by construction in _parse_args + loc, scale = map(asarray, (loc, scale)) + args = tuple(map(asarray, args)) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + output = zeros(shape(cond0), 'd') + place(output, (1-cond0), self.badvalue) + goodargs = argsreduce(cond0, scale, *args) + goodscale = goodargs[0] + goodargs = goodargs[1:] + place(output, cond0, self.vecentropy(*goodargs) + log(goodscale)) + return output[()] + + def moment(self, order, *args, **kwds): + """non-central moment of distribution of specified order. + + Parameters + ---------- + order : int, order >= 1 + Order of moment. + arg1, arg2, arg3,... : float + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + """ + n = order + shapes, loc, scale = self._parse_args(*args, **kwds) + args = np.broadcast_arrays(*(*shapes, loc, scale)) + *shapes, loc, scale = args + + i0 = np.logical_and(self._argcheck(*shapes), scale > 0) + i1 = np.logical_and(i0, loc == 0) + i2 = np.logical_and(i0, loc != 0) + + args = argsreduce(i0, *shapes, loc, scale) + *shapes, loc, scale = args + + if (floor(n) != n): + raise ValueError("Moment must be an integer.") + if (n < 0): + raise ValueError("Moment must be positive.") + mu, mu2, g1, g2 = None, None, None, None + if (n > 0) and (n < 5): + if self._stats_has_moments: + mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'mvsk'}[n]} + else: + mdict = {} + mu, mu2, g1, g2 = self._stats(*shapes, **mdict) + val = np.empty(loc.shape) # val needs to be indexed by loc + val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes) + + # Convert to transformed X = L + S*Y + # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n) + result = zeros(i0.shape) + place(result, ~i0, self.badvalue) + + if i1.any(): + res1 = scale[loc == 0]**n * val[loc == 0] + place(result, i1, res1) + + if i2.any(): + mom = [mu, mu2, g1, g2] + arrs = [i for i in mom if i is not None] + idx = [i for i in range(4) if mom[i] is not None] + if any(idx): + arrs = argsreduce(loc != 0, *arrs) + j = 0 + for i in idx: + mom[i] = arrs[j] + j += 1 + mu, mu2, g1, g2 = mom + args = argsreduce(loc != 0, *shapes, loc, scale, val) + *shapes, loc, scale, val = args + + res2 = zeros(loc.shape, dtype='d') + fac = scale / loc + for k in range(n): + valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, + shapes) + res2 += comb(n, k, exact=True)*fac**k * valk + res2 += fac**n * val + res2 *= loc**n + place(result, i2, res2) + + return result[()] + + def median(self, *args, **kwds): + """Median of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + Location parameter, Default is 0. + scale : array_like, optional + Scale parameter, Default is 1. + + Returns + ------- + median : float + The median of the distribution. 
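The binomial expansion `E[(L + S*Y)^n]` used in `moment` above, checked for the normal distribution with loc=3, scale=2 and n=2, where `E[X^2] = scale**2 + loc**2 = 13`:

from scipy.special import comb
from scipy.stats import norm

loc, scale, n = 3.0, 2.0, 2
std_moments = [1.0, 0.0, 1.0]   # E[Y^0], E[Y^1], E[Y^2] for the standard normal

expanded = sum(comb(n, k, exact=True) * loc**(n - k) * scale**k * std_moments[k]
               for k in range(n + 1))
print(expanded)                              # 13.0
print(norm.moment(2, loc=3.0, scale=2.0))    # 13.0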
+ + See Also + -------- + rv_discrete.ppf + Inverse of the CDF + + """ + return self.ppf(0.5, *args, **kwds) + + def mean(self, *args, **kwds): + """Mean of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + mean : float + the mean of the distribution + + """ + kwds['moments'] = 'm' + res = self.stats(*args, **kwds) + if isinstance(res, ndarray) and res.ndim == 0: + return res[()] + return res + + def var(self, *args, **kwds): + """Variance of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + var : float + the variance of the distribution + + """ + kwds['moments'] = 'v' + res = self.stats(*args, **kwds) + if isinstance(res, ndarray) and res.ndim == 0: + return res[()] + return res + + def std(self, *args, **kwds): + """Standard deviation of the distribution. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + std : float + standard deviation of the distribution + + """ + kwds['moments'] = 'v' + res = sqrt(self.stats(*args, **kwds)) + return res + + def interval(self, confidence, *args, **kwds): + """Confidence interval with equal areas around the median. + + Parameters + ---------- + confidence : array_like of float + Probability that an rv will be drawn from the returned range. + Each value should be in the range [0, 1]. + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + location parameter, Default is 0. + scale : array_like, optional + scale parameter, Default is 1. + + Returns + ------- + a, b : ndarray of float + end-points of range that contain ``100 * alpha %`` of the rv's + possible values. + + Notes + ----- + This is implemented as ``ppf([p_tail, 1-p_tail])``, where + ``ppf`` is the inverse cumulative distribution function and + ``p_tail = (1-confidence)/2``. Suppose ``[c, d]`` is the support of a + discrete distribution; then ``ppf([0, 1]) == (c-1, d)``. Therefore, + when ``confidence=1`` and the distribution is discrete, the left end + of the interval will be beyond the support of the distribution. + For discrete distributions, the interval will limit the probability + in each tail to be less than or equal to ``p_tail`` (usually + strictly less). + + """ + alpha = confidence + + alpha = asarray(alpha) + if np.any((alpha > 1) | (alpha < 0)): + raise ValueError("alpha must be between 0 and 1 inclusive") + q1 = (1.0-alpha)/2 + q2 = (1.0+alpha)/2 + a = self.ppf(q1, *args, **kwds) + b = self.ppf(q2, *args, **kwds) + return a, b + + def support(self, *args, **kwargs): + """Support of the distribution. + + Parameters + ---------- + arg1, arg2, ... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + location parameter, Default is 0. + scale : array_like, optional + scale parameter, Default is 1. + + Returns + ------- + a, b : array_like + end-points of the distribution's support. + + """ + args, loc, scale = self._parse_args(*args, **kwargs) + arrs = np.broadcast_arrays(*args, loc, scale) + args, loc, scale = arrs[:-2], arrs[-2], arrs[-1] + cond = self._argcheck(*args) & (scale > 0) + _a, _b = self._get_support(*args) + if cond.all(): + return _a * scale + loc, _b * scale + loc + elif cond.ndim == 0: + return self.badvalue, self.badvalue + # promote bounds to at least float to fill in the badvalue + _a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d') + out_a, out_b = _a * scale + loc, _b * scale + loc + place(out_a, 1-cond, self.badvalue) + place(out_b, 1-cond, self.badvalue) + return out_a, out_b + + def nnlf(self, theta, x): + """Negative loglikelihood function. + Notes + ----- + This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the + parameters (including loc and scale). + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = (asarray(x)-loc) / scale + n_log_scale = len(x) * log(scale) + if np.any(~self._support_mask(x, *args)): + return inf + return self._nnlf(x, *args) + n_log_scale + + def _nnlf(self, x, *args): + return -np.sum(self._logpxf(x, *args), axis=0) + + def _nlff_and_penalty(self, x, args, log_fitfun): + # negative log fit function + cond0 = ~self._support_mask(x, *args) + n_bad = np.count_nonzero(cond0, axis=0) + if n_bad > 0: + x = argsreduce(~cond0, x)[0] + logff = log_fitfun(x, *args) + finite_logff = np.isfinite(logff) + n_bad += np.sum(~finite_logff, axis=0) + if n_bad > 0: + penalty = n_bad * log(_XMAX) * 100 + return -np.sum(logff[finite_logff], axis=0) + penalty + return -np.sum(logff, axis=0) + + def _penalized_nnlf(self, theta, x): + """Penalized negative loglikelihood function. + i.e., - sum (log pdf(x, theta), axis=0) + penalty + where theta are the parameters (including loc and scale) + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = asarray((x-loc) / scale) + n_log_scale = len(x) * log(scale) + return self._nlff_and_penalty(x, args, self._logpxf) + n_log_scale + + def _penalized_nlpsf(self, theta, x): + """Penalized negative log product spacing function. 
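`nnlf` above is the negative log-likelihood in the (shapes, loc, scale) parametrization; for a distribution without shape parameters it reduces to `-sum(logpdf)`. A quick check on synthetic data:

import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(1)
x = rng.normal(loc=1.0, scale=2.0, size=200)

theta = (1.0, 2.0)                              # (loc, scale); norm has no shapes
print(-np.sum(norm.logpdf(x, loc=1.0, scale=2.0)))
print(norm.nnlf(theta, x))                      # same value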
+ i.e., - sum (log (diff (cdf (x, theta))), axis=0) + penalty + where theta are the parameters (including loc and scale) + Follows reference [1] of scipy.stats.fit + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + x = (np.sort(x) - loc)/scale + + def log_psf(x, *args): + x, lj = np.unique(x, return_counts=True) # fast for sorted x + cdf_data = self._cdf(x, *args) if x.size else [] + if not (x.size and 1 - cdf_data[-1] <= 0): + cdf = np.concatenate(([0], cdf_data, [1])) + lj = np.concatenate((lj, [1])) + else: + cdf = np.concatenate(([0], cdf_data)) + # here we could use logcdf w/ logsumexp trick to take differences, + # but in the context of the method, it seems unlikely to matter + return lj * np.log(np.diff(cdf) / lj) + + return self._nlff_and_penalty(x, args, log_psf) + + +class _ShapeInfo: + def __init__(self, name, integrality=False, domain=(-np.inf, np.inf), + inclusive=(True, True)): + self.name = name + self.integrality = integrality + + domain = list(domain) + if np.isfinite(domain[0]) and not inclusive[0]: + domain[0] = np.nextafter(domain[0], np.inf) + if np.isfinite(domain[1]) and not inclusive[1]: + domain[1] = np.nextafter(domain[1], -np.inf) + self.domain = domain + + +def _get_fixed_fit_value(kwds, names): + """ + Given names such as `['f0', 'fa', 'fix_a']`, check that there is + at most one non-None value in `kwds` associaed with those names. + Return that value, or None if none of the names occur in `kwds`. + As a side effect, all occurrences of those names in `kwds` are + removed. + """ + vals = [(name, kwds.pop(name)) for name in names if name in kwds] + if len(vals) > 1: + repeated = [name for name, val in vals] + raise ValueError("fit method got multiple keyword arguments to " + "specify the same fixed parameter: " + + ', '.join(repeated)) + return vals[0][1] if vals else None + + +# continuous random variables: implement maybe later +# +# hf --- Hazard Function (PDF / SF) +# chf --- Cumulative hazard function (-log(SF)) +# psf --- Probability sparsity function (reciprocal of the pdf) in +# units of percent-point-function (as a function of q). +# Also, the derivative of the percent-point function. + + +class rv_continuous(rv_generic): + """A generic continuous random variable class meant for subclassing. + + `rv_continuous` is a base class to construct specific distribution classes + and instances for continuous random variables. It cannot be used + directly as a distribution. + + Parameters + ---------- + momtype : int, optional + The type of generic moment calculation to use: 0 for pdf, 1 (default) + for ppf. + a : float, optional + Lower bound of the support of the distribution, default is minus + infinity. + b : float, optional + Upper bound of the support of the distribution, default is plus + infinity. + xtol : float, optional + The tolerance for fixed point calculation for generic ppf. + badvalue : float, optional + The value in a result arrays that indicates a value that for which + some argument restriction is violated, default is np.nan. + name : str, optional + The name of the instance. This string is used to construct the default + example for distributions. + longname : str, optional + This string is used as part of the first line of the docstring returned + when a subclass has no docstring of its own. Note: `longname` exists + for backwards compatibility, do not use for new subclasses. + shapes : str, optional + The shape of the distribution. 
For example ``"m, n"`` for a + distribution that takes two integers as the two shape arguments for all + its methods. If not provided, shape parameters will be inferred from + the signature of the private methods, ``_pdf`` and ``_cdf`` of the + instance. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + pdf + logpdf + cdf + logcdf + sf + logsf + ppf + isf + moment + stats + entropy + expect + median + mean + std + var + interval + __call__ + fit + fit_loc_scale + nnlf + support + + Notes + ----- + Public methods of an instance of a distribution class (e.g., ``pdf``, + ``cdf``) check their arguments and pass valid arguments to private, + computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid + if it is within the support of the distribution. + Whether a shape parameter is valid is decided by an ``_argcheck`` method + (which defaults to checking that its arguments are strictly positive.) + + **Subclassing** + + New random variables can be defined by subclassing the `rv_continuous` class + and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized + to location 0 and scale 1). + + If positive argument checking is not correct for your RV + then you will also need to re-define the ``_argcheck`` method. + + For most of the scipy.stats distributions, the support interval doesn't + depend on the shape parameters. ``x`` being in the support interval is + equivalent to ``self.a <= x <= self.b``. If either of the endpoints of + the support do depend on the shape parameters, then + i) the distribution must implement the ``_get_support`` method; and + ii) those dependent endpoints must be omitted from the distribution's + call to the ``rv_continuous`` initializer. + + Correct, but potentially slow defaults exist for the remaining + methods but for speed and/or accuracy you can over-ride:: + + _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf + + The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``, + applied to a uniform random variate. In order to generate random variates + efficiently, either the default ``_ppf`` needs to be overwritten (e.g. + if the inverse cdf can expressed in an explicit form) or a sampling + method needs to be implemented in a custom ``_rvs`` method. + + If possible, you should override ``_isf``, ``_sf`` or ``_logsf``. + The main reason would be to improve numerical accuracy: for example, + the survival function ``_sf`` is computed as ``1 - _cdf`` which can + result in loss of precision if ``_cdf(x)`` is close to one. + + **Methods that can be overwritten by subclasses** + :: + + _rvs + _pdf + _cdf + _sf + _ppf + _isf + _stats + _munp + _entropy + _argcheck + _get_support + + There are additional (internal and private) generic methods that can + be useful for cross-checking and for debugging, but might work in all + cases when directly called. + + A note on ``shapes``: subclasses need not specify them explicitly. In this + case, `shapes` will be automatically deduced from the signatures of the + overridden methods (`pdf`, `cdf` etc). + If, for some reason, you prefer to avoid relying on introspection, you can + specify ``shapes`` explicitly as an argument to the instance constructor. 
+ + + **Frozen Distributions** + + Normally, you must provide shape parameters (and, optionally, location and + scale parameters) to each call of a method of a distribution. + + Alternatively, the object may be called (as a function) to fix the shape, + location, and scale parameters, returning a "frozen" continuous RV object: + + rv = generic(<shape(s)>, loc=0, scale=1) + `rv_frozen` object with the same methods but holding the given shape, + location, and scale fixed + + **Statistics** + + Statistics are computed using numerical integration by default. + For speed you can redefine this using ``_stats``: + + - take shape parameters and return mu, mu2, g1, g2 + - If you can't compute one of these, return it as None + - Can also be defined with a keyword argument ``moments``, which is a + string composed of "m", "v", "s", and/or "k". + Only the components appearing in the string should be computed and + returned in the order "m", "v", "s", or "k" with missing values + returned as None. + + Alternatively, you can override ``_munp``, which takes ``n`` and shape + parameters and returns the n-th non-central moment of the distribution. + + **Deepcopying / Pickling** + + If a distribution or frozen distribution is deepcopied (pickled/unpickled, + etc.), any underlying random number generator is deepcopied with it. An + implication is that if a distribution relies on the singleton RandomState + before copying, it will rely on a copy of that random state after copying, + and ``np.random.seed`` will no longer control the state. + + Examples + -------- + To create a new Gaussian distribution, we would do the following: + + >>> from scipy.stats import rv_continuous + >>> class gaussian_gen(rv_continuous): + ... "Gaussian distribution" + ... def _pdf(self, x): + ... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi) + >>> gaussian = gaussian_gen(name='gaussian') + + ``scipy.stats`` distributions are *instances*, so here we subclass + `rv_continuous` and create an instance. With this, we now have + a fully functional distribution with all relevant methods automagically + generated by the framework. + + Note that above we defined a standard normal distribution, with zero mean + and unit variance. Shifting and scaling of the distribution can be done + by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)`` + essentially computes ``y = (x - loc) / scale`` and + ``gaussian._pdf(y) / scale``.
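The loc/scale convention described above can be checked numerically; the following sketch uses the built-in ``norm`` distribution, which follows the same rules as any ``rv_continuous`` subclass:

import numpy as np
from scipy.stats import norm

x, loc, scale = 1.3, 2.0, 0.5
lhs = norm.pdf(x, loc=loc, scale=scale)
rhs = norm.pdf((x - loc) / scale) / scale  # standardize, then divide by scale
assert np.isclose(lhs, rhs)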
+ + """ + + def __init__(self, momtype=1, a=None, b=None, xtol=1e-14, + badvalue=None, name=None, longname=None, + shapes=None, seed=None): + + super().__init__(seed) + + # save the ctor parameters, cf generic freeze + self._ctor_param = dict( + momtype=momtype, a=a, b=b, xtol=xtol, + badvalue=badvalue, name=name, longname=longname, + shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + if name is None: + name = 'Distribution' + self.badvalue = badvalue + self.name = name + self.a = a + self.b = b + if a is None: + self.a = -inf + if b is None: + self.b = inf + self.xtol = xtol + self.moment_type = momtype + self.shapes = shapes + + self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf], + locscale_in='loc=0, scale=1', + locscale_out='loc, scale') + self._attach_methods() + + if longname is None: + if name[0] in ['aeiouAEIOU']: + hstr = "An " + else: + hstr = "A " + longname = hstr + name + + if sys.flags.optimize < 2: + # Skip adding docstrings if interpreter is run with -OO + if self.__doc__ is None: + self._construct_default_doc(longname=longname, + docdict=docdict, + discrete='continuous') + else: + dct = dict(distcont) + self._construct_doc(docdict, dct.get(self.name)) + + def __getstate__(self): + dct = self.__dict__.copy() + + # these methods will be remade in __setstate__ + # _random_state attribute is taken care of by rv_generic + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs", + "_cdfvec", "_ppfvec", "vecentropy", "generic_moment"] + [dct.pop(attr, None) for attr in attrs] + return dct + + def _attach_methods(self): + """ + Attaches dynamically created methods to the rv_continuous instance. + """ + # _attach_methods is responsible for calling _attach_argparser_methods + self._attach_argparser_methods() + + # nin correction + self._ppfvec = vectorize(self._ppf_single, otypes='d') + self._ppfvec.nin = self.numargs + 1 + self.vecentropy = vectorize(self._entropy, otypes='d') + self._cdfvec = vectorize(self._cdf_single, otypes='d') + self._cdfvec.nin = self.numargs + 1 + + if self.moment_type == 0: + self.generic_moment = vectorize(self._mom0_sc, otypes='d') + else: + self.generic_moment = vectorize(self._mom1_sc, otypes='d') + # Because of the *args argument of _mom0_sc, vectorize cannot count the + # number of arguments correctly. + self.generic_moment.nin = self.numargs + 1 + + def _updated_ctor_param(self): + """Return the current version of _ctor_param, possibly updated by user. + + Used by freezing. + Keep this in sync with the signature of __init__. + """ + dct = self._ctor_param.copy() + dct['a'] = self.a + dct['b'] = self.b + dct['xtol'] = self.xtol + dct['badvalue'] = self.badvalue + dct['name'] = self.name + dct['shapes'] = self.shapes + return dct + + def _ppf_to_solve(self, x, q, *args): + return self.cdf(*(x, )+args)-q + + def _ppf_single(self, q, *args): + factor = 10. 
+ left, right = self._get_support(*args) + + if np.isinf(left): + left = min(-factor, right) + while self._ppf_to_solve(left, q, *args) > 0.: + left, right = left * factor, left + # left is now such that cdf(left) <= q + # if right has changed, then cdf(right) > q + + if np.isinf(right): + right = max(factor, left) + while self._ppf_to_solve(right, q, *args) < 0.: + left, right = right, right * factor + # right is now such that cdf(right) >= q + + return optimize.brentq(self._ppf_to_solve, + left, right, args=(q,)+args, xtol=self.xtol) + + # moment from definition + def _mom_integ0(self, x, m, *args): + return x**m * self.pdf(x, *args) + + def _mom0_sc(self, m, *args): + _a, _b = self._get_support(*args) + return integrate.quad(self._mom_integ0, _a, _b, + args=(m,)+args)[0] + + # moment calculated using ppf + def _mom_integ1(self, q, m, *args): + return (self.ppf(q, *args))**m + + def _mom1_sc(self, m, *args): + return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0] + + def _pdf(self, x, *args): + return _derivative(self._cdf, x, dx=1e-5, args=args, order=5) + + # Could also define any of these + def _logpdf(self, x, *args): + p = self._pdf(x, *args) + with np.errstate(divide='ignore'): + return log(p) + + def _logpxf(self, x, *args): + # continuous distributions have PDF, discrete have PMF, but sometimes + # the distinction doesn't matter. This lets us use `_logpxf` for both + # discrete and continuous distributions. + return self._logpdf(x, *args) + + def _cdf_single(self, x, *args): + _a, _b = self._get_support(*args) + return integrate.quad(self._pdf, _a, x, args=args)[0] + + def _cdf(self, x, *args): + return self._cdfvec(x, *args) + + # generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined + # in rv_generic + + def pdf(self, x, *args, **kwds): + """Probability density function at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + pdf : ndarray + Probability density function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._support_mask(x, *args) & (scale > 0) + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + putmask(output, (1-cond0)+np.isnan(x), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args+(scale,))) + scale, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._pdf(*goodargs) / scale) + if output.ndim == 0: + return output[()] + return output + + def logpdf(self, x, *args, **kwds): + """Log of the probability density function at x of the given RV. + + This uses a more numerically accurate calculation if available. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logpdf : array_like + Log of the probability density function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._support_mask(x, *args) & (scale > 0) + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + putmask(output, (1-cond0)+np.isnan(x), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args+(scale,))) + scale, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._logpdf(*goodargs) - log(scale)) + if output.ndim == 0: + return output[()] + return output + + def cdf(self, x, *args, **kwds): + """ + Cumulative distribution function of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + cdf : ndarray + Cumulative distribution function evaluated at `x` + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = (x >= np.asarray(_b)) & cond0 + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._cdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def logcdf(self, x, *args, **kwds): + """Log of the cumulative distribution function at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... 
: array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logcdf : array_like + Log of the cumulative distribution function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = (x >= _b) & cond0 + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._logcdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def sf(self, x, *args, **kwds): + """Survival function (1 - `cdf`) at x of the given RV. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + sf : array_like + Survival function evaluated at x + + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = cond0 & (x <= _a) + cond = cond0 & cond1 + output = zeros(shape(cond), dtyp) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._sf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def logsf(self, x, *args, **kwds): + """Log of the survival function of the given RV. + + Returns the log of the "survival function," defined as (1 - `cdf`), + evaluated at `x`. + + Parameters + ---------- + x : array_like + quantiles + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + logsf : ndarray + Log of the survival function evaluated at `x`. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + x, loc, scale = map(asarray, (x, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + dtyp = np.promote_types(x.dtype, np.float64) + x = np.asarray((x - loc)/scale, dtype=dtyp) + cond0 = self._argcheck(*args) & (scale > 0) + cond1 = self._open_support_mask(x, *args) & (scale > 0) + cond2 = cond0 & (x <= _a) + cond = cond0 & cond1 + output = empty(shape(cond), dtyp) + output.fill(-inf) + place(output, (1-cond0)+np.isnan(x), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): + goodargs = argsreduce(cond, *((x,)+args)) + place(output, cond, self._logsf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def ppf(self, q, *args, **kwds): + """Percent point function (inverse of `cdf`) at q of the given RV. + + Parameters + ---------- + q : array_like + lower tail probability + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + x : array_like + quantile corresponding to the lower tail probability q. + + """ + args, loc, scale = self._parse_args(*args, **kwds) + q, loc, scale = map(asarray, (q, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + cond1 = (0 < q) & (q < 1) + cond2 = cond0 & (q == 0) + cond3 = cond0 & (q == 1) + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue) + + lower_bound = _a * scale + loc + upper_bound = _b * scale + loc + place(output, cond2, argsreduce(cond2, lower_bound)[0]) + place(output, cond3, argsreduce(cond3, upper_bound)[0]) + + if np.any(cond): # call only if at least 1 entry + goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + place(output, cond, self._ppf(*goodargs) * scale + loc) + if output.ndim == 0: + return output[()] + return output + + def isf(self, q, *args, **kwds): + """Inverse survival function (inverse of `sf`) at q of the given RV. + + Parameters + ---------- + q : array_like + upper tail probability + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + location parameter (default=0) + scale : array_like, optional + scale parameter (default=1) + + Returns + ------- + x : ndarray or scalar + Quantile corresponding to the upper tail probability q. 
+ + """ + args, loc, scale = self._parse_args(*args, **kwds) + q, loc, scale = map(asarray, (q, loc, scale)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) + cond1 = (0 < q) & (q < 1) + cond2 = cond0 & (q == 1) + cond3 = cond0 & (q == 0) + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue) + + lower_bound = _a * scale + loc + upper_bound = _b * scale + loc + place(output, cond2, argsreduce(cond2, lower_bound)[0]) + place(output, cond3, argsreduce(cond3, upper_bound)[0]) + + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) + scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] + place(output, cond, self._isf(*goodargs) * scale + loc) + if output.ndim == 0: + return output[()] + return output + + def _unpack_loc_scale(self, theta): + try: + loc = theta[-2] + scale = theta[-1] + args = tuple(theta[:-2]) + except IndexError as e: + raise ValueError("Not enough input arguments.") from e + return loc, scale, args + + def _nnlf_and_penalty(self, x, args): + """ + Compute the penalized negative log-likelihood for the + "standardized" data (i.e. already shifted by loc and + scaled by scale) for the shape parameters in `args`. + + `x` can be a 1D numpy array or a CensoredData instance. + """ + if isinstance(x, CensoredData): + # Filter out the data that is not in the support. + xs = x._supported(*self._get_support(*args)) + n_bad = len(x) - len(xs) + i1, i2 = xs._interval.T + terms = [ + # logpdf of the noncensored data. + self._logpdf(xs._uncensored, *args), + # logcdf of the left-censored data. + self._logcdf(xs._left, *args), + # logsf of the right-censored data. + self._logsf(xs._right, *args), + # log of probability of the interval-censored data. + np.log(self._delta_cdf(i1, i2, *args)), + ] + else: + cond0 = ~self._support_mask(x, *args) + n_bad = np.count_nonzero(cond0) + if n_bad > 0: + x = argsreduce(~cond0, x)[0] + terms = [self._logpdf(x, *args)] + + totals, bad_counts = zip(*[_sum_finite(term) for term in terms]) + total = sum(totals) + n_bad += sum(bad_counts) + + return -total + n_bad * _LOGXMAX * 100 + + def _penalized_nnlf(self, theta, x): + """Penalized negative loglikelihood function. + + i.e., - sum (log pdf(x, theta), axis=0) + penalty + where theta are the parameters (including loc and scale) + """ + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + if isinstance(x, CensoredData): + x = (x - loc) / scale + n_log_scale = (len(x) - x.num_censored()) * log(scale) + else: + x = (x - loc) / scale + n_log_scale = len(x) * log(scale) + + return self._nnlf_and_penalty(x, args) + n_log_scale + + def _fitstart(self, data, args=None): + """Starting point for fit (shape arguments + loc + scale).""" + if args is None: + args = (1.0,)*self.numargs + loc, scale = self._fit_loc_scale_support(data, *args) + return args + (loc, scale) + + def _reduce_func(self, args, kwds, data=None): + """ + Return the (possibly reduced) function to optimize in order to find MLE + estimates for the .fit method. + """ + # Convert fixed shape parameters to the standard numeric form: e.g. for + # stats.beta, shapes='a, b'. To fix `a`, the caller can give a value + # for `f0`, `fa` or 'fix_a'. The following converts the latter two + # into the first (numeric) form. 
+ shapes = [] + if self.shapes: + shapes = self.shapes.replace(',', ' ').split() + for j, s in enumerate(shapes): + key = 'f' + str(j) + names = [key, 'f' + s, 'fix_' + s] + val = _get_fixed_fit_value(kwds, names) + if val is not None: + kwds[key] = val + + args = list(args) + Nargs = len(args) + fixedn = [] + names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale'] + x0 = [] + for n, key in enumerate(names): + if key in kwds: + fixedn.append(n) + args[n] = kwds.pop(key) + else: + x0.append(args[n]) + + methods = {"mle", "mm"} + method = kwds.pop('method', "mle").lower() + if method == "mm": + n_params = len(shapes) + 2 - len(fixedn) + exponents = (np.arange(1, n_params+1))[:, np.newaxis] + data_moments = np.sum(data[None, :]**exponents/len(data), axis=1) + + def objective(theta, x): + return self._moment_error(theta, x, data_moments) + + elif method == "mle": + objective = self._penalized_nnlf + else: + raise ValueError(f"Method '{method}' not available; " + f"must be one of {methods}") + + if len(fixedn) == 0: + func = objective + restore = None + else: + if len(fixedn) == Nargs: + raise ValueError( + "All parameters fixed. There is nothing to optimize.") + + def restore(args, theta): + # Replace with theta for all numbers not in fixedn + # This allows the non-fixed values to vary, but + # we still call self.nnlf with all parameters. + i = 0 + for n in range(Nargs): + if n not in fixedn: + args[n] = theta[i] + i += 1 + return args + + def func(theta, x): + newtheta = restore(args[:], theta) + return objective(newtheta, x) + + return x0, func, restore, args + + def _moment_error(self, theta, x, data_moments): + loc, scale, args = self._unpack_loc_scale(theta) + if not self._argcheck(*args) or scale <= 0: + return inf + + dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale) + for i in range(len(data_moments))]) + if np.any(np.isnan(dist_moments)): + raise ValueError("Method of moments encountered a non-finite " + "distribution moment and cannot continue. " + "Consider trying method='MLE'.") + + return (((data_moments - dist_moments) / + np.maximum(np.abs(data_moments), 1e-8))**2).sum() + + def fit(self, data, *args, **kwds): + r""" + Return estimates of shape (if applicable), location, and scale + parameters from data. The default estimation method is Maximum + Likelihood Estimation (MLE), but Method of Moments (MM) + is also available. + + Starting estimates for the fit are given by input arguments; + for any arguments not provided with starting estimates, + ``self._fitstart(data)`` is called to generate such. + + One can hold some parameters fixed to specific values by passing in + keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters) + and ``floc`` and ``fscale`` (for location and scale parameters, + respectively). + + Parameters + ---------- + data : array_like or `CensoredData` instance + Data to use in estimating the distribution parameters. + arg1, arg2, arg3,... : floats, optional + Starting value(s) for any shape-characterizing arguments (those not + provided will be determined by a call to ``_fitstart(data)``). + No default value. + **kwds : floats, optional + - `loc`: initial guess of the distribution's location parameter. + - `scale`: initial guess of the distribution's scale parameter. + + Special keyword arguments are recognized as holding certain + parameters fixed: + + - f0...fn : hold respective shape parameters fixed. + Alternatively, shape parameters to fix can be specified by name. 
+ For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a`` + are equivalent to ``f0``, and ``fb`` and ``fix_b`` are + equivalent to ``f1``. + + - floc : hold location parameter fixed to specified value. + + - fscale : hold scale parameter fixed to specified value. + + - optimizer : The optimizer to use. The optimizer must take + ``func`` and starting position as the first two arguments, + plus ``args`` (for extra arguments to pass to the + function to be optimized) and ``disp``. + The ``fit`` method calls the optimizer with ``disp=0`` to suppress output. + The optimizer must return the estimated parameters. + + - method : The method to use. The default is "MLE" (Maximum + Likelihood Estimate); "MM" (Method of Moments) + is also available. + + Raises + ------ + TypeError, ValueError + If an input is invalid + `~scipy.stats.FitError` + If fitting fails or the fit produced would be invalid + + Returns + ------- + parameter_tuple : tuple of floats + Estimates for any shape parameters (if applicable), followed by + those for location and scale. For most random variables, shape + statistics will be returned, but there are exceptions (e.g. + ``norm``). + + Notes + ----- + With ``method="MLE"`` (default), the fit is computed by minimizing + the negative log-likelihood function. A large, finite penalty + (rather than infinite negative log-likelihood) is applied for + observations beyond the support of the distribution. + + With ``method="MM"``, the fit is computed by minimizing the L2 norm + of the relative errors between the first *k* raw (about zero) data + moments and the corresponding distribution moments, where *k* is the + number of non-fixed parameters. + More precisely, the objective function is:: + + (((data_moments - dist_moments) + / np.maximum(np.abs(data_moments), 1e-8))**2).sum() + + where the constant ``1e-8`` avoids division by zero in case of + vanishing data moments. Typically, this error norm can be reduced to + zero. + Note that the standard method of moments can produce parameters for + which some data are outside the support of the fitted distribution; + this implementation does nothing to prevent this. + + For either method, + the returned answer is not guaranteed to be globally optimal; it + may only be locally optimal, or the optimization may fail altogether. + If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``, + the `fit` method will raise a ``RuntimeError``. + + When passing a ``CensoredData`` instance to ``data``, the log-likelihood + function is defined as: + + .. math:: + + l(\pmb{\theta}; k) & = \sum + \log(f(k_u; \pmb{\theta})) + + \sum + \log(F(k_l; \pmb{\theta})) \\ + & + \sum + \log(1 - F(k_r; \pmb{\theta})) \\ + & + \sum + \log(F(k_{\text{high}, i}; \pmb{\theta}) + - F(k_{\text{low}, i}; \pmb{\theta})) + + where :math:`f` and :math:`F` are the pdf and cdf, respectively, of the + function being fitted, :math:`\pmb{\theta}` is the parameter vector, + :math:`u` are the indices of uncensored observations, + :math:`l` are the indices of left-censored observations, + :math:`r` are the indices of right-censored observations, + subscripts "low"/"high" denote endpoints of interval-censored observations, and + :math:`i` are the indices of interval-censored observations. + + Examples + -------- + + Generate some data to fit: draw random variates from the `beta` + distribution + + >>> import numpy as np + >>> from scipy.stats import beta + >>> a, b = 1., 2. 
+ >>> rng = np.random.default_rng(172786373191770012695001057628748821561) + >>> x = beta.rvs(a, b, size=1000, random_state=rng) + + Now we can fit all four parameters (``a``, ``b``, ``loc`` and + ``scale``): + + >>> a1, b1, loc1, scale1 = beta.fit(x) + >>> a1, b1, loc1, scale1 + (1.0198945204435628, 1.9484708982737828, 4.372241314917588e-05, 0.9979078845964814) + + The fit can be done also using a custom optimizer: + + >>> from scipy.optimize import minimize + >>> def custom_optimizer(func, x0, args=(), disp=0): + ... res = minimize(func, x0, args, method="slsqp", options={"disp": disp}) + ... if res.success: + ... return res.x + ... raise RuntimeError('optimization routine failed') + >>> a1, b1, loc1, scale1 = beta.fit(x, method="MLE", optimizer=custom_optimizer) + >>> a1, b1, loc1, scale1 + (1.0198821087258905, 1.948484145914738, 4.3705304486881485e-05, 0.9979104663953395) + + We can also use some prior knowledge about the dataset: let's keep + ``loc`` and ``scale`` fixed: + + >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1) + >>> loc1, scale1 + (0, 1) + + We can also keep shape parameters fixed by using ``f``-keywords. To + keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or, + equivalently, ``fa=1``: + + >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1) + >>> a1 + 1 + + Not all distributions return estimates for the shape parameters. + ``norm`` for example just returns estimates for location and scale: + + >>> from scipy.stats import norm + >>> x = norm.rvs(a, b, size=1000, random_state=123) + >>> loc1, scale1 = norm.fit(x) + >>> loc1, scale1 + (0.92087172783841631, 2.0015750750324668) + """ # noqa: E501 + method = kwds.get('method', "mle").lower() + + censored = isinstance(data, CensoredData) + if censored: + if method != 'mle': + raise ValueError('For censored data, the method must' + ' be "MLE".') + if data.num_censored() == 0: + # There are no censored values in data, so replace the + # CensoredData instance with a regular array. + data = data._uncensored + censored = False + + Narg = len(args) + if Narg > self.numargs: + raise TypeError("Too many input arguments.") + + # Check the finiteness of data only if data is not an instance of + # CensoredData. The arrays in a CensoredData instance have already + # been validated. + if not censored: + # Note: `ravel()` is called for backwards compatibility. + data = np.asarray(data).ravel() + if not np.isfinite(data).all(): + raise ValueError("The data contains non-finite values.") + + start = [None]*2 + if (Narg < self.numargs) or not ('loc' in kwds and + 'scale' in kwds): + # get distribution specific starting locations + start = self._fitstart(data) + args += start[Narg:-2] + loc = kwds.pop('loc', start[-2]) + scale = kwds.pop('scale', start[-1]) + args += (loc, scale) + x0, func, restore, args = self._reduce_func(args, kwds, data=data) + optimizer = kwds.pop('optimizer', optimize.fmin) + # convert string to function in scipy.optimize + optimizer = _fit_determine_optimizer(optimizer) + # by now kwds must be empty, since everybody took what they needed + if kwds: + raise TypeError("Unknown arguments: %s." % kwds) + + # In some cases, method of moments can be done with fsolve/root + # instead of an optimizer, but sometimes no solution exists, + # especially when the user fixes parameters. Minimizing the sum + # of squares of the error generalizes to these cases. 
+ vals = optimizer(func, x0, args=(data,), disp=0) + obj = func(vals, data) + + if restore is not None: + vals = restore(args, vals) + vals = tuple(vals) + + loc, scale, shapes = self._unpack_loc_scale(vals) + if not (np.all(self._argcheck(*shapes)) and scale > 0): + raise FitError("Optimization converged to parameters that are " + "outside the range allowed by the distribution.") + + if method == 'mm': + if not np.isfinite(obj): + raise FitError("Optimization failed: either a data moment " + "or fitted distribution moment is " + "non-finite.") + + return vals + + def _fit_loc_scale_support(self, data, *args): + """Estimate loc and scale parameters from data accounting for support. + + Parameters + ---------- + data : array_like + Data to fit. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + Lhat : float + Estimated location parameter for the data. + Shat : float + Estimated scale parameter for the data. + + """ + if isinstance(data, CensoredData): + # For this estimate, "uncensor" the data by taking the + # given endpoints as the data for the left- or right-censored + # data, and the mean for the interval-censored data. + data = data._uncensor() + else: + data = np.asarray(data) + + # Estimate location and scale according to the method of moments. + loc_hat, scale_hat = self.fit_loc_scale(data, *args) + + # Compute the support according to the shape parameters. + self._argcheck(*args) + _a, _b = self._get_support(*args) + a, b = _a, _b + support_width = b - a + + # If the support is empty then return the moment-based estimates. + if support_width <= 0: + return loc_hat, scale_hat + + # Compute the proposed support according to the loc and scale + # estimates. + a_hat = loc_hat + a * scale_hat + b_hat = loc_hat + b * scale_hat + + # Use the moment-based estimates if they are compatible with the data. + data_a = np.min(data) + data_b = np.max(data) + if a_hat < data_a and data_b < b_hat: + return loc_hat, scale_hat + + # Otherwise find other estimates that are compatible with the data. + data_width = data_b - data_a + rel_margin = 0.1 + margin = data_width * rel_margin + + # For a finite interval, both the location and scale + # should have interesting values. + if support_width < np.inf: + loc_hat = (data_a - a) - margin + scale_hat = (data_width + 2 * margin) / support_width + return loc_hat, scale_hat + + # For a one-sided interval, use only an interesting location parameter. + if a > -np.inf: + return (data_a - a) - margin, 1 + elif b < np.inf: + return (data_b - b) + margin, 1 + else: + raise RuntimeError + + def fit_loc_scale(self, data, *args): + """ + Estimate loc and scale parameters from data using 1st and 2nd moments. + + Parameters + ---------- + data : array_like + Data to fit. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + Lhat : float + Estimated location parameter for the data. + Shat : float + Estimated scale parameter for the data. 
+ + """ + mu, mu2 = self.stats(*args, **{'moments': 'mv'}) + tmp = asarray(data) + muhat = tmp.mean() + mu2hat = tmp.var() + Shat = sqrt(mu2hat / mu2) + with np.errstate(invalid='ignore'): + Lhat = muhat - Shat*mu + if not np.isfinite(Lhat): + Lhat = 0 + if not (np.isfinite(Shat) and (0 < Shat)): + Shat = 1 + return Lhat, Shat + + def _entropy(self, *args): + def integ(x): + val = self._pdf(x, *args) + return entr(val) + + # upper limit is often inf, so suppress warnings when integrating + _a, _b = self._get_support(*args) + with np.errstate(over='ignore'): + h = integrate.quad(integ, _a, _b)[0] + + if not np.isnan(h): + return h + else: + # try with different limits if integration problems + low, upp = self.ppf([1e-10, 1. - 1e-10], *args) + if np.isinf(_b): + upper = upp + else: + upper = _b + if np.isinf(_a): + lower = low + else: + lower = _a + return integrate.quad(integ, lower, upper)[0] + + def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None, + conditional=False, **kwds): + """Calculate expected value of a function with respect to the + distribution by numerical integration. + + The expected value of a function ``f(x)`` with respect to a + distribution ``dist`` is defined as:: + + ub + E[f(x)] = Integral(f(x) * dist.pdf(x)), + lb + + where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)`` + distribution. If the bounds ``lb`` and ``ub`` correspond to the + support of the distribution, e.g. ``[-inf, inf]`` in the default + case, then the integral is the unrestricted expectation of ``f(x)``. + Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0`` + outside a finite interval in which case the expectation is + calculated within the finite range ``[lb, ub]``. + + Parameters + ---------- + func : callable, optional + Function for which integral is calculated. Takes only one argument. + The default is the identity mapping f(x) = x. + args : tuple, optional + Shape parameters of the distribution. + loc : float, optional + Location parameter (default=0). + scale : float, optional + Scale parameter (default=1). + lb, ub : scalar, optional + Lower and upper bound for integration. Default is set to the + support of the distribution. + conditional : bool, optional + If True, the integral is corrected by the conditional probability + of the integration interval. The return value is the expectation + of the function, conditional on being in the given interval. + Default is False. + + Additional keyword arguments are passed to the integration routine. + + Returns + ------- + expect : float + The calculated expected value. + + Notes + ----- + The integration behavior of this function is inherited from + `scipy.integrate.quad`. Neither this function nor + `scipy.integrate.quad` can verify whether the integral exists or is + finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and + ``cauchy(0).expect()`` returns ``0.0``. + + Likewise, the accuracy of results is not verified by the function. + `scipy.integrate.quad` is typically reliable for integrals that are + numerically favorable, but it is not guaranteed to converge + to a correct value for all possible intervals and integrands. This + function is provided for convenience; for critical applications, + check results against other integration methods. + + The function is not vectorized. 
+ + Examples + -------- + + To understand the effect of the bounds of integration consider + + >>> from scipy.stats import expon + >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0) + 0.6321205588285578 + + This is close to + + >>> expon(1).cdf(2.0) - expon(1).cdf(0.0) + 0.6321205588285577 + + If ``conditional=True`` + + >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True) + 1.0000000000000002 + + The slight deviation from 1 is due to numerical integration. + + The integrand can be treated as a complex-valued function + by passing ``complex_func=True`` to `scipy.integrate.quad` . + + >>> import numpy as np + >>> from scipy.stats import vonmises + >>> res = vonmises(loc=2, kappa=1).expect(lambda x: np.exp(1j*x), + ... complex_func=True) + >>> res + (-0.18576377217422957+0.40590124735052263j) + + >>> np.angle(res) # location of the (circular) distribution + 2.0 + + """ + lockwds = {'loc': loc, + 'scale': scale} + self._argcheck(*args) + _a, _b = self._get_support(*args) + if func is None: + def fun(x, *args): + return x * self.pdf(x, *args, **lockwds) + else: + def fun(x, *args): + return func(x) * self.pdf(x, *args, **lockwds) + if lb is None: + lb = loc + _a * scale + if ub is None: + ub = loc + _b * scale + + cdf_bounds = self.cdf([lb, ub], *args, **lockwds) + invfac = cdf_bounds[1] - cdf_bounds[0] + + kwds['args'] = args + + # split interval to help integrator w/ infinite support; see gh-8928 + alpha = 0.05 # split body from tails at probability mass `alpha` + inner_bounds = np.array([alpha, 1-alpha]) + cdf_inner_bounds = cdf_bounds[0] + invfac * inner_bounds + c, d = loc + self._ppf(cdf_inner_bounds, *args) * scale + + # Do not silence warnings from integration. + lbc = integrate.quad(fun, lb, c, **kwds)[0] + cd = integrate.quad(fun, c, d, **kwds)[0] + dub = integrate.quad(fun, d, ub, **kwds)[0] + vals = (lbc + cd + dub) + + if conditional: + vals /= invfac + return np.array(vals)[()] # make it a numpy scalar like other methods + + def _param_info(self): + shape_info = self._shape_info() + loc_info = _ShapeInfo("loc", False, (-np.inf, np.inf), (False, False)) + scale_info = _ShapeInfo("scale", False, (0, np.inf), (False, False)) + param_info = shape_info + [loc_info, scale_info] + return param_info + + # For now, _delta_cdf is a private method. + def _delta_cdf(self, x1, x2, *args, loc=0, scale=1): + """ + Compute CDF(x2) - CDF(x1). + + Where x1 is greater than the median, compute SF(x1) - SF(x2), + otherwise compute CDF(x2) - CDF(x1). + + This function is only useful if `dist.sf(x, ...)` has an implementation + that is numerically more accurate than `1 - dist.cdf(x, ...)`. + """ + cdf1 = self.cdf(x1, *args, loc=loc, scale=scale) + # Possible optimizations (needs investigation-these might not be + # better): + # * Use _lazywhere instead of np.where + # * Instead of cdf1 > 0.5, compare x1 to the median. 
+ result = np.where(cdf1 > 0.5, + (self.sf(x1, *args, loc=loc, scale=scale) + - self.sf(x2, *args, loc=loc, scale=scale)), + self.cdf(x2, *args, loc=loc, scale=scale) - cdf1) + if result.ndim == 0: + result = result[()] + return result + + +# Helpers for the discrete distributions +def _drv2_moment(self, n, *args): + """Non-central moment of discrete distribution.""" + def fun(x): + return np.power(x, n) * self._pmf(x, *args) + + _a, _b = self._get_support(*args) + return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc) + + +def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm + _a, _b = self._get_support(*args) + b = _b + a = _a + if isinf(b): # Be sure ending point is > q + b = int(max(100*q, 10)) + while 1: + if b >= _b: + qb = 1.0 + break + qb = self._cdf(b, *args) + if (qb < q): + b += 10 + else: + break + else: + qb = 1.0 + if isinf(a): # be sure starting point < q + a = int(min(-100*q, -10)) + while 1: + if a <= _a: + qb = 0.0 + break + qa = self._cdf(a, *args) + if (qa > q): + a -= 10 + else: + break + else: + qa = self._cdf(a, *args) + + while 1: + if (qa == q): + return a + if (qb == q): + return b + if b <= a+1: + if qa > q: + return a + else: + return b + c = int((a+b)/2.0) + qc = self._cdf(c, *args) + if (qc < q): + if a != c: + a = c + else: + raise RuntimeError('updating stopped, endless loop') + qa = qc + elif (qc > q): + if b != c: + b = c + else: + raise RuntimeError('updating stopped, endless loop') + qb = qc + else: + return c + + +# Must over-ride one of _pmf or _cdf or pass in +# x_k, p(x_k) lists in initialization + + +class rv_discrete(rv_generic): + """A generic discrete random variable class meant for subclassing. + + `rv_discrete` is a base class to construct specific distribution classes + and instances for discrete random variables. It can also be used + to construct an arbitrary distribution defined by a list of support + points and corresponding probabilities. + + Parameters + ---------- + a : float, optional + Lower bound of the support of the distribution, default: 0 + b : float, optional + Upper bound of the support of the distribution, default: plus infinity + moment_tol : float, optional + The tolerance for the generic calculation of moments. + values : tuple of two array_like, optional + ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero + probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk`` + and ``pk`` must have the same shape, and ``xk`` must be unique. + inc : integer, optional + Increment for the support of the distribution. + Default is 1. (other values have not been tested) + badvalue : float, optional + The value in a result arrays that indicates a value that for which + some argument restriction is violated, default is np.nan. + name : str, optional + The name of the instance. This string is used to construct the default + example for distributions. + longname : str, optional + This string is used as part of the first line of the docstring returned + when a subclass has no docstring of its own. Note: `longname` exists + for backwards compatibility, do not use for new subclasses. + shapes : str, optional + The shape of the distribution. For example "m, n" for a distribution + that takes two integers as the two shape arguments for all its methods + If not provided, shape parameters will be inferred from + the signatures of the private methods, ``_pmf`` and ``_cdf`` of + the instance. 
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + pmf + logpmf + cdf + logcdf + sf + logsf + ppf + isf + moment + stats + entropy + expect + median + mean + std + var + interval + __call__ + support + + Notes + ----- + This class is similar to `rv_continuous`. Whether a shape parameter is + valid is decided by an ``_argcheck`` method (which defaults to checking + that its arguments are strictly positive.) + The main differences are as follows. + + - The support of the distribution is a set of integers. + - Instead of the probability density function, ``pdf`` (and the + corresponding private ``_pdf``), this class defines the + *probability mass function*, `pmf` (and the corresponding + private ``_pmf``.) + - There is no ``scale`` parameter. + - The default implementations of methods (e.g. ``_cdf``) are not designed + for distributions with support that is unbounded below (i.e. + ``a=-np.inf``), so they must be overridden. + + To create a new discrete distribution, we would do the following: + + >>> from scipy.stats import rv_discrete + >>> class poisson_gen(rv_discrete): + ... "Poisson distribution" + ... def _pmf(self, k, mu): + ... return exp(-mu) * mu**k / factorial(k) + + and create an instance:: + + >>> poisson = poisson_gen(name="poisson") + + Note that above we defined the Poisson distribution in the standard form. + Shifting the distribution can be done by providing the ``loc`` parameter + to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)`` + delegates the work to ``poisson._pmf(x-loc, mu)``. + + **Discrete distributions from a list of probabilities** + + Alternatively, you can construct an arbitrary discrete rv defined + on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the + ``values`` keyword argument to the `rv_discrete` constructor. + + **Deepcopying / Pickling** + + If a distribution or frozen distribution is deepcopied (pickled/unpickled, + etc.), any underlying random number generator is deepcopied with it. An + implication is that if a distribution relies on the singleton RandomState + before copying, it will rely on a copy of that random state after copying, + and ``np.random.seed`` will no longer control the state. 
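For reference, a runnable version of the ``poisson_gen`` subclassing example shown above; it is a sketch that only adds the imports the in-module docstring takes for granted:

import numpy as np
from scipy.special import factorial
from scipy.stats import rv_discrete

class poisson_gen(rv_discrete):
    "Poisson distribution"
    def _pmf(self, k, mu):
        return np.exp(-mu) * mu**k / factorial(k)

poisson = poisson_gen(name="poisson")
print(poisson.pmf(2, 3.0))           # ~0.2240
print(poisson.pmf(2, 3.0, loc=1))    # delegates to _pmf(2 - 1, 3.0) -> ~0.1494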
+ + Examples + -------- + Custom made discrete distribution: + + >>> import numpy as np + >>> from scipy import stats + >>> xk = np.arange(7) + >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2) + >>> custm = stats.rv_discrete(name='custm', values=(xk, pk)) + >>> + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(1, 1) + >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r') + >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4) + >>> plt.show() + + Random number generation: + + >>> R = custm.rvs(size=100) + + """ + def __new__(cls, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + if values is not None: + # dispatch to a subclass + return super().__new__(rv_sample) + else: + # business as usual + return super().__new__(cls) + + def __init__(self, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + super().__init__(seed) + + # cf generic freeze + self._ctor_param = dict( + a=a, b=b, name=name, badvalue=badvalue, + moment_tol=moment_tol, values=values, inc=inc, + longname=longname, shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + self.badvalue = badvalue + self.a = a + self.b = b + self.moment_tol = moment_tol + self.inc = inc + self.shapes = shapes + + if values is not None: + raise ValueError("rv_discrete.__init__(..., values != None, ...)") + + self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf], + locscale_in='loc=0', + # scale=1 for discrete RVs + locscale_out='loc, 1') + self._attach_methods() + self._construct_docstrings(name, longname) + + def __getstate__(self): + dct = self.__dict__.copy() + # these methods will be remade in __setstate__ + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs", + "_cdfvec", "_ppfvec", "generic_moment"] + [dct.pop(attr, None) for attr in attrs] + return dct + + def _attach_methods(self): + """Attaches dynamically created methods to the rv_discrete instance.""" + self._cdfvec = vectorize(self._cdf_single, otypes='d') + self.vecentropy = vectorize(self._entropy) + + # _attach_methods is responsible for calling _attach_argparser_methods + self._attach_argparser_methods() + + # nin correction needs to be after we know numargs + # correct nin for generic moment vectorization + _vec_generic_moment = vectorize(_drv2_moment, otypes='d') + _vec_generic_moment.nin = self.numargs + 2 + self.generic_moment = types.MethodType(_vec_generic_moment, self) + + # correct nin for ppf vectorization + _vppf = vectorize(_drv2_ppfsingle, otypes='d') + _vppf.nin = self.numargs + 2 + self._ppfvec = types.MethodType(_vppf, self) + + # now that self.numargs is defined, we can adjust nin + self._cdfvec.nin = self.numargs + 1 + + def _construct_docstrings(self, name, longname): + if name is None: + name = 'Distribution' + self.name = name + + # generate docstring for subclass instances + if longname is None: + if name[0] in ['aeiouAEIOU']: + hstr = "An " + else: + hstr = "A " + longname = hstr + name + + if sys.flags.optimize < 2: + # Skip adding docstrings if interpreter is run with -OO + if self.__doc__ is None: + self._construct_default_doc(longname=longname, + docdict=docdict_discrete, + discrete='discrete') + else: + dct = dict(distdiscrete) + self._construct_doc(docdict_discrete, dct.get(self.name)) + + # discrete RV do not have the scale parameter, remove it + self.__doc__ = self.__doc__.replace( + '\n scale : array_like, ' + 'optional\n scale parameter (default=1)', '') + + 
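A quick check of the ``__new__`` dispatch defined above (a sketch): when the ``values`` keyword is supplied, construction is routed to the private ``rv_sample`` subclass rather than producing a plain ``rv_discrete`` instance:

import numpy as np
from scipy import stats

xk = np.array([1, 2, 3])
pk = np.array([0.2, 0.5, 0.3])
custm = stats.rv_discrete(name='custm', values=(xk, pk))

print(type(custm).__name__)                  # 'rv_sample'
print(isinstance(custm, stats.rv_discrete))  # True
print(custm.pmf(2), custm.cdf(2))            # 0.5 0.7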
def _updated_ctor_param(self): + """Return the current version of _ctor_param, possibly updated by user. + + Used by freezing. + Keep this in sync with the signature of __init__. + """ + dct = self._ctor_param.copy() + dct['a'] = self.a + dct['b'] = self.b + dct['badvalue'] = self.badvalue + dct['moment_tol'] = self.moment_tol + dct['inc'] = self.inc + dct['name'] = self.name + dct['shapes'] = self.shapes + return dct + + def _nonzero(self, k, *args): + return floor(k) == k + + def _pmf(self, k, *args): + return self._cdf(k, *args) - self._cdf(k-1, *args) + + def _logpmf(self, k, *args): + return log(self._pmf(k, *args)) + + def _logpxf(self, k, *args): + # continuous distributions have PDF, discrete have PMF, but sometimes + # the distinction doesn't matter. This lets us use `_logpxf` for both + # discrete and continuous distributions. + return self._logpmf(k, *args) + + def _unpack_loc_scale(self, theta): + try: + loc = theta[-1] + scale = 1 + args = tuple(theta[:-1]) + except IndexError as e: + raise ValueError("Not enough input arguments.") from e + return loc, scale, args + + def _cdf_single(self, k, *args): + _a, _b = self._get_support(*args) + m = arange(int(_a), k+1) + return np.sum(self._pmf(m, *args), axis=0) + + def _cdf(self, x, *args): + k = floor(x) + return self._cdfvec(k, *args) + + # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic + + def rvs(self, *args, **kwargs): + """Random variates of given type. + + Parameters + ---------- + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + size : int or tuple of ints, optional + Defining number of random variates (Default is 1). Note that `size` + has to be given as keyword, not as positional argument. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `random_state` is None (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is + used, seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, that instance is used. + + Returns + ------- + rvs : ndarray or scalar + Random variates of given `size`. + + """ + kwargs['discrete'] = True + return super().rvs(*args, **kwargs) + + def pmf(self, k, *args, **kwds): + """Probability mass function at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information) + loc : array_like, optional + Location parameter (default=0). 
+ + Returns + ------- + pmf : array_like + Probability mass function evaluated at k + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k <= _b) + if not isinstance(self, rv_sample): + cond1 = cond1 & self._nonzero(k, *args) + cond = cond0 & cond1 + output = zeros(shape(cond), 'd') + place(output, (1-cond0) + np.isnan(k), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._pmf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logpmf(self, k, *args, **kwds): + """Log of the probability mass function at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter. Default is 0. + + Returns + ------- + logpmf : array_like + Log of the probability mass function evaluated at k. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k <= _b) + if not isinstance(self, rv_sample): + cond1 = cond1 & self._nonzero(k, *args) + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(-inf) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logpmf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def cdf(self, k, *args, **kwds): + """Cumulative distribution function of the given RV. + + Parameters + ---------- + k : array_like, int + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + cdf : ndarray + Cumulative distribution function evaluated at `k`. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = (k >= _b) + cond3 = np.isneginf(k) + cond = cond0 & cond1 & np.isfinite(k) + + output = zeros(shape(cond), 'd') + place(output, cond2*(cond0 == cond0), 1.0) + place(output, cond3*(cond0 == cond0), 0.0) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._cdf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logcdf(self, k, *args, **kwds): + """Log of the cumulative distribution function at k of the given RV. + + Parameters + ---------- + k : array_like, int + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + logcdf : array_like + Log of the cumulative distribution function evaluated at k. 
+ + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = (k >= _b) + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(-inf) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2*(cond0 == cond0), 0.0) + + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logcdf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def sf(self, k, *args, **kwds): + """Survival function (1 - `cdf`) at k of the given RV. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + sf : array_like + Survival function evaluated at k. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = ((k < _a) | np.isneginf(k)) & cond0 + cond = cond0 & cond1 & np.isfinite(k) + output = zeros(shape(cond), 'd') + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2, 1.0) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, np.clip(self._sf(*goodargs), 0, 1)) + if output.ndim == 0: + return output[()] + return output + + def logsf(self, k, *args, **kwds): + """Log of the survival function of the given RV. + + Returns the log of the "survival function," defined as 1 - `cdf`, + evaluated at `k`. + + Parameters + ---------- + k : array_like + Quantiles. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + logsf : ndarray + Log of the survival function evaluated at `k`. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + k, loc = map(asarray, (k, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + k = asarray(k-loc) + cond0 = self._argcheck(*args) + cond1 = (k >= _a) & (k < _b) + cond2 = (k < _a) & cond0 + cond = cond0 & cond1 + output = empty(shape(cond), 'd') + output.fill(-inf) + place(output, (1-cond0) + np.isnan(k), self.badvalue) + place(output, cond2, 0.0) + if np.any(cond): + goodargs = argsreduce(cond, *((k,)+args)) + place(output, cond, self._logsf(*goodargs)) + if output.ndim == 0: + return output[()] + return output + + def ppf(self, q, *args, **kwds): + """Percent point function (inverse of `cdf`) at q of the given RV. + + Parameters + ---------- + q : array_like + Lower tail probability. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + k : array_like + Quantile corresponding to the lower tail probability, q. 
+ + """ + args, loc, _ = self._parse_args(*args, **kwds) + q, loc = map(asarray, (q, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (loc == loc) + cond1 = (q > 0) & (q < 1) + cond2 = (q == 1) & cond0 + cond = cond0 & cond1 + output = np.full(shape(cond), fill_value=self.badvalue, dtype='d') + # output type 'd' to handle nin and inf + place(output, (q == 0)*(cond == cond), _a-1 + loc) + place(output, cond2, _b + loc) + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(loc,))) + loc, goodargs = goodargs[-1], goodargs[:-1] + place(output, cond, self._ppf(*goodargs) + loc) + + if output.ndim == 0: + return output[()] + return output + + def isf(self, q, *args, **kwds): + """Inverse survival function (inverse of `sf`) at q of the given RV. + + Parameters + ---------- + q : array_like + Upper tail probability. + arg1, arg2, arg3,... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + loc : array_like, optional + Location parameter (default=0). + + Returns + ------- + k : ndarray or scalar + Quantile corresponding to the upper tail probability, q. + + """ + args, loc, _ = self._parse_args(*args, **kwds) + q, loc = map(asarray, (q, loc)) + args = tuple(map(asarray, args)) + _a, _b = self._get_support(*args) + cond0 = self._argcheck(*args) & (loc == loc) + cond1 = (q > 0) & (q < 1) + cond2 = (q == 1) & cond0 + cond3 = (q == 0) & cond0 + cond = cond0 & cond1 + + # same problem as with ppf; copied from ppf and changed + output = np.full(shape(cond), fill_value=self.badvalue, dtype='d') + # output type 'd' to handle nin and inf + lower_bound = _a - 1 + loc + upper_bound = _b + loc + place(output, cond2*(cond == cond), lower_bound) + place(output, cond3*(cond == cond), upper_bound) + + # call place only if at least 1 valid argument + if np.any(cond): + goodargs = argsreduce(cond, *((q,)+args+(loc,))) + loc, goodargs = goodargs[-1], goodargs[:-1] + # PB same as ticket 766 + place(output, cond, self._isf(*goodargs) + loc) + + if output.ndim == 0: + return output[()] + return output + + def _entropy(self, *args): + if hasattr(self, 'pk'): + return stats.entropy(self.pk) + else: + _a, _b = self._get_support(*args) + return _expect(lambda x: entr(self.pmf(x, *args)), + _a, _b, self.ppf(0.5, *args), self.inc) + + def expect(self, func=None, args=(), loc=0, lb=None, ub=None, + conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32): + """ + Calculate expected value of a function with respect to the distribution + for discrete distribution by numerical summation. + + Parameters + ---------- + func : callable, optional + Function for which the expectation value is calculated. + Takes only one argument. + The default is the identity mapping f(k) = k. + args : tuple, optional + Shape parameters of the distribution. + loc : float, optional + Location parameter. + Default is 0. + lb, ub : int, optional + Lower and upper bound for the summation, default is set to the + support of the distribution, inclusive (``lb <= k <= ub``). + conditional : bool, optional + If true then the expectation is corrected by the conditional + probability of the summation interval. The return value is the + expectation of the function, `func`, conditional on being in + the given interval (k such that ``lb <= k <= ub``). + Default is False. + maxcount : int, optional + Maximal number of terms to evaluate (to avoid an endless loop for + an infinite sum). Default is 1000. 
+ tolerance : float, optional + Absolute tolerance for the summation. Default is 1e-10. + chunksize : int, optional + Iterate over the support of a distributions in chunks of this size. + Default is 32. + + Returns + ------- + expect : float + Expected value. + + Notes + ----- + For heavy-tailed distributions, the expected value may or + may not exist, + depending on the function, `func`. If it does exist, but the + sum converges + slowly, the accuracy of the result may be rather low. For instance, for + ``zipf(4)``, accuracy for mean, variance in example is only 1e-5. + increasing `maxcount` and/or `chunksize` may improve the result, + but may also make zipf very slow. + + The function is not vectorized. + + """ + if func is None: + def fun(x): + # loc and args from outer scope + return (x+loc)*self._pmf(x, *args) + else: + def fun(x): + # loc and args from outer scope + return func(x+loc)*self._pmf(x, *args) + # used pmf because _pmf does not check support in randint and there + # might be problems(?) with correct self.a, self.b at this stage maybe + # not anymore, seems to work now with _pmf + + _a, _b = self._get_support(*args) + if lb is None: + lb = _a + else: + lb = lb - loc # convert bound for standardized distribution + if ub is None: + ub = _b + else: + ub = ub - loc # convert bound for standardized distribution + if conditional: + invfac = self.sf(lb-1, *args) - self.sf(ub, *args) + else: + invfac = 1.0 + + if isinstance(self, rv_sample): + res = self._expect(fun, lb, ub) + return res / invfac + + # iterate over the support, starting from the median + x0 = self.ppf(0.5, *args) + res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize) + return res / invfac + + def _param_info(self): + shape_info = self._shape_info() + loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False)) + param_info = shape_info + [loc_info] + return param_info + + +def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10, + chunksize=32): + """Helper for computing the expectation value of `fun`.""" + # short-circuit if the support size is small enough + if (ub - lb) <= chunksize: + supp = np.arange(lb, ub+1, inc) + vals = fun(supp) + return np.sum(vals) + + # otherwise, iterate starting from x0 + if x0 < lb: + x0 = lb + if x0 > ub: + x0 = ub + + count, tot = 0, 0. + # iterate over [x0, ub] inclusive + for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc): + count += x.size + delta = np.sum(fun(x)) + tot += delta + if abs(delta) < tolerance * x.size: + break + if count > maxcount: + warnings.warn('expect(): sum did not converge', + RuntimeWarning, stacklevel=3) + return tot + + # iterate over [lb, x0) + for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc): + count += x.size + delta = np.sum(fun(x)) + tot += delta + if abs(delta) < tolerance * x.size: + break + if count > maxcount: + warnings.warn('expect(): sum did not converge', + RuntimeWarning, stacklevel=3) + break + + return tot + + +def _iter_chunked(x0, x1, chunksize=4, inc=1): + """Iterate from x0 to x1 in chunks of chunksize and steps inc. + + x0 must be finite, x1 need not be. In the latter case, the iterator is + infinite. + Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards + (make sure to set inc < 0.) 
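expect() reduces to a weighted sum of func over the pmf, evaluated in chunks starting from the median. A usage sketch (the quoted values follow from Poisson with mean 3):

from scipy import stats

mu = 3.0
print(stats.poisson.expect(args=(mu,)))                  # default func is the identity: the mean, ~3.0
print(stats.poisson.expect(lambda k: k**2, args=(mu,)))  # E[X^2] = var + mean^2 = 12.0
print(stats.poisson.expect(args=(mu,), lb=0, ub=5, conditional=True))  # conditional mean on 0 <= k <= 5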
+ + >>> from scipy.stats._distn_infrastructure import _iter_chunked + >>> [x for x in _iter_chunked(2, 5, inc=2)] + [array([2, 4])] + >>> [x for x in _iter_chunked(2, 11, inc=2)] + [array([2, 4, 6, 8]), array([10])] + >>> [x for x in _iter_chunked(2, -5, inc=-2)] + [array([ 2, 0, -2, -4])] + >>> [x for x in _iter_chunked(2, -9, inc=-2)] + [array([ 2, 0, -2, -4]), array([-6, -8])] + + """ + if inc == 0: + raise ValueError('Cannot increment by zero.') + if chunksize <= 0: + raise ValueError('Chunk size must be positive; got %s.' % chunksize) + + s = 1 if inc > 0 else -1 + stepsize = abs(chunksize * inc) + + x = x0 + while (x - x1) * inc < 0: + delta = min(stepsize, abs(x - x1)) + step = delta * s + supp = np.arange(x, x + step, inc) + x += step + yield supp + + +class rv_sample(rv_discrete): + """A 'sample' discrete distribution defined by the support and values. + + The ctor ignores most of the arguments, only needs the `values` argument. + """ + + def __init__(self, a=0, b=inf, name=None, badvalue=None, + moment_tol=1e-8, values=None, inc=1, longname=None, + shapes=None, seed=None): + + super(rv_discrete, self).__init__(seed) + + if values is None: + raise ValueError("rv_sample.__init__(..., values=None,...)") + + # cf generic freeze + self._ctor_param = dict( + a=a, b=b, name=name, badvalue=badvalue, + moment_tol=moment_tol, values=values, inc=inc, + longname=longname, shapes=shapes, seed=seed) + + if badvalue is None: + badvalue = nan + self.badvalue = badvalue + self.moment_tol = moment_tol + self.inc = inc + self.shapes = shapes + self.vecentropy = self._entropy + + xk, pk = values + + if np.shape(xk) != np.shape(pk): + raise ValueError("xk and pk must have the same shape.") + if np.less(pk, 0.0).any(): + raise ValueError("All elements of pk must be non-negative.") + if not np.allclose(np.sum(pk), 1): + raise ValueError("The sum of provided pk is not 1.") + if not len(set(np.ravel(xk))) == np.size(xk): + raise ValueError("xk may not contain duplicate values.") + + indx = np.argsort(np.ravel(xk)) + self.xk = np.take(np.ravel(xk), indx, 0) + self.pk = np.take(np.ravel(pk), indx, 0) + self.a = self.xk[0] + self.b = self.xk[-1] + + self.qvals = np.cumsum(self.pk, axis=0) + + self.shapes = ' ' # bypass inspection + + self._construct_argparser(meths_to_inspect=[self._pmf], + locscale_in='loc=0', + # scale=1 for discrete RVs + locscale_out='loc, 1') + + self._attach_methods() + + self._construct_docstrings(name, longname) + + def __getstate__(self): + dct = self.__dict__.copy() + + # these methods will be remade in rv_generic.__setstate__, + # which calls rv_generic._attach_methods + attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"] + [dct.pop(attr, None) for attr in attrs] + + return dct + + def _attach_methods(self): + """Attaches dynamically created argparser methods.""" + self._attach_argparser_methods() + + def _get_support(self, *args): + """Return the support of the (unscaled, unshifted) distribution. + + Parameters + ---------- + arg1, arg2, ... : array_like + The shape parameter(s) for the distribution (see docstring of the + instance object for more information). + + Returns + ------- + a, b : numeric (float, or int or +/-np.inf) + end-points of the distribution's support. 
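A self-contained sketch of the chunked, early-stopping summation strategy implemented by _expect and _iter_chunked above; chunked_sum is a hypothetical name used only for illustration (not SciPy API), and the defaults mirror those above:

import numpy as np

def chunked_sum(f, start, stop, chunksize=32, tol=1e-10, maxcount=1000):
    # Sum f(k) for k = start..stop in chunks, stopping once a chunk's
    # contribution is negligible or the term budget is exhausted.
    total, count, x = 0.0, 0, start
    while x <= stop:
        ks = np.arange(x, min(x + chunksize, stop + 1))
        delta = float(np.sum(f(ks)))
        total += delta
        count += ks.size
        x += chunksize
        if abs(delta) < tol * ks.size or count > maxcount:
            break
    return total

print(chunked_sum(lambda k: 0.5 ** k, 0, 10_000))  # geometric series, converges to ~2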
+ """ + return self.a, self.b + + def _pmf(self, x): + return np.select([x == k for k in self.xk], + [np.broadcast_arrays(p, x)[0] for p in self.pk], 0) + + def _cdf(self, x): + xx, xxk = np.broadcast_arrays(x[:, None], self.xk) + indx = np.argmax(xxk > xx, axis=-1) - 1 + return self.qvals[indx] + + def _ppf(self, q): + qq, sqq = np.broadcast_arrays(q[..., None], self.qvals) + indx = argmax(sqq >= qq, axis=-1) + return self.xk[indx] + + def _rvs(self, size=None, random_state=None): + # Need to define it explicitly, otherwise .rvs() with size=None + # fails due to explicit broadcasting in _ppf + U = random_state.uniform(size=size) + if size is None: + U = np.array(U, ndmin=1) + Y = self._ppf(U)[0] + else: + Y = self._ppf(U) + return Y + + def _entropy(self): + return stats.entropy(self.pk) + + def generic_moment(self, n): + n = asarray(n) + return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0) + + def _expect(self, fun, lb, ub, *args, **kwds): + # ignore all args, just do a brute force summation + supp = self.xk[(lb <= self.xk) & (self.xk <= ub)] + vals = fun(supp) + return np.sum(vals) + + +def _check_shape(argshape, size): + """ + This is a utility function used by `_rvs()` in the class geninvgauss_gen. + It compares the tuple argshape to the tuple size. + + Parameters + ---------- + argshape : tuple of integers + Shape of the arguments. + size : tuple of integers or integer + Size argument of rvs(). + + Returns + ------- + The function returns two tuples, scalar_shape and bc. + + scalar_shape : tuple + Shape to which the 1-d array of random variates returned by + _rvs_scalar() is converted when it is copied into the + output array of _rvs(). + + bc : tuple of booleans + bc is an tuple the same length as size. bc[j] is True if the data + associated with that index is generated in one call of _rvs_scalar(). + + """ + scalar_shape = [] + bc = [] + for argdim, sizedim in zip_longest(argshape[::-1], size[::-1], + fillvalue=1): + if sizedim > argdim or (argdim == sizedim == 1): + scalar_shape.append(sizedim) + bc.append(True) + else: + bc.append(False) + return tuple(scalar_shape[::-1]), tuple(bc[::-1]) + + +def get_distribution_names(namespace_pairs, rv_base_class): + """Collect names of statistical distributions and their generators. + + Parameters + ---------- + namespace_pairs : sequence + A snapshot of (name, value) pairs in the namespace of a module. + rv_base_class : class + The base class of random variable generator classes in a module. + + Returns + ------- + distn_names : list of strings + Names of the statistical distributions. + distn_gen_names : list of strings + Names of the generators of the statistical distributions. + Note that these are not simply the names of the statistical + distributions, with a _gen suffix added. 
+ + """ + distn_names = [] + distn_gen_names = [] + for name, value in namespace_pairs: + if name.startswith('_'): + continue + if name.endswith('_gen') and issubclass(value, rv_base_class): + distn_gen_names.append(name) + if isinstance(value, rv_base_class): + distn_names.append(name) + return distn_names, distn_gen_names diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_entropy.py b/venv/lib/python3.10/site-packages/scipy/stats/_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..f4a81b05749fda69191eab0876eb4bfba8be8b59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_entropy.py @@ -0,0 +1,423 @@ +""" +Created on Fri Apr 2 09:06:05 2021 + +@author: matth +""" + +from __future__ import annotations +import math +import numpy as np +from scipy import special +from ._axis_nan_policy import _axis_nan_policy_factory, _broadcast_arrays + +__all__ = ['entropy', 'differential_entropy'] + + +@_axis_nan_policy_factory( + lambda x: x, + n_samples=lambda kwgs: ( + 2 if ("qk" in kwgs and kwgs["qk"] is not None) + else 1 + ), + n_outputs=1, result_to_tuple=lambda x: (x,), paired=True, + too_small=-1 # entropy doesn't have too small inputs +) +def entropy(pk: np.typing.ArrayLike, + qk: np.typing.ArrayLike | None = None, + base: float | None = None, + axis: int = 0 + ) -> np.number | np.ndarray: + """ + Calculate the Shannon entropy/relative entropy of given distribution(s). + + If only probabilities `pk` are given, the Shannon entropy is calculated as + ``H = -sum(pk * log(pk))``. + + If `qk` is not None, then compute the relative entropy + ``D = sum(pk * log(pk / qk))``. This quantity is also known + as the Kullback-Leibler divergence. + + This routine will normalize `pk` and `qk` if they don't sum to 1. + + Parameters + ---------- + pk : array_like + Defines the (discrete) distribution. Along each axis-slice of ``pk``, + element ``i`` is the (possibly unnormalized) probability of event + ``i``. + qk : array_like, optional + Sequence against which the relative entropy is computed. Should be in + the same format as `pk`. + base : float, optional + The logarithmic base to use, defaults to ``e`` (natural logarithm). + axis : int, optional + The axis along which the entropy is calculated. Default is 0. + + Returns + ------- + S : {float, array_like} + The calculated entropy. + + Notes + ----- + Informally, the Shannon entropy quantifies the expected uncertainty + inherent in the possible outcomes of a discrete random variable. + For example, + if messages consisting of sequences of symbols from a set are to be + encoded and transmitted over a noiseless channel, then the Shannon entropy + ``H(pk)`` gives a tight lower bound for the average number of units of + information needed per symbol if the symbols occur with frequencies + governed by the discrete distribution `pk` [1]_. The choice of base + determines the choice of units; e.g., ``e`` for nats, ``2`` for bits, etc. + + The relative entropy, ``D(pk|qk)``, quantifies the increase in the average + number of units of information needed per symbol if the encoding is + optimized for the probability distribution `qk` instead of the true + distribution `pk`. Informally, the relative entropy quantifies the expected + excess in surprise experienced if one believes the true distribution is + `qk` when it is actually `pk`. 
+ + A related quantity, the cross entropy ``CE(pk, qk)``, satisfies the + equation ``CE(pk, qk) = H(pk) + D(pk|qk)`` and can also be calculated with + the formula ``CE = -sum(pk * log(qk))``. It gives the average + number of units of information needed per symbol if an encoding is + optimized for the probability distribution `qk` when the true distribution + is `pk`. It is not computed directly by `entropy`, but it can be computed + using two calls to the function (see Examples). + + See [2]_ for more information. + + References + ---------- + .. [1] Shannon, C.E. (1948), A Mathematical Theory of Communication. + Bell System Technical Journal, 27: 379-423. + https://doi.org/10.1002/j.1538-7305.1948.tb01338.x + .. [2] Thomas M. Cover and Joy A. Thomas. 2006. Elements of Information + Theory (Wiley Series in Telecommunications and Signal Processing). + Wiley-Interscience, USA. + + + Examples + -------- + The outcome of a fair coin is the most uncertain: + + >>> import numpy as np + >>> from scipy.stats import entropy + >>> base = 2 # work in units of bits + >>> pk = np.array([1/2, 1/2]) # fair coin + >>> H = entropy(pk, base=base) + >>> H + 1.0 + >>> H == -np.sum(pk * np.log(pk)) / np.log(base) + True + + The outcome of a biased coin is less uncertain: + + >>> qk = np.array([9/10, 1/10]) # biased coin + >>> entropy(qk, base=base) + 0.46899559358928117 + + The relative entropy between the fair coin and biased coin is calculated + as: + + >>> D = entropy(pk, qk, base=base) + >>> D + 0.7369655941662062 + >>> D == np.sum(pk * np.log(pk/qk)) / np.log(base) + True + + The cross entropy can be calculated as the sum of the entropy and + relative entropy`: + + >>> CE = entropy(pk, base=base) + entropy(pk, qk, base=base) + >>> CE + 1.736965594166206 + >>> CE == -np.sum(pk * np.log(qk)) / np.log(base) + True + + """ + if base is not None and base <= 0: + raise ValueError("`base` must be a positive number or `None`.") + + pk = np.asarray(pk) + with np.errstate(invalid='ignore'): + pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True) + if qk is None: + vec = special.entr(pk) + else: + qk = np.asarray(qk) + pk, qk = _broadcast_arrays((pk, qk), axis=None) # don't ignore any axes + sum_kwargs = dict(axis=axis, keepdims=True) + qk = 1.0*qk / np.sum(qk, **sum_kwargs) # type: ignore[operator, call-overload] + vec = special.rel_entr(pk, qk) + S = np.sum(vec, axis=axis) + if base is not None: + S /= np.log(base) + return S + + +def _differential_entropy_is_too_small(samples, kwargs, axis=-1): + values = samples[0] + n = values.shape[axis] + window_length = kwargs.get("window_length", + math.floor(math.sqrt(n) + 0.5)) + if not 2 <= 2 * window_length < n: + return True + return False + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, result_to_tuple=lambda x: (x,), + too_small=_differential_entropy_is_too_small +) +def differential_entropy( + values: np.typing.ArrayLike, + *, + window_length: int | None = None, + base: float | None = None, + axis: int = 0, + method: str = "auto", +) -> np.number | np.ndarray: + r"""Given a sample of a distribution, estimate the differential entropy. + + Several estimation methods are available using the `method` parameter. By + default, a method is selected based the size of the sample. + + Parameters + ---------- + values : sequence + Sample from a continuous distribution. + window_length : int, optional + Window length for computing Vasicek estimate. Must be an integer + between 1 and half of the sample size. If ``None`` (the default), it + uses the heuristic value + + .. 
math:: + \left \lfloor \sqrt{n} + 0.5 \right \rfloor + + where :math:`n` is the sample size. This heuristic was originally + proposed in [2]_ and has become common in the literature. + base : float, optional + The logarithmic base to use, defaults to ``e`` (natural logarithm). + axis : int, optional + The axis along which the differential entropy is calculated. + Default is 0. + method : {'vasicek', 'van es', 'ebrahimi', 'correa', 'auto'}, optional + The method used to estimate the differential entropy from the sample. + Default is ``'auto'``. See Notes for more information. + + Returns + ------- + entropy : float + The calculated differential entropy. + + Notes + ----- + This function will converge to the true differential entropy in the limit + + .. math:: + n \to \infty, \quad m \to \infty, \quad \frac{m}{n} \to 0 + + The optimal choice of ``window_length`` for a given sample size depends on + the (unknown) distribution. Typically, the smoother the density of the + distribution, the larger the optimal value of ``window_length`` [1]_. + + The following options are available for the `method` parameter. + + * ``'vasicek'`` uses the estimator presented in [1]_. This is + one of the first and most influential estimators of differential entropy. + * ``'van es'`` uses the bias-corrected estimator presented in [3]_, which + is not only consistent but, under some conditions, asymptotically normal. + * ``'ebrahimi'`` uses an estimator presented in [4]_, which was shown + in simulation to have smaller bias and mean squared error than + the Vasicek estimator. + * ``'correa'`` uses the estimator presented in [5]_ based on local linear + regression. In a simulation study, it had consistently smaller mean + square error than the Vasiceck estimator, but it is more expensive to + compute. + * ``'auto'`` selects the method automatically (default). Currently, + this selects ``'van es'`` for very small samples (<10), ``'ebrahimi'`` + for moderate sample sizes (11-1000), and ``'vasicek'`` for larger + samples, but this behavior is subject to change in future versions. + + All estimators are implemented as described in [6]_. + + References + ---------- + .. [1] Vasicek, O. (1976). A test for normality based on sample entropy. + Journal of the Royal Statistical Society: + Series B (Methodological), 38(1), 54-59. + .. [2] Crzcgorzewski, P., & Wirczorkowski, R. (1999). Entropy-based + goodness-of-fit test for exponentiality. Communications in + Statistics-Theory and Methods, 28(5), 1183-1202. + .. [3] Van Es, B. (1992). Estimating functionals related to a density by a + class of statistics based on spacings. Scandinavian Journal of + Statistics, 61-72. + .. [4] Ebrahimi, N., Pflughoeft, K., & Soofi, E. S. (1994). Two measures + of sample entropy. Statistics & Probability Letters, 20(3), 225-234. + .. [5] Correa, J. C. (1995). A new estimator of entropy. Communications + in Statistics-Theory and Methods, 24(10), 2439-2449. + .. [6] Noughabi, H. A. (2015). Entropy Estimation Using Numerical Methods. + Annals of Data Science, 2(2), 231-241. 
+ https://link.springer.com/article/10.1007/s40745-015-0045-9 + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import differential_entropy, norm + + Entropy of a standard normal distribution: + + >>> rng = np.random.default_rng() + >>> values = rng.standard_normal(100) + >>> differential_entropy(values) + 1.3407817436640392 + + Compare with the true entropy: + + >>> float(norm.entropy()) + 1.4189385332046727 + + For several sample sizes between 5 and 1000, compare the accuracy of + the ``'vasicek'``, ``'van es'``, and ``'ebrahimi'`` methods. Specifically, + compare the root mean squared error (over 1000 trials) between the estimate + and the true differential entropy of the distribution. + + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> + >>> + >>> def rmse(res, expected): + ... '''Root mean squared error''' + ... return np.sqrt(np.mean((res - expected)**2)) + >>> + >>> + >>> a, b = np.log10(5), np.log10(1000) + >>> ns = np.round(np.logspace(a, b, 10)).astype(int) + >>> reps = 1000 # number of repetitions for each sample size + >>> expected = stats.expon.entropy() + >>> + >>> method_errors = {'vasicek': [], 'van es': [], 'ebrahimi': []} + >>> for method in method_errors: + ... for n in ns: + ... rvs = stats.expon.rvs(size=(reps, n), random_state=rng) + ... res = stats.differential_entropy(rvs, method=method, axis=-1) + ... error = rmse(res, expected) + ... method_errors[method].append(error) + >>> + >>> for method, errors in method_errors.items(): + ... plt.loglog(ns, errors, label=method) + >>> + >>> plt.legend() + >>> plt.xlabel('sample size') + >>> plt.ylabel('RMSE (1000 trials)') + >>> plt.title('Entropy Estimator Error (Exponential Distribution)') + + """ + values = np.asarray(values) + values = np.moveaxis(values, axis, -1) + n = values.shape[-1] # number of observations + + if window_length is None: + window_length = math.floor(math.sqrt(n) + 0.5) + + if not 2 <= 2 * window_length < n: + raise ValueError( + f"Window length ({window_length}) must be positive and less " + f"than half the sample size ({n}).", + ) + + if base is not None and base <= 0: + raise ValueError("`base` must be a positive number or `None`.") + + sorted_data = np.sort(values, axis=-1) + + methods = {"vasicek": _vasicek_entropy, + "van es": _van_es_entropy, + "correa": _correa_entropy, + "ebrahimi": _ebrahimi_entropy, + "auto": _vasicek_entropy} + method = method.lower() + if method not in methods: + message = f"`method` must be one of {set(methods)}" + raise ValueError(message) + + if method == "auto": + if n <= 10: + method = 'van es' + elif n <= 1000: + method = 'ebrahimi' + else: + method = 'vasicek' + + res = methods[method](sorted_data, window_length) + + if base is not None: + res /= np.log(base) + + return res + + +def _pad_along_last_axis(X, m): + """Pad the data for computing the rolling window difference.""" + # scales a bit better than method in _vasicek_like_entropy + shape = np.array(X.shape) + shape[-1] = m + Xl = np.broadcast_to(X[..., [0]], shape) # [0] vs 0 to maintain shape + Xr = np.broadcast_to(X[..., [-1]], shape) + return np.concatenate((Xl, X, Xr), axis=-1) + + +def _vasicek_entropy(X, m): + """Compute the Vasicek estimator as described in [6] Eq. 
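A quick sanity check of the four estimation methods against the exact differential entropy of a standard normal, 0.5*log(2*pi*e), about 1.4189 nats; sample size and seed below are arbitrary:

import numpy as np
from scipy import stats

rng = np.random.default_rng(12345)
x = rng.standard_normal(1000)
for method in ('vasicek', 'van es', 'ebrahimi', 'correa'):
    print(method, float(stats.differential_entropy(x, method=method)))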
1.3.""" + n = X.shape[-1] + X = _pad_along_last_axis(X, m) + differences = X[..., 2 * m:] - X[..., : -2 * m:] + logs = np.log(n/(2*m) * differences) + return np.mean(logs, axis=-1) + + +def _van_es_entropy(X, m): + """Compute the van Es estimator as described in [6].""" + # No equation number, but referred to as HVE_mn. + # Typo: there should be a log within the summation. + n = X.shape[-1] + difference = X[..., m:] - X[..., :-m] + term1 = 1/(n-m) * np.sum(np.log((n+1)/m * difference), axis=-1) + k = np.arange(m, n+1) + return term1 + np.sum(1/k) + np.log(m) - np.log(n+1) + + +def _ebrahimi_entropy(X, m): + """Compute the Ebrahimi estimator as described in [6].""" + # No equation number, but referred to as HE_mn + n = X.shape[-1] + X = _pad_along_last_axis(X, m) + + differences = X[..., 2 * m:] - X[..., : -2 * m:] + + i = np.arange(1, n+1).astype(float) + ci = np.ones_like(i)*2 + ci[i <= m] = 1 + (i[i <= m] - 1)/m + ci[i >= n - m + 1] = 1 + (n - i[i >= n-m+1])/m + + logs = np.log(n * differences / (ci * m)) + return np.mean(logs, axis=-1) + + +def _correa_entropy(X, m): + """Compute the Correa estimator as described in [6].""" + # No equation number, but referred to as HC_mn + n = X.shape[-1] + X = _pad_along_last_axis(X, m) + + i = np.arange(1, n+1) + dj = np.arange(-m, m+1)[:, None] + j = i + dj + j0 = j + m - 1 # 0-indexed version of j + + Xibar = np.mean(X[..., j0], axis=-2, keepdims=True) + difference = X[..., j0] - Xibar + num = np.sum(difference*dj, axis=-2) # dj is d-i + den = n*np.sum(difference**2, axis=-2) + return -np.mean(np.log(num/den), axis=-1) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_kde.py b/venv/lib/python3.10/site-packages/scipy/stats/_kde.py new file mode 100644 index 0000000000000000000000000000000000000000..c1e4ed6acc64392a155f1b21b8d63813552a3f99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_kde.py @@ -0,0 +1,728 @@ +#------------------------------------------------------------------------------- +# +# Define classes for (uni/multi)-variate kernel density estimation. +# +# Currently, only Gaussian kernels are implemented. +# +# Written by: Robert Kern +# +# Date: 2004-08-09 +# +# Modified: 2005-02-10 by Robert Kern. +# Contributed to SciPy +# 2005-10-07 by Robert Kern. +# Some fixes to match the new scipy_core +# +# Copyright 2004-2005 by Enthought, Inc. +# +#------------------------------------------------------------------------------- + +# Standard library imports. +import warnings + +# SciPy imports. +from scipy import linalg, special +from scipy._lib._util import check_random_state + +from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, exp, pi, + sqrt, ravel, power, atleast_1d, squeeze, sum, transpose, + ones, cov) +import numpy as np + +# Local imports. +from . import _mvn +from ._stats import gaussian_kernel_estimate, gaussian_kernel_estimate_log + +# deprecated import to be removed in SciPy 1.13.0 +from scipy.special import logsumexp # noqa: F401 + + +__all__ = ['gaussian_kde'] + + +class gaussian_kde: + """Representation of a kernel-density estimate using Gaussian kernels. + + Kernel density estimation is a way to estimate the probability density + function (PDF) of a random variable in a non-parametric way. + `gaussian_kde` works for both uni-variate and multi-variate data. It + includes automatic bandwidth determination. The estimation works best for + a unimodal distribution; bimodal or multi-modal distributions tend to be + oversmoothed. 
+ + Parameters + ---------- + dataset : array_like + Datapoints to estimate from. In case of univariate data this is a 1-D + array, otherwise a 2-D array with shape (# of dims, # of data). + bw_method : str, scalar or callable, optional + The method used to calculate the estimator bandwidth. This can be + 'scott', 'silverman', a scalar constant or a callable. If a scalar, + this will be used directly as `kde.factor`. If a callable, it should + take a `gaussian_kde` instance as only parameter and return a scalar. + If None (default), 'scott' is used. See Notes for more details. + weights : array_like, optional + weights of datapoints. This must be the same shape as dataset. + If None (default), the samples are assumed to be equally weighted + + Attributes + ---------- + dataset : ndarray + The dataset with which `gaussian_kde` was initialized. + d : int + Number of dimensions. + n : int + Number of datapoints. + neff : int + Effective number of datapoints. + + .. versionadded:: 1.2.0 + factor : float + The bandwidth factor, obtained from `kde.covariance_factor`. The square + of `kde.factor` multiplies the covariance matrix of the data in the kde + estimation. + covariance : ndarray + The covariance matrix of `dataset`, scaled by the calculated bandwidth + (`kde.factor`). + inv_cov : ndarray + The inverse of `covariance`. + + Methods + ------- + evaluate + __call__ + integrate_gaussian + integrate_box_1d + integrate_box + integrate_kde + pdf + logpdf + resample + set_bandwidth + covariance_factor + + Notes + ----- + Bandwidth selection strongly influences the estimate obtained from the KDE + (much more so than the actual shape of the kernel). Bandwidth selection + can be done by a "rule of thumb", by cross-validation, by "plug-in + methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde` + uses a rule of thumb, the default is Scott's Rule. + + Scott's Rule [1]_, implemented as `scotts_factor`, is:: + + n**(-1./(d+4)), + + with ``n`` the number of data points and ``d`` the number of dimensions. + In the case of unequally weighted points, `scotts_factor` becomes:: + + neff**(-1./(d+4)), + + with ``neff`` the effective number of datapoints. + Silverman's Rule [2]_, implemented as `silverman_factor`, is:: + + (n * (d + 2) / 4.)**(-1. / (d + 4)). + + or in the case of unequally weighted points:: + + (neff * (d + 2) / 4.)**(-1. / (d + 4)). + + Good general descriptions of kernel density estimation can be found in [1]_ + and [2]_, the mathematics for this multi-dimensional implementation can be + found in [1]_. + + With a set of weighted samples, the effective number of datapoints ``neff`` + is defined by:: + + neff = sum(weights)^2 / sum(weights^2) + + as detailed in [5]_. + + `gaussian_kde` does not currently support data that lies in a + lower-dimensional subspace of the space in which it is expressed. For such + data, consider performing principle component analysis / dimensionality + reduction and using `gaussian_kde` with the transformed data. + + References + ---------- + .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and + Visualization", John Wiley & Sons, New York, Chicester, 1992. + .. [2] B.W. Silverman, "Density Estimation for Statistics and Data + Analysis", Vol. 26, Monographs on Statistics and Applied Probability, + Chapman and Hall, London, 1986. + .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A + Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993. + .. [4] D.M. Bashtannyk and R.J. 
Hyndman, "Bandwidth selection for kernel + conditional density estimation", Computational Statistics & Data + Analysis, Vol. 36, pp. 279-298, 2001. + .. [5] Gray P. G., 1969, Journal of the Royal Statistical Society. + Series A (General), 132, 272 + + Examples + -------- + Generate some random two-dimensional data: + + >>> import numpy as np + >>> from scipy import stats + >>> def measure(n): + ... "Measurement model, return two coupled measurements." + ... m1 = np.random.normal(size=n) + ... m2 = np.random.normal(scale=0.5, size=n) + ... return m1+m2, m1-m2 + + >>> m1, m2 = measure(2000) + >>> xmin = m1.min() + >>> xmax = m1.max() + >>> ymin = m2.min() + >>> ymax = m2.max() + + Perform a kernel density estimate on the data: + + >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j] + >>> positions = np.vstack([X.ravel(), Y.ravel()]) + >>> values = np.vstack([m1, m2]) + >>> kernel = stats.gaussian_kde(values) + >>> Z = np.reshape(kernel(positions).T, X.shape) + + Plot the results: + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, + ... extent=[xmin, xmax, ymin, ymax]) + >>> ax.plot(m1, m2, 'k.', markersize=2) + >>> ax.set_xlim([xmin, xmax]) + >>> ax.set_ylim([ymin, ymax]) + >>> plt.show() + + """ + def __init__(self, dataset, bw_method=None, weights=None): + self.dataset = atleast_2d(asarray(dataset)) + if not self.dataset.size > 1: + raise ValueError("`dataset` input should have multiple elements.") + + self.d, self.n = self.dataset.shape + + if weights is not None: + self._weights = atleast_1d(weights).astype(float) + self._weights /= sum(self._weights) + if self.weights.ndim != 1: + raise ValueError("`weights` input should be one-dimensional.") + if len(self._weights) != self.n: + raise ValueError("`weights` input should be of length n") + self._neff = 1/sum(self._weights**2) + + # This can be converted to a warning once gh-10205 is resolved + if self.d > self.n: + msg = ("Number of dimensions is greater than number of samples. " + "This results in a singular data covariance matrix, which " + "cannot be treated using the algorithms implemented in " + "`gaussian_kde`. Note that `gaussian_kde` interprets each " + "*column* of `dataset` to be a point; consider transposing " + "the input to `dataset`.") + raise ValueError(msg) + + try: + self.set_bandwidth(bw_method=bw_method) + except linalg.LinAlgError as e: + msg = ("The data appears to lie in a lower-dimensional subspace " + "of the space in which it is expressed. This has resulted " + "in a singular data covariance matrix, which cannot be " + "treated using the algorithms implemented in " + "`gaussian_kde`. Consider performing principle component " + "analysis / dimensionality reduction and using " + "`gaussian_kde` with the transformed data.") + raise linalg.LinAlgError(msg) from e + + def evaluate(self, points): + """Evaluate the estimated pdf on a set of points. + + Parameters + ---------- + points : (# of dimensions, # of points)-array + Alternatively, a (# of dimensions,) vector can be passed in and + treated as a single point. + + Returns + ------- + values : (# of points,)-array + The values at each point. + + Raises + ------ + ValueError : if the dimensionality of the input points is different than + the dimensionality of the KDE. 
+ + """ + points = atleast_2d(asarray(points)) + + d, m = points.shape + if d != self.d: + if d == 1 and m == self.d: + # points was passed in as a row vector + points = reshape(points, (self.d, 1)) + m = 1 + else: + msg = (f"points have dimension {d}, " + f"dataset has dimension {self.d}") + raise ValueError(msg) + + output_dtype, spec = _get_output_dtype(self.covariance, points) + result = gaussian_kernel_estimate[spec]( + self.dataset.T, self.weights[:, None], + points.T, self.cho_cov, output_dtype) + + return result[:, 0] + + __call__ = evaluate + + def integrate_gaussian(self, mean, cov): + """ + Multiply estimated density by a multivariate Gaussian and integrate + over the whole space. + + Parameters + ---------- + mean : aray_like + A 1-D array, specifying the mean of the Gaussian. + cov : array_like + A 2-D array, specifying the covariance matrix of the Gaussian. + + Returns + ------- + result : scalar + The value of the integral. + + Raises + ------ + ValueError + If the mean or covariance of the input Gaussian differs from + the KDE's dimensionality. + + """ + mean = atleast_1d(squeeze(mean)) + cov = atleast_2d(cov) + + if mean.shape != (self.d,): + raise ValueError("mean does not have dimension %s" % self.d) + if cov.shape != (self.d, self.d): + raise ValueError("covariance does not have dimension %s" % self.d) + + # make mean a column vector + mean = mean[:, newaxis] + + sum_cov = self.covariance + cov + + # This will raise LinAlgError if the new cov matrix is not s.p.d + # cho_factor returns (ndarray, bool) where bool is a flag for whether + # or not ndarray is upper or lower triangular + sum_cov_chol = linalg.cho_factor(sum_cov) + + diff = self.dataset - mean + tdiff = linalg.cho_solve(sum_cov_chol, diff) + + sqrt_det = np.prod(np.diagonal(sum_cov_chol[0])) + norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det + + energies = sum(diff * tdiff, axis=0) / 2.0 + result = sum(exp(-energies)*self.weights, axis=0) / norm_const + + return result + + def integrate_box_1d(self, low, high): + """ + Computes the integral of a 1D pdf between two bounds. + + Parameters + ---------- + low : scalar + Lower bound of integration. + high : scalar + Upper bound of integration. + + Returns + ------- + value : scalar + The result of the integral. + + Raises + ------ + ValueError + If the KDE is over more than one dimension. + + """ + if self.d != 1: + raise ValueError("integrate_box_1d() only handles 1D pdfs") + + stdev = ravel(sqrt(self.covariance))[0] + + normalized_low = ravel((low - self.dataset) / stdev) + normalized_high = ravel((high - self.dataset) / stdev) + + value = np.sum(self.weights*( + special.ndtr(normalized_high) - + special.ndtr(normalized_low))) + return value + + def integrate_box(self, low_bounds, high_bounds, maxpts=None): + """Computes the integral of a pdf over a rectangular interval. + + Parameters + ---------- + low_bounds : array_like + A 1-D array containing the lower bounds of integration. + high_bounds : array_like + A 1-D array containing the upper bounds of integration. + maxpts : int, optional + The maximum number of points to use for integration. + + Returns + ------- + value : scalar + The result of the integral. 
+ + """ + if maxpts is not None: + extra_kwds = {'maxpts': maxpts} + else: + extra_kwds = {} + + value, inform = _mvn.mvnun_weighted(low_bounds, high_bounds, + self.dataset, self.weights, + self.covariance, **extra_kwds) + if inform: + msg = ('An integral in _mvn.mvnun requires more points than %s' % + (self.d * 1000)) + warnings.warn(msg, stacklevel=2) + + return value + + def integrate_kde(self, other): + """ + Computes the integral of the product of this kernel density estimate + with another. + + Parameters + ---------- + other : gaussian_kde instance + The other kde. + + Returns + ------- + value : scalar + The result of the integral. + + Raises + ------ + ValueError + If the KDEs have different dimensionality. + + """ + if other.d != self.d: + raise ValueError("KDEs are not the same dimensionality") + + # we want to iterate over the smallest number of points + if other.n < self.n: + small = other + large = self + else: + small = self + large = other + + sum_cov = small.covariance + large.covariance + sum_cov_chol = linalg.cho_factor(sum_cov) + result = 0.0 + for i in range(small.n): + mean = small.dataset[:, i, newaxis] + diff = large.dataset - mean + tdiff = linalg.cho_solve(sum_cov_chol, diff) + + energies = sum(diff * tdiff, axis=0) / 2.0 + result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i] + + sqrt_det = np.prod(np.diagonal(sum_cov_chol[0])) + norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det + + result /= norm_const + + return result + + def resample(self, size=None, seed=None): + """Randomly sample a dataset from the estimated pdf. + + Parameters + ---------- + size : int, optional + The number of samples to draw. If not provided, then the size is + the same as the effective number of samples in the underlying + dataset. + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Returns + ------- + resample : (self.d, `size`) ndarray + The sampled dataset. + + """ # numpy/numpydoc#87 # noqa: E501 + if size is None: + size = int(self.neff) + + random_state = check_random_state(seed) + norm = transpose(random_state.multivariate_normal( + zeros((self.d,), float), self.covariance, size=size + )) + indices = random_state.choice(self.n, size=size, p=self.weights) + means = self.dataset[:, indices] + + return means + norm + + def scotts_factor(self): + """Compute Scott's factor. + + Returns + ------- + s : float + Scott's factor. + """ + return power(self.neff, -1./(self.d+4)) + + def silverman_factor(self): + """Compute the Silverman factor. + + Returns + ------- + s : float + The silverman factor. + """ + return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4)) + + # Default method to calculate bandwidth, can be overwritten by subclass + covariance_factor = scotts_factor + covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that + multiplies the data covariance matrix to obtain the kernel covariance + matrix. The default is `scotts_factor`. A subclass can overwrite this + method to provide a different method, or set it through a call to + `kde.set_bandwidth`.""" + + def set_bandwidth(self, bw_method=None): + """Compute the estimator bandwidth with given method. 
+ + The new bandwidth calculated after a call to `set_bandwidth` is used + for subsequent evaluations of the estimated density. + + Parameters + ---------- + bw_method : str, scalar or callable, optional + The method used to calculate the estimator bandwidth. This can be + 'scott', 'silverman', a scalar constant or a callable. If a + scalar, this will be used directly as `kde.factor`. If a callable, + it should take a `gaussian_kde` instance as only parameter and + return a scalar. If None (default), nothing happens; the current + `kde.covariance_factor` method is kept. + + Notes + ----- + .. versionadded:: 0.11 + + Examples + -------- + >>> import numpy as np + >>> import scipy.stats as stats + >>> x1 = np.array([-7, -5, 1, 4, 5.]) + >>> kde = stats.gaussian_kde(x1) + >>> xs = np.linspace(-10, 10, num=50) + >>> y1 = kde(xs) + >>> kde.set_bandwidth(bw_method='silverman') + >>> y2 = kde(xs) + >>> kde.set_bandwidth(bw_method=kde.factor / 3.) + >>> y3 = kde(xs) + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo', + ... label='Data points (rescaled)') + >>> ax.plot(xs, y1, label='Scott (default)') + >>> ax.plot(xs, y2, label='Silverman') + >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)') + >>> ax.legend() + >>> plt.show() + + """ + if bw_method is None: + pass + elif bw_method == 'scott': + self.covariance_factor = self.scotts_factor + elif bw_method == 'silverman': + self.covariance_factor = self.silverman_factor + elif np.isscalar(bw_method) and not isinstance(bw_method, str): + self._bw_method = 'use constant' + self.covariance_factor = lambda: bw_method + elif callable(bw_method): + self._bw_method = bw_method + self.covariance_factor = lambda: self._bw_method(self) + else: + msg = "`bw_method` should be 'scott', 'silverman', a scalar " \ + "or a callable." + raise ValueError(msg) + + self._compute_covariance() + + def _compute_covariance(self): + """Computes the covariance matrix for each Gaussian kernel using + covariance_factor(). + """ + self.factor = self.covariance_factor() + # Cache covariance and Cholesky decomp of covariance + if not hasattr(self, '_data_cho_cov'): + self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1, + bias=False, + aweights=self.weights)) + self._data_cho_cov = linalg.cholesky(self._data_covariance, + lower=True) + + self.covariance = self._data_covariance * self.factor**2 + self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64) + self.log_det = 2*np.log(np.diag(self.cho_cov + * np.sqrt(2*pi))).sum() + + @property + def inv_cov(self): + # Re-compute from scratch each time because I'm not sure how this is + # used in the wild. (Perhaps users change the `dataset`, since it's + # not a private attribute?) `_compute_covariance` used to recalculate + # all these, so we'll recalculate everything now that this is a + # a property. + self.factor = self.covariance_factor() + self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1, + bias=False, aweights=self.weights)) + return linalg.inv(self._data_covariance) / self.factor**2 + + def pdf(self, x): + """ + Evaluate the estimated pdf on a provided set of points. + + Notes + ----- + This is an alias for `gaussian_kde.evaluate`. See the ``evaluate`` + docstring for more details. + + """ + return self.evaluate(x) + + def logpdf(self, x): + """ + Evaluate the log of the estimated pdf on a provided set of points. 
+ """ + points = atleast_2d(x) + + d, m = points.shape + if d != self.d: + if d == 1 and m == self.d: + # points was passed in as a row vector + points = reshape(points, (self.d, 1)) + m = 1 + else: + msg = (f"points have dimension {d}, " + f"dataset has dimension {self.d}") + raise ValueError(msg) + + output_dtype, spec = _get_output_dtype(self.covariance, points) + result = gaussian_kernel_estimate_log[spec]( + self.dataset.T, self.weights[:, None], + points.T, self.cho_cov, output_dtype) + + return result[:, 0] + + def marginal(self, dimensions): + """Return a marginal KDE distribution + + Parameters + ---------- + dimensions : int or 1-d array_like + The dimensions of the multivariate distribution corresponding + with the marginal variables, that is, the indices of the dimensions + that are being retained. The other dimensions are marginalized out. + + Returns + ------- + marginal_kde : gaussian_kde + An object representing the marginal distribution. + + Notes + ----- + .. versionadded:: 1.10.0 + + """ + + dims = np.atleast_1d(dimensions) + + if not np.issubdtype(dims.dtype, np.integer): + msg = ("Elements of `dimensions` must be integers - the indices " + "of the marginal variables being retained.") + raise ValueError(msg) + + n = len(self.dataset) # number of dimensions + original_dims = dims.copy() + + dims[dims < 0] = n + dims[dims < 0] + + if len(np.unique(dims)) != len(dims): + msg = ("All elements of `dimensions` must be unique.") + raise ValueError(msg) + + i_invalid = (dims < 0) | (dims >= n) + if np.any(i_invalid): + msg = (f"Dimensions {original_dims[i_invalid]} are invalid " + f"for a distribution in {n} dimensions.") + raise ValueError(msg) + + dataset = self.dataset[dims] + weights = self.weights + + return gaussian_kde(dataset, bw_method=self.covariance_factor(), + weights=weights) + + @property + def weights(self): + try: + return self._weights + except AttributeError: + self._weights = ones(self.n)/self.n + return self._weights + + @property + def neff(self): + try: + return self._neff + except AttributeError: + self._neff = 1/sum(self.weights**2) + return self._neff + + +def _get_output_dtype(covariance, points): + """ + Calculates the output dtype and the "spec" (=C type name). + + This was necessary in order to deal with the fused types in the Cython + routine `gaussian_kernel_estimate`. See gh-10824 for details. + """ + output_dtype = np.common_type(covariance, points) + itemsize = np.dtype(output_dtype).itemsize + if itemsize == 4: + spec = 'float' + elif itemsize == 8: + spec = 'double' + elif itemsize in (12, 16): + spec = 'long double' + else: + raise ValueError( + f"{output_dtype} has unexpected item size: {itemsize}" + ) + + return output_dtype, spec diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_ksstats.py b/venv/lib/python3.10/site-packages/scipy/stats/_ksstats.py new file mode 100644 index 0000000000000000000000000000000000000000..2bc60da7bba862a1b16f4b41c66e523f985ac415 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_ksstats.py @@ -0,0 +1,600 @@ +# Compute the two-sided one-sample Kolmogorov-Smirnov Prob(Dn <= d) where: +# D_n = sup_x{|F_n(x) - F(x)|}, +# F_n(x) is the empirical CDF for a sample of size n {x_i: i=1,...,n}, +# F(x) is the CDF of a probability distribution. +# +# Exact methods: +# Prob(D_n >= d) can be computed via a matrix algorithm of Durbin[1] +# or a recursion algorithm due to Pomeranz[2]. +# Marsaglia, Tsang & Wang[3] gave a computation-efficient way to perform +# the Durbin algorithm. 
+# D_n >= d <==> D_n+ >= d or D_n- >= d (the one-sided K-S statistics), hence +# Prob(D_n >= d) = 2*Prob(D_n+ >= d) - Prob(D_n+ >= d and D_n- >= d). +# For d > 0.5, the latter intersection probability is 0. +# +# Approximate methods: +# For d close to 0.5, ignoring that intersection term may still give a +# reasonable approximation. +# Li-Chien[4] and Korolyuk[5] gave an asymptotic formula extending +# Kolmogorov's initial asymptotic, suitable for large d. (See +# scipy.special.kolmogorov for that asymptotic) +# Pelz-Good[6] used the functional equation for Jacobi theta functions to +# transform the Li-Chien/Korolyuk formula produce a computational formula +# suitable for small d. +# +# Simard and L'Ecuyer[7] provided an algorithm to decide when to use each of +# the above approaches and it is that which is used here. +# +# Other approaches: +# Carvalho[8] optimizes Durbin's matrix algorithm for large values of d. +# Moscovich and Nadler[9] use FFTs to compute the convolutions. + +# References: +# [1] Durbin J (1968). +# "The Probability that the Sample Distribution Function Lies Between Two +# Parallel Straight Lines." +# Annals of Mathematical Statistics, 39, 398-411. +# [2] Pomeranz J (1974). +# "Exact Cumulative Distribution of the Kolmogorov-Smirnov Statistic for +# Small Samples (Algorithm 487)." +# Communications of the ACM, 17(12), 703-704. +# [3] Marsaglia G, Tsang WW, Wang J (2003). +# "Evaluating Kolmogorov's Distribution." +# Journal of Statistical Software, 8(18), 1-4. +# [4] LI-CHIEN, C. (1956). +# "On the exact distribution of the statistics of A. N. Kolmogorov and +# their asymptotic expansion." +# Acta Matematica Sinica, 6, 55-81. +# [5] KOROLYUK, V. S. (1960). +# "Asymptotic analysis of the distribution of the maximum deviation in +# the Bernoulli scheme." +# Theor. Probability Appl., 4, 339-366. +# [6] Pelz W, Good IJ (1976). +# "Approximating the Lower Tail-areas of the Kolmogorov-Smirnov One-sample +# Statistic." +# Journal of the Royal Statistical Society, Series B, 38(2), 152-156. +# [7] Simard, R., L'Ecuyer, P. (2011) +# "Computing the Two-Sided Kolmogorov-Smirnov Distribution", +# Journal of Statistical Software, Vol 39, 11, 1-18. +# [8] Carvalho, Luis (2015) +# "An Improved Evaluation of Kolmogorov's Distribution" +# Journal of Statistical Software, Code Snippets; Vol 65(3), 1-8. +# [9] Amit Moscovich, Boaz Nadler (2017) +# "Fast calculation of boundary crossing probabilities for Poisson +# processes", +# Statistics & Probability Letters, Vol 123, 177-182. + + +import numpy as np +import scipy.special +import scipy.special._ufuncs as scu +from scipy._lib._finite_differences import _derivative + +_E128 = 128 +_EP128 = np.ldexp(np.longdouble(1), _E128) +_EM128 = np.ldexp(np.longdouble(1), -_E128) + +_SQRT2PI = np.sqrt(2 * np.pi) +_LOG_2PI = np.log(2 * np.pi) +_MIN_LOG = -708 +_SQRT3 = np.sqrt(3) +_PI_SQUARED = np.pi ** 2 +_PI_FOUR = np.pi ** 4 +_PI_SIX = np.pi ** 6 + +# [Lifted from _loggamma.pxd.] If B_m are the Bernoulli numbers, +# then Stirling coeffs are B_{2j}/(2j)/(2j-1) for j=8,...1. +_STIRLING_COEFFS = [-2.955065359477124183e-2, 6.4102564102564102564e-3, + -1.9175269175269175269e-3, 8.4175084175084175084e-4, + -5.952380952380952381e-4, 7.9365079365079365079e-4, + -2.7777777777777777778e-3, 8.3333333333333333333e-2] + + +def _log_nfactorial_div_n_pow_n(n): + # Computes n! / n**n + # = (n-1)! / n**(n-1) + # Uses Stirling's approximation, but removes n*log(n) up-front to + # avoid subtractive cancellation. 
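These routines back the finite-sample two-sided Kolmogorov-Smirnov distribution that SciPy exposes as scipy.stats.kstwo; scipy.stats.kstwobign gives the large-n limit mentioned in the comments above. An illustrative sketch with arbitrary n and d:

import numpy as np
from scipy import stats

n, d = 10, 0.3
print(stats.kstwo.cdf(d, n))               # Prob(D_n <= d)
print(stats.kstwo.sf(d, n))                # Prob(D_n >= d)
print(stats.kstwobign.sf(d * np.sqrt(n)))  # limiting approximation for large n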
+ # = log(n)/2 - n + log(sqrt(2pi)) + sum B_{2j}/(2j)/(2j-1)/n**(2j-1) + rn = 1.0/n + return np.log(n)/2 - n + _LOG_2PI/2 + rn * np.polyval(_STIRLING_COEFFS, rn/n) + + +def _clip_prob(p): + """clips a probability to range 0<=p<=1.""" + return np.clip(p, 0.0, 1.0) + + +def _select_and_clip_prob(cdfprob, sfprob, cdf=True): + """Selects either the CDF or SF, and then clips to range 0<=p<=1.""" + p = np.where(cdf, cdfprob, sfprob) + return _clip_prob(p) + + +def _kolmogn_DMTW(n, d, cdf=True): + r"""Computes the Kolmogorov CDF: Pr(D_n <= d) using the MTW approach to + the Durbin matrix algorithm. + + Durbin (1968); Marsaglia, Tsang, Wang (2003). [1], [3]. + """ + # Write d = (k-h)/n, where k is positive integer and 0 <= h < 1 + # Generate initial matrix H of size m*m where m=(2k-1) + # Compute k-th row of (n!/n^n) * H^n, scaling intermediate results. + # Requires memory O(m^2) and computation O(m^2 log(n)). + # Most suitable for small m. + + if d >= 1.0: + return _select_and_clip_prob(1.0, 0.0, cdf) + nd = n * d + if nd <= 0.5: + return _select_and_clip_prob(0.0, 1.0, cdf) + k = int(np.ceil(nd)) + h = k - nd + m = 2 * k - 1 + + H = np.zeros([m, m]) + + # Initialize: v is first column (and last row) of H + # v[j] = (1-h^(j+1)/(j+1)! (except for v[-1]) + # w[j] = 1/(j)! + # q = k-th row of H (actually i!/n^i*H^i) + intm = np.arange(1, m + 1) + v = 1.0 - h ** intm + w = np.empty(m) + fac = 1.0 + for j in intm: + w[j - 1] = fac + fac /= j # This might underflow. Isn't a problem. + v[j - 1] *= fac + tt = max(2 * h - 1.0, 0)**m - 2*h**m + v[-1] = (1.0 + tt) * fac + + for i in range(1, m): + H[i - 1:, i] = w[:m - i + 1] + H[:, 0] = v + H[-1, :] = np.flip(v, axis=0) + + Hpwr = np.eye(np.shape(H)[0]) # Holds intermediate powers of H + nn = n + expnt = 0 # Scaling of Hpwr + Hexpnt = 0 # Scaling of H + while nn > 0: + if nn % 2: + Hpwr = np.matmul(Hpwr, H) + expnt += Hexpnt + H = np.matmul(H, H) + Hexpnt *= 2 + # Scale as needed. + if np.abs(H[k - 1, k - 1]) > _EP128: + H /= _EP128 + Hexpnt += _E128 + nn = nn // 2 + + p = Hpwr[k - 1, k - 1] + + # Multiply by n!/n^n + for i in range(1, n + 1): + p = i * p / n + if np.abs(p) < _EM128: + p *= _EP128 + expnt -= _E128 + + # unscale + if expnt != 0: + p = np.ldexp(p, expnt) + + return _select_and_clip_prob(p, 1.0-p, cdf) + + +def _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf): + """Compute the endpoints of the interval for row i.""" + if i == 0: + j1, j2 = -ll - ceilf - 1, ll + ceilf - 1 + else: + # i + 1 = 2*ip1div2 + ip1mod2 + ip1div2, ip1mod2 = divmod(i + 1, 2) + if ip1mod2 == 0: # i is odd + if ip1div2 == n + 1: + j1, j2 = n - ll - ceilf - 1, n + ll + ceilf - 1 + else: + j1, j2 = ip1div2 - 1 - ll - roundf - 1, ip1div2 + ll - 1 + ceilf - 1 + else: + j1, j2 = ip1div2 - 1 - ll - 1, ip1div2 + ll + roundf - 1 + + return max(j1 + 2, 0), min(j2, n) + + +def _kolmogn_Pomeranz(n, x, cdf=True): + r"""Computes Pr(D_n <= d) using the Pomeranz recursion algorithm. + + Pomeranz (1974) [2] + """ + + # V is n*(2n+2) matrix. + # Each row is convolution of the previous row and probabilities from a + # Poisson distribution. + # Desired CDF probability is n! V[n-1, 2n+1] (final entry in final row). + # Only two rows are needed at any given stage: + # - Call them V0 and V1. + # - Swap each iteration + # Only a few (contiguous) entries in each row can be non-zero. + # - Keep track of start and end (j1 and j2 below) + # - V0s and V1s track the start in the two rows + # Scale intermediate results as needed. 
+ # Only a few different Poisson distributions can occur + t = n * x + ll = int(np.floor(t)) + f = 1.0 * (t - ll) # fractional part of t + g = min(f, 1.0 - f) + ceilf = (1 if f > 0 else 0) + roundf = (1 if f > 0.5 else 0) + npwrs = 2 * (ll + 1) # Maximum number of powers needed in convolutions + gpower = np.empty(npwrs) # gpower = (g/n)^m/m! + twogpower = np.empty(npwrs) # twogpower = (2g/n)^m/m! + onem2gpower = np.empty(npwrs) # onem2gpower = ((1-2g)/n)^m/m! + # gpower etc are *almost* Poisson probs, just missing normalizing factor. + + gpower[0] = 1.0 + twogpower[0] = 1.0 + onem2gpower[0] = 1.0 + expnt = 0 + g_over_n, two_g_over_n, one_minus_two_g_over_n = g/n, 2*g/n, (1 - 2*g)/n + for m in range(1, npwrs): + gpower[m] = gpower[m - 1] * g_over_n / m + twogpower[m] = twogpower[m - 1] * two_g_over_n / m + onem2gpower[m] = onem2gpower[m - 1] * one_minus_two_g_over_n / m + + V0 = np.zeros([npwrs]) + V1 = np.zeros([npwrs]) + V1[0] = 1 # first row + V0s, V1s = 0, 0 # start indices of the two rows + + j1, j2 = _pomeranz_compute_j1j2(0, n, ll, ceilf, roundf) + for i in range(1, 2 * n + 2): + # Preserve j1, V1, V1s, V0s from last iteration + k1 = j1 + V0, V1 = V1, V0 + V0s, V1s = V1s, V0s + V1.fill(0.0) + j1, j2 = _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf) + if i == 1 or i == 2 * n + 1: + pwrs = gpower + else: + pwrs = (twogpower if i % 2 else onem2gpower) + ln2 = j2 - k1 + 1 + if ln2 > 0: + conv = np.convolve(V0[k1 - V0s:k1 - V0s + ln2], pwrs[:ln2]) + conv_start = j1 - k1 # First index to use from conv + conv_len = j2 - j1 + 1 # Number of entries to use from conv + V1[:conv_len] = conv[conv_start:conv_start + conv_len] + # Scale to avoid underflow. + if 0 < np.max(V1) < _EM128: + V1 *= _EP128 + expnt -= _E128 + V1s = V0s + j1 - k1 + + # multiply by n! + ans = V1[n - V1s] + for m in range(1, n + 1): + if np.abs(ans) > _EP128: + ans *= _EM128 + expnt += _E128 + ans *= m + + # Undo any intermediate scaling + if expnt != 0: + ans = np.ldexp(ans, expnt) + ans = _select_and_clip_prob(ans, 1.0 - ans, cdf) + return ans + + +def _kolmogn_PelzGood(n, x, cdf=True): + """Computes the Pelz-Good approximation to Prob(Dn <= x) with 0<=x<=1. + + Start with Li-Chien, Korolyuk approximation: + Prob(Dn <= x) ~ K0(z) + K1(z)/sqrt(n) + K2(z)/n + K3(z)/n**1.5 + where z = x*sqrt(n). + Transform each K_(z) using Jacobi theta functions into a form suitable + for small z. + Pelz-Good (1976). [6] + """ + if x <= 0.0: + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + if x >= 1.0: + return _select_and_clip_prob(1.0, 0.0, cdf=cdf) + + z = np.sqrt(n) * x + zsquared, zthree, zfour, zsix = z**2, z**3, z**4, z**6 + + qlog = -_PI_SQUARED / 8 / zsquared + if qlog < _MIN_LOG: # z ~ 0.041743441416853426 + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + + q = np.exp(qlog) + + # Coefficients of terms in the sums for K1, K2 and K3 + k1a = -zsquared + k1b = _PI_SQUARED / 4 + + k2a = 6 * zsix + 2 * zfour + k2b = (2 * zfour - 5 * zsquared) * _PI_SQUARED / 4 + k2c = _PI_FOUR * (1 - 2 * zsquared) / 16 + + k3d = _PI_SIX * (5 - 30 * zsquared) / 64 + k3c = _PI_FOUR * (-60 * zsquared + 212 * zfour) / 16 + k3b = _PI_SQUARED * (135 * zfour - 96 * zsix) / 4 + k3a = -30 * zsix - 90 * z**8 + + K0to3 = np.zeros(4) + # Use a Horner scheme to evaluate sum c_i q^(i^2) + # Reduces to a sum over odd integers. 
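+    # With m = 2k - 1 the sum is q*(c_1 + q**8*(c_2 + q**16*(c_3 + ...))),
+    # because q**((2k+1)**2) / q**((2k-1)**2) = q**(8*k); hence the factor
+    # np.power(q, 8 * k) inside the loop and the final multiplication by q.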
+ maxk = int(np.ceil(16 * z / np.pi)) + for k in range(maxk, 0, -1): + m = 2 * k - 1 + msquared, mfour, msix = m**2, m**4, m**6 + qpower = np.power(q, 8 * k) + coeffs = np.array([1.0, + k1a + k1b*msquared, + k2a + k2b*msquared + k2c*mfour, + k3a + k3b*msquared + k3c*mfour + k3d*msix]) + K0to3 *= qpower + K0to3 += coeffs + K0to3 *= q + K0to3 *= _SQRT2PI + # z**10 > 0 as z > 0.04 + K0to3 /= np.array([z, 6 * zfour, 72 * z**7, 6480 * z**10]) + + # Now do the other sum over the other terms, all integers k + # K_2: (pi^2 k^2) q^(k^2), + # K_3: (3pi^2 k^2 z^2 - pi^4 k^4)*q^(k^2) + # Don't expect much subtractive cancellation so use direct calculation + q = np.exp(-_PI_SQUARED / 2 / zsquared) + ks = np.arange(maxk, 0, -1) + ksquared = ks ** 2 + sqrt3z = _SQRT3 * z + kspi = np.pi * ks + qpwers = q ** ksquared + k2extra = np.sum(ksquared * qpwers) + k2extra *= _PI_SQUARED * _SQRT2PI/(-36 * zthree) + K0to3[2] += k2extra + k3extra = np.sum((sqrt3z + kspi) * (sqrt3z - kspi) * ksquared * qpwers) + k3extra *= _PI_SQUARED * _SQRT2PI/(216 * zsix) + K0to3[3] += k3extra + powers_of_n = np.power(n * 1.0, np.arange(len(K0to3)) / 2.0) + K0to3 /= powers_of_n + + if not cdf: + K0to3 *= -1 + K0to3[0] += 1 + + Ksum = sum(K0to3) + return Ksum + + +def _kolmogn(n, x, cdf=True): + """Computes the CDF(or SF) for the two-sided Kolmogorov-Smirnov statistic. + + x must be of type float, n of type integer. + + Simard & L'Ecuyer (2011) [7]. + """ + if np.isnan(n): + return n # Keep the same type of nan + if int(n) != n or n <= 0: + return np.nan + if x >= 1.0: + return _select_and_clip_prob(1.0, 0.0, cdf=cdf) + if x <= 0.0: + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + t = n * x + if t <= 1.0: # Ruben-Gambino: 1/2n <= x <= 1/n + if t <= 0.5: + return _select_and_clip_prob(0.0, 1.0, cdf=cdf) + if n <= 140: + prob = np.prod(np.arange(1, n+1) * (1.0/n) * (2*t - 1)) + else: + prob = np.exp(_log_nfactorial_div_n_pow_n(n) + n * np.log(2*t-1)) + return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf) + if t >= n - 1: # Ruben-Gambino + prob = 2 * (1.0 - x)**n + return _select_and_clip_prob(1 - prob, prob, cdf=cdf) + if x >= 0.5: # Exact: 2 * smirnov + prob = 2 * scipy.special.smirnov(n, x) + return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf) + + nxsquared = t * x + if n <= 140: + if nxsquared <= 0.754693: + prob = _kolmogn_DMTW(n, x, cdf=True) + return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf) + if nxsquared <= 4: + prob = _kolmogn_Pomeranz(n, x, cdf=True) + return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf) + # Now use Miller approximation of 2*smirnov + prob = 2 * scipy.special.smirnov(n, x) + return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf) + + # Split CDF and SF as they have different cutoffs on nxsquared. + if not cdf: + if nxsquared >= 370.0: + return 0.0 + if nxsquared >= 2.2: + prob = 2 * scipy.special.smirnov(n, x) + return _clip_prob(prob) + # Fall through and compute the SF as 1.0-CDF + if nxsquared >= 18.0: + cdfprob = 1.0 + elif n <= 100000 and n * x**1.5 <= 1.4: + cdfprob = _kolmogn_DMTW(n, x, cdf=True) + else: + cdfprob = _kolmogn_PelzGood(n, x, cdf=True) + return _select_and_clip_prob(cdfprob, 1.0 - cdfprob, cdf=cdf) + + +def _kolmogn_p(n, x): + """Computes the PDF for the two-sided Kolmogorov-Smirnov statistic. + + x must be of type float, n of type integer. 
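+
+    Uses closed forms where available (Ruben-Gambino near the endpoints of
+    the support, ``2*ksone.pdf`` for ``x >= 0.5``), otherwise differentiates
+    ``kolmogn`` numerically.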
+ """ + if np.isnan(n): + return n # Keep the same type of nan + if int(n) != n or n <= 0: + return np.nan + if x >= 1.0 or x <= 0: + return 0 + t = n * x + if t <= 1.0: + # Ruben-Gambino: n!/n^n * (2t-1)^n -> 2 n!/n^n * n^2 * (2t-1)^(n-1) + if t <= 0.5: + return 0.0 + if n <= 140: + prd = np.prod(np.arange(1, n) * (1.0 / n) * (2 * t - 1)) + else: + prd = np.exp(_log_nfactorial_div_n_pow_n(n) + (n-1) * np.log(2 * t - 1)) + return prd * 2 * n**2 + if t >= n - 1: + # Ruben-Gambino : 1-2(1-x)**n -> 2n*(1-x)**(n-1) + return 2 * (1.0 - x) ** (n-1) * n + if x >= 0.5: + return 2 * scipy.stats.ksone.pdf(x, n) + + # Just take a small delta. + # Ideally x +/- delta would stay within [i/n, (i+1)/n] for some integer a. + # as the CDF is a piecewise degree n polynomial. + # It has knots at 1/n, 2/n, ... (n-1)/n + # and is not a C-infinity function at the knots + delta = x / 2.0**16 + delta = min(delta, x - 1.0/n) + delta = min(delta, 0.5 - x) + + def _kk(_x): + return kolmogn(n, _x) + + return _derivative(_kk, x, dx=delta, order=5) + + +def _kolmogni(n, p, q): + """Computes the PPF/ISF of kolmogn. + + n of type integer, n>= 1 + p is the CDF, q the SF, p+q=1 + """ + if np.isnan(n): + return n # Keep the same type of nan + if int(n) != n or n <= 0: + return np.nan + if p <= 0: + return 1.0/n + if q <= 0: + return 1.0 + delta = np.exp((np.log(p) - scipy.special.loggamma(n+1))/n) + if delta <= 1.0/n: + return (delta + 1.0 / n) / 2 + x = -np.expm1(np.log(q/2.0)/n) + if x >= 1 - 1.0/n: + return x + x1 = scu._kolmogci(p)/np.sqrt(n) + x1 = min(x1, 1.0 - 1.0/n) + + def _f(x): + return _kolmogn(n, x) - p + + return scipy.optimize.brentq(_f, 1.0/n, x1, xtol=1e-14) + + +def kolmogn(n, x, cdf=True): + """Computes the CDF for the two-sided Kolmogorov-Smirnov distribution. + + The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x), + for a sample of size n drawn from a distribution with CDF F(t), where + :math:`D_n &= sup_t |F_n(t) - F(t)|`, and + :math:`F_n(t)` is the Empirical Cumulative Distribution Function of the sample. + + Parameters + ---------- + n : integer, array_like + the number of samples + x : float, array_like + The K-S statistic, float between 0 and 1 + cdf : bool, optional + whether to compute the CDF(default=true) or the SF. + + Returns + ------- + cdf : ndarray + CDF (or SF it cdf is False) at the specified locations. + + The return value has shape the result of numpy broadcasting n and x. + """ + it = np.nditer([n, x, cdf, None], + op_dtypes=[None, np.float64, np.bool_, np.float64]) + for _n, _x, _cdf, z in it: + if np.isnan(_n): + z[...] = _n + continue + if int(_n) != _n: + raise ValueError(f'n is not integral: {_n}') + z[...] = _kolmogn(int(_n), _x, cdf=_cdf) + result = it.operands[-1] + return result + + +def kolmognp(n, x): + """Computes the PDF for the two-sided Kolmogorov-Smirnov distribution. + + Parameters + ---------- + n : integer, array_like + the number of samples + x : float, array_like + The K-S statistic, float between 0 and 1 + + Returns + ------- + pdf : ndarray + The PDF at the specified locations + + The return value has shape the result of numpy broadcasting n and x. + """ + it = np.nditer([n, x, None]) + for _n, _x, z in it: + if np.isnan(_n): + z[...] = _n + continue + if int(_n) != _n: + raise ValueError(f'n is not integral: {_n}') + z[...] = _kolmogn_p(int(_n), _x) + result = it.operands[-1] + return result + + +def kolmogni(n, q, cdf=True): + """Computes the PPF(or ISF) for the two-sided Kolmogorov-Smirnov distribution. 
+ + Parameters + ---------- + n : integer, array_like + the number of samples + q : float, array_like + Probabilities, float between 0 and 1 + cdf : bool, optional + whether to compute the PPF(default=true) or the ISF. + + Returns + ------- + ppf : ndarray + PPF (or ISF if cdf is False) at the specified locations + + The return value has shape the result of numpy broadcasting n and x. + """ + it = np.nditer([n, q, cdf, None]) + for _n, _q, _cdf, z in it: + if np.isnan(_n): + z[...] = _n + continue + if int(_n) != _n: + raise ValueError(f'n is not integral: {_n}') + _pcdf, _psf = (_q, 1-_q) if _cdf else (1-_q, _q) + z[...] = _kolmogni(int(_n), _pcdf, _psf) + result = it.operands[-1] + return result diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_mannwhitneyu.py b/venv/lib/python3.10/site-packages/scipy/stats/_mannwhitneyu.py new file mode 100644 index 0000000000000000000000000000000000000000..a796edf075e70627461927849b3f6fdb0788035f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_mannwhitneyu.py @@ -0,0 +1,519 @@ +import numpy as np +from collections import namedtuple +from scipy import special +from scipy import stats +from scipy.stats._stats_py import _rankdata +from ._axis_nan_policy import _axis_nan_policy_factory + + +def _broadcast_concatenate(x, y, axis): + '''Broadcast then concatenate arrays, leaving concatenation axis last''' + x = np.moveaxis(x, axis, -1) + y = np.moveaxis(y, axis, -1) + z = np.broadcast(x[..., 0], y[..., 0]) + x = np.broadcast_to(x, z.shape + (x.shape[-1],)) + y = np.broadcast_to(y, z.shape + (y.shape[-1],)) + z = np.concatenate((x, y), axis=-1) + return x, y, z + + +class _MWU: + '''Distribution of MWU statistic under the null hypothesis''' + # Possible improvement: if m and n are small enough, use integer arithmetic + + def __init__(self): + '''Minimal initializer''' + self._fmnks = -np.ones((1, 1, 1)) + self._recursive = None + + def pmf(self, k, m, n): + + # In practice, `pmf` is never called with k > m*n/2. + # If it were, we'd exploit symmetry here: + # k = np.array(k, copy=True) + # k2 = m*n - k + # i = k2 < k + # k[i] = k2[i] + + if (self._recursive is None and m <= 500 and n <= 500 + or self._recursive): + return self.pmf_recursive(k, m, n) + else: + return self.pmf_iterative(k, m, n) + + def pmf_recursive(self, k, m, n): + '''Probability mass function, recursive version''' + self._resize_fmnks(m, n, np.max(k)) + # could loop over just the unique elements, but probably not worth + # the time to find them + for i in np.ravel(k): + self._f(m, n, i) + return self._fmnks[m, n, k] / special.binom(m + n, m) + + def pmf_iterative(self, k, m, n): + '''Probability mass function, iterative version''' + fmnks = {} + for i in np.ravel(k): + fmnks = _mwu_f_iterative(m, n, i, fmnks) + return (np.array([fmnks[(m, n, ki)] for ki in k]) + / special.binom(m + n, m)) + + def cdf(self, k, m, n): + '''Cumulative distribution function''' + + # In practice, `cdf` is never called with k > m*n/2. + # If it were, we'd exploit symmetry here rather than in `sf` + pmfs = self.pmf(np.arange(0, np.max(k) + 1), m, n) + cdfs = np.cumsum(pmfs) + return cdfs[k] + + def sf(self, k, m, n): + '''Survival function''' + # Note that both CDF and SF include the PMF at k. The p-value is + # calculated from the SF and should include the mass at k, so this + # is desirable + + # Use the fact that the distribution is symmetric; i.e. 
+ # _f(m, n, m*n-k) = _f(m, n, k), and sum from the left + kc = np.asarray(m*n - k) # complement of k + i = k < kc + if np.any(i): + kc[i] = k[i] + cdfs = np.asarray(self.cdf(kc, m, n)) + cdfs[i] = 1. - cdfs[i] + self.pmf(kc[i], m, n) + else: + cdfs = np.asarray(self.cdf(kc, m, n)) + return cdfs[()] + + def _resize_fmnks(self, m, n, k): + '''If necessary, expand the array that remembers PMF values''' + # could probably use `np.pad` but I'm not sure it would save code + shape_old = np.array(self._fmnks.shape) + shape_new = np.array((m+1, n+1, k+1)) + if np.any(shape_new > shape_old): + shape = np.maximum(shape_old, shape_new) + fmnks = -np.ones(shape) # create the new array + m0, n0, k0 = shape_old + fmnks[:m0, :n0, :k0] = self._fmnks # copy remembered values + self._fmnks = fmnks + + def _f(self, m, n, k): + '''Recursive implementation of function of [3] Theorem 2.5''' + + # [3] Theorem 2.5 Line 1 + if k < 0 or m < 0 or n < 0 or k > m*n: + return 0 + + # if already calculated, return the value + if self._fmnks[m, n, k] >= 0: + return self._fmnks[m, n, k] + + if k == 0 and m >= 0 and n >= 0: # [3] Theorem 2.5 Line 2 + fmnk = 1 + else: # [3] Theorem 2.5 Line 3 / Equation 3 + fmnk = self._f(m-1, n, k-n) + self._f(m, n-1, k) + + self._fmnks[m, n, k] = fmnk # remember result + + return fmnk + + +# Maintain state for faster repeat calls to mannwhitneyu w/ method='exact' +_mwu_state = _MWU() + + +def _mwu_f_iterative(m, n, k, fmnks): + '''Iterative implementation of function of [3] Theorem 2.5''' + + def _base_case(m, n, k): + '''Base cases from recursive version''' + + # if already calculated, return the value + if fmnks.get((m, n, k), -1) >= 0: + return fmnks[(m, n, k)] + + # [3] Theorem 2.5 Line 1 + elif k < 0 or m < 0 or n < 0 or k > m*n: + return 0 + + # [3] Theorem 2.5 Line 2 + elif k == 0 and m >= 0 and n >= 0: + return 1 + + return None + + stack = [(m, n, k)] + fmnk = None + + while stack: + # Popping only if necessary would save a tiny bit of time, but NWI. + m, n, k = stack.pop() + + # If we're at a base case, continue (stack unwinds) + fmnk = _base_case(m, n, k) + if fmnk is not None: + fmnks[(m, n, k)] = fmnk + continue + + # If both terms are base cases, continue (stack unwinds) + f1 = _base_case(m-1, n, k-n) + f2 = _base_case(m, n-1, k) + if f1 is not None and f2 is not None: + # [3] Theorem 2.5 Line 3 / Equation 3 + fmnk = f1 + f2 + fmnks[(m, n, k)] = fmnk + continue + + # recurse deeper + stack.append((m, n, k)) + if f1 is None: + stack.append((m-1, n, k-n)) + if f2 is None: + stack.append((m, n-1, k)) + + return fmnks + + +def _get_mwu_z(U, n1, n2, t, axis=0, continuity=True): + '''Standardized MWU statistic''' + # Follows mannwhitneyu [2] + mu = n1 * n2 / 2 + n = n1 + n2 + + # Tie correction according to [2], "Normal approximation and tie correction" + # "A more computationally-efficient form..." + tie_term = (t**3 - t).sum(axis=-1) + s = np.sqrt(n1*n2/12 * ((n + 1) - tie_term/(n*(n-1)))) + + numerator = U - mu + + # Continuity correction. + # Because SF is always used to calculate the p-value, we can always + # _subtract_ 0.5 for the continuity correction. This always increases the + # p-value to account for the rest of the probability mass _at_ q = U. 
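+    # Altogether the standardized statistic is
+    #     z = (U - n1*n2/2 - 0.5) / sqrt(n1*n2/12 * ((n+1) - sum(t**3 - t)/(n*(n-1))))
+    # with the -0.5 term dropped when continuity is False.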
+ if continuity: + numerator -= 0.5 + + # no problem evaluating the norm SF at an infinity + with np.errstate(divide='ignore', invalid='ignore'): + z = numerator / s + return z + + +def _mwu_input_validation(x, y, use_continuity, alternative, axis, method): + ''' Input validation and standardization for mannwhitneyu ''' + # Would use np.asarray_chkfinite, but infs are OK + x, y = np.atleast_1d(x), np.atleast_1d(y) + if np.isnan(x).any() or np.isnan(y).any(): + raise ValueError('`x` and `y` must not contain NaNs.') + if np.size(x) == 0 or np.size(y) == 0: + raise ValueError('`x` and `y` must be of nonzero size.') + + bools = {True, False} + if use_continuity not in bools: + raise ValueError(f'`use_continuity` must be one of {bools}.') + + alternatives = {"two-sided", "less", "greater"} + alternative = alternative.lower() + if alternative not in alternatives: + raise ValueError(f'`alternative` must be one of {alternatives}.') + + axis_int = int(axis) + if axis != axis_int: + raise ValueError('`axis` must be an integer.') + + if not isinstance(method, stats.PermutationMethod): + methods = {"asymptotic", "exact", "auto"} + method = method.lower() + if method not in methods: + raise ValueError(f'`method` must be one of {methods}.') + + return x, y, use_continuity, alternative, axis_int, method + + +def _mwu_choose_method(n1, n2, ties): + """Choose method 'asymptotic' or 'exact' depending on input size, ties""" + + # if both inputs are large, asymptotic is OK + if n1 > 8 and n2 > 8: + return "asymptotic" + + # if there are any ties, asymptotic is preferred + if ties: + return "asymptotic" + + return "exact" + + +MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(MannwhitneyuResult, n_samples=2) +def mannwhitneyu(x, y, use_continuity=True, alternative="two-sided", + axis=0, method="auto"): + r'''Perform the Mann-Whitney U rank test on two independent samples. + + The Mann-Whitney U test is a nonparametric test of the null hypothesis + that the distribution underlying sample `x` is the same as the + distribution underlying sample `y`. It is often used as a test of + difference in location between distributions. + + Parameters + ---------- + x, y : array-like + N-d arrays of samples. The arrays must be broadcastable except along + the dimension given by `axis`. + use_continuity : bool, optional + Whether a continuity correction (1/2) should be applied. + Default is True when `method` is ``'asymptotic'``; has no effect + otherwise. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + Let *F(u)* and *G(u)* be the cumulative distribution functions of the + distributions underlying `x` and `y`, respectively. Then the following + alternative hypotheses are available: + + * 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)* for + at least one *u*. + * 'less': the distribution underlying `x` is stochastically less + than the distribution underlying `y`, i.e. *F(u) > G(u)* for all *u*. + * 'greater': the distribution underlying `x` is stochastically greater + than the distribution underlying `y`, i.e. *F(u) < G(u)* for all *u*. + + Note that the mathematical expressions in the alternative hypotheses + above describe the CDFs of the underlying distributions. The directions + of the inequalities appear inconsistent with the natural language + description at first glance, but they are not. 
For example, suppose + *X* and *Y* are random variables that follow distributions with CDFs + *F* and *G*, respectively. If *F(u) > G(u)* for all *u*, samples drawn + from *X* tend to be less than those drawn from *Y*. + + Under a more restrictive set of assumptions, the alternative hypotheses + can be expressed in terms of the locations of the distributions; + see [5] section 5.1. + axis : int, optional + Axis along which to perform the test. Default is 0. + method : {'auto', 'asymptotic', 'exact'} or `PermutationMethod` instance, optional + Selects the method used to calculate the *p*-value. + Default is 'auto'. The following options are available. + + * ``'asymptotic'``: compares the standardized test statistic + against the normal distribution, correcting for ties. + * ``'exact'``: computes the exact *p*-value by comparing the observed + :math:`U` statistic against the exact distribution of the :math:`U` + statistic under the null hypothesis. No correction is made for ties. + * ``'auto'``: chooses ``'exact'`` when the size of one of the samples + is less than or equal to 8 and there are no ties; + chooses ``'asymptotic'`` otherwise. + * `PermutationMethod` instance. In this case, the p-value + is computed using `permutation_test` with the provided + configuration options and other appropriate settings. + + Returns + ------- + res : MannwhitneyuResult + An object containing attributes: + + statistic : float + The Mann-Whitney U statistic corresponding with sample `x`. See + Notes for the test statistic corresponding with sample `y`. + pvalue : float + The associated *p*-value for the chosen `alternative`. + + Notes + ----- + If ``U1`` is the statistic corresponding with sample `x`, then the + statistic corresponding with sample `y` is + ``U2 = x.shape[axis] * y.shape[axis] - U1``. + + `mannwhitneyu` is for independent samples. For related / paired samples, + consider `scipy.stats.wilcoxon`. + + `method` ``'exact'`` is recommended when there are no ties and when either + sample size is less than 8 [1]_. The implementation follows the recurrence + relation originally proposed in [1]_ as it is described in [3]_. + Note that the exact method is *not* corrected for ties, but + `mannwhitneyu` will not raise errors or warnings if there are ties in the + data. If there are ties and either samples is small (fewer than ~10 + observations), consider passing an instance of `PermutationMethod` + as the `method` to perform a permutation test. + + The Mann-Whitney U test is a non-parametric version of the t-test for + independent samples. When the means of samples from the populations + are normally distributed, consider `scipy.stats.ttest_ind`. + + See Also + -------- + scipy.stats.wilcoxon, scipy.stats.ranksums, scipy.stats.ttest_ind + + References + ---------- + .. [1] H.B. Mann and D.R. Whitney, "On a test of whether one of two random + variables is stochastically larger than the other", The Annals of + Mathematical Statistics, Vol. 18, pp. 50-60, 1947. + .. [2] Mann-Whitney U Test, Wikipedia, + http://en.wikipedia.org/wiki/Mann-Whitney_U_test + .. [3] A. Di Bucchianico, "Combinatorics, computer algebra, and the + Wilcoxon-Mann-Whitney test", Journal of Statistical Planning and + Inference, Vol. 79, pp. 349-364, 1999. + .. [4] Rosie Shier, "Statistics: 2.3 The Mann-Whitney U Test", Mathematics + Learning Support Centre, 2004. + .. [5] Michael P. Fay and Michael A. Proschan. "Wilcoxon-Mann-Whitney + or t-test? On assumptions for hypothesis tests and multiple \ + interpretations of decision rules." 
Statistics surveys, Vol. 4, pp. + 1-39, 2010. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2857732/ + + Examples + -------- + We follow the example from [4]_: nine randomly sampled young adults were + diagnosed with type II diabetes at the ages below. + + >>> males = [19, 22, 16, 29, 24] + >>> females = [20, 11, 17, 12] + + We use the Mann-Whitney U test to assess whether there is a statistically + significant difference in the diagnosis age of males and females. + The null hypothesis is that the distribution of male diagnosis ages is + the same as the distribution of female diagnosis ages. We decide + that a confidence level of 95% is required to reject the null hypothesis + in favor of the alternative that the distributions are different. + Since the number of samples is very small and there are no ties in the + data, we can compare the observed test statistic against the *exact* + distribution of the test statistic under the null hypothesis. + + >>> from scipy.stats import mannwhitneyu + >>> U1, p = mannwhitneyu(males, females, method="exact") + >>> print(U1) + 17.0 + + `mannwhitneyu` always reports the statistic associated with the first + sample, which, in this case, is males. This agrees with :math:`U_M = 17` + reported in [4]_. The statistic associated with the second statistic + can be calculated: + + >>> nx, ny = len(males), len(females) + >>> U2 = nx*ny - U1 + >>> print(U2) + 3.0 + + This agrees with :math:`U_F = 3` reported in [4]_. The two-sided + *p*-value can be calculated from either statistic, and the value produced + by `mannwhitneyu` agrees with :math:`p = 0.11` reported in [4]_. + + >>> print(p) + 0.1111111111111111 + + The exact distribution of the test statistic is asymptotically normal, so + the example continues by comparing the exact *p*-value against the + *p*-value produced using the normal approximation. + + >>> _, pnorm = mannwhitneyu(males, females, method="asymptotic") + >>> print(pnorm) + 0.11134688653314041 + + Here `mannwhitneyu`'s reported *p*-value appears to conflict with the + value :math:`p = 0.09` given in [4]_. The reason is that [4]_ + does not apply the continuity correction performed by `mannwhitneyu`; + `mannwhitneyu` reduces the distance between the test statistic and the + mean :math:`\mu = n_x n_y / 2` by 0.5 to correct for the fact that the + discrete statistic is being compared against a continuous distribution. + Here, the :math:`U` statistic used is less than the mean, so we reduce + the distance by adding 0.5 in the numerator. + + >>> import numpy as np + >>> from scipy.stats import norm + >>> U = min(U1, U2) + >>> N = nx + ny + >>> z = (U - nx*ny/2 + 0.5) / np.sqrt(nx*ny * (N + 1)/ 12) + >>> p = 2 * norm.cdf(z) # use CDF to get p-value from smaller statistic + >>> print(p) + 0.11134688653314041 + + If desired, we can disable the continuity correction to get a result + that agrees with that reported in [4]_. + + >>> _, pnorm = mannwhitneyu(males, females, use_continuity=False, + ... method="asymptotic") + >>> print(pnorm) + 0.0864107329737 + + Regardless of whether we perform an exact or asymptotic test, the + probability of the test statistic being as extreme or more extreme by + chance exceeds 5%, so we do not consider the results statistically + significant. + + Suppose that, before seeing the data, we had hypothesized that females + would tend to be diagnosed at a younger age than males. 
+ In that case, it would be natural to provide the female ages as the + first input, and we would have performed a one-sided test using + ``alternative = 'less'``: females are diagnosed at an age that is + stochastically less than that of males. + + >>> res = mannwhitneyu(females, males, alternative="less", method="exact") + >>> print(res) + MannwhitneyuResult(statistic=3.0, pvalue=0.05555555555555555) + + Again, the probability of getting a sufficiently low value of the + test statistic by chance under the null hypothesis is greater than 5%, + so we do not reject the null hypothesis in favor of our alternative. + + If it is reasonable to assume that the means of samples from the + populations are normally distributed, we could have used a t-test to + perform the analysis. + + >>> from scipy.stats import ttest_ind + >>> res = ttest_ind(females, males, alternative="less") + >>> print(res) + Ttest_indResult(statistic=-2.239334696520584, pvalue=0.030068441095757924) + + Under this assumption, the *p*-value would be low enough to reject the + null hypothesis in favor of the alternative. + + ''' + + x, y, use_continuity, alternative, axis_int, method = ( + _mwu_input_validation(x, y, use_continuity, alternative, axis, method)) + + x, y, xy = _broadcast_concatenate(x, y, axis) + + n1, n2 = x.shape[-1], y.shape[-1] + + # Follows [2] + ranks, t = _rankdata(xy, 'average', return_ties=True) # method 2, step 1 + R1 = ranks[..., :n1].sum(axis=-1) # method 2, step 2 + U1 = R1 - n1*(n1+1)/2 # method 2, step 3 + U2 = n1 * n2 - U1 # as U1 + U2 = n1 * n2 + + if alternative == "greater": + U, f = U1, 1 # U is the statistic to use for p-value, f is a factor + elif alternative == "less": + U, f = U2, 1 # Due to symmetry, use SF of U2 rather than CDF of U1 + else: + U, f = np.maximum(U1, U2), 2 # multiply SF by two for two-sided test + + if method == "auto": + method = _mwu_choose_method(n1, n2, np.any(t > 1)) + + if method == "exact": + p = _mwu_state.sf(U.astype(int), min(n1, n2), max(n1, n2)) + elif method == "asymptotic": + z = _get_mwu_z(U, n1, n2, t, continuity=use_continuity) + p = stats.norm.sf(z) + else: # `PermutationMethod` instance (already validated) + def statistic(x, y, axis): + return mannwhitneyu(x, y, use_continuity=use_continuity, + alternative=alternative, axis=axis, + method="asymptotic").statistic + + res = stats.permutation_test((x, y), statistic, axis=axis, + **method._asdict(), alternative=alternative) + p = res.pvalue + f = 1 + + p *= f + + # Ensure that test statistic is not greater than 1 + # This could happen for exact test when U = m*n/2 + p = np.clip(p, 0, 1) + + return MannwhitneyuResult(U1, p) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_morestats.py b/venv/lib/python3.10/site-packages/scipy/stats/_morestats.py new file mode 100644 index 0000000000000000000000000000000000000000..8da9345041842106da5f05164155f65c3142e9b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_morestats.py @@ -0,0 +1,4933 @@ +from __future__ import annotations +import math +import warnings +from collections import namedtuple + +import numpy as np +from numpy import (isscalar, r_, log, around, unique, asarray, zeros, + arange, sort, amin, amax, sqrt, array, atleast_1d, # noqa: F401 + compress, pi, exp, ravel, count_nonzero, sin, cos, # noqa: F401 + arctan2, hypot) + +from scipy import optimize, special, interpolate, stats +from scipy._lib._bunch import _make_tuple_bunch +from scipy._lib._util import _rename_parameter, _contains_nan, _get_nan + +from 
._ansari_swilk_statistics import gscale, swilk +from . import _stats_py, _wilcoxon +from ._fit import FitResult +from ._stats_py import find_repeats, _get_pvalue, SignificanceResult # noqa: F401 +from .contingency import chi2_contingency +from . import distributions +from ._distn_infrastructure import rv_generic +from ._axis_nan_policy import _axis_nan_policy_factory + + +__all__ = ['mvsdist', + 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', + 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', + 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', + 'fligner', 'mood', 'wilcoxon', 'median_test', + 'circmean', 'circvar', 'circstd', 'anderson_ksamp', + 'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax', + 'yeojohnson_normplot', 'directional_stats', + 'false_discovery_control' + ] + + +Mean = namedtuple('Mean', ('statistic', 'minmax')) +Variance = namedtuple('Variance', ('statistic', 'minmax')) +Std_dev = namedtuple('Std_dev', ('statistic', 'minmax')) + + +def bayes_mvs(data, alpha=0.90): + r""" + Bayesian confidence intervals for the mean, var, and std. + + Parameters + ---------- + data : array_like + Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. + Requires 2 or more data points. + alpha : float, optional + Probability that the returned confidence interval contains + the true parameter. + + Returns + ------- + mean_cntr, var_cntr, std_cntr : tuple + The three results are for the mean, variance and standard deviation, + respectively. Each result is a tuple of the form:: + + (center, (lower, upper)) + + with `center` the mean of the conditional pdf of the value given the + data, and `(lower, upper)` a confidence interval, centered on the + median, containing the estimate to a probability ``alpha``. + + See Also + -------- + mvsdist + + Notes + ----- + Each tuple of mean, variance, and standard deviation estimates represent + the (center, (lower, upper)) with center the mean of the conditional pdf + of the value given the data and (lower, upper) is a confidence interval + centered on the median, containing the estimate to a probability + ``alpha``. + + Converts data to 1-D and assumes all data has the same mean and variance. + Uses Jeffrey's prior for variance and std. + + Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))`` + + References + ---------- + T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and + standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278, + 2006. 
+ + Examples + -------- + First a basic example to demonstrate the outputs: + + >>> from scipy import stats + >>> data = [6, 9, 12, 7, 8, 8, 13] + >>> mean, var, std = stats.bayes_mvs(data) + >>> mean + Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467)) + >>> var + Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...)) + >>> std + Std_dev(statistic=2.9724954732045084, + minmax=(1.7823367265645143, 4.945614605014631)) + + Now we generate some normally distributed random data, and get estimates of + mean and standard deviation with 95% confidence intervals for those + estimates: + + >>> n_samples = 100000 + >>> data = stats.norm.rvs(size=n_samples) + >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95) + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.hist(data, bins=100, density=True, label='Histogram of data') + >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean') + >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r', + ... alpha=0.2, label=r'Estimated mean (95% limits)') + >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale') + >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2, + ... label=r'Estimated scale (95% limits)') + + >>> ax.legend(fontsize=10) + >>> ax.set_xlim([-4, 4]) + >>> ax.set_ylim([0, 0.5]) + >>> plt.show() + + """ + m, v, s = mvsdist(data) + if alpha >= 1 or alpha <= 0: + raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." + % alpha) + + m_res = Mean(m.mean(), m.interval(alpha)) + v_res = Variance(v.mean(), v.interval(alpha)) + s_res = Std_dev(s.mean(), s.interval(alpha)) + + return m_res, v_res, s_res + + +def mvsdist(data): + """ + 'Frozen' distributions for mean, variance, and standard deviation of data. + + Parameters + ---------- + data : array_like + Input array. Converted to 1-D using ravel. + Requires 2 or more data-points. + + Returns + ------- + mdist : "frozen" distribution object + Distribution object representing the mean of the data. + vdist : "frozen" distribution object + Distribution object representing the variance of the data. + sdist : "frozen" distribution object + Distribution object representing the standard deviation of the data. + + See Also + -------- + bayes_mvs + + Notes + ----- + The return values from ``bayes_mvs(data)`` is equivalent to + ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. + + In other words, calling ``.mean()`` and ``.interval(0.90)`` + on the three distribution objects returned from this function will give + the same results that are returned from `bayes_mvs`. + + References + ---------- + T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and + standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278, + 2006. 
+ + Examples + -------- + >>> from scipy import stats + >>> data = [6, 9, 12, 7, 8, 8, 13] + >>> mean, var, std = stats.mvsdist(data) + + We now have frozen distribution objects "mean", "var" and "std" that we can + examine: + + >>> mean.mean() + 9.0 + >>> mean.interval(0.95) + (6.6120585482655692, 11.387941451734431) + >>> mean.std() + 1.1952286093343936 + + """ + x = ravel(data) + n = len(x) + if n < 2: + raise ValueError("Need at least 2 data-points.") + xbar = x.mean() + C = x.var() + if n > 1000: # gaussian approximations for large n + mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n)) + sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n))) + vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C) + else: + nm1 = n - 1 + fac = n * C / 2. + val = nm1 / 2. + mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1)) + sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac)) + vdist = distributions.invgamma(val, scale=fac) + return mdist, vdist, sdist + + +@_axis_nan_policy_factory( + lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None +) +def kstat(data, n=2): + r""" + Return the nth k-statistic (1<=n<=4 so far). + + The nth k-statistic k_n is the unique symmetric unbiased estimator of the + nth cumulant kappa_n. + + Parameters + ---------- + data : array_like + Input array. Note that n-D input gets flattened. + n : int, {1, 2, 3, 4}, optional + Default is equal to 2. + + Returns + ------- + kstat : float + The nth k-statistic. + + See Also + -------- + kstatvar : Returns an unbiased estimator of the variance of the k-statistic + moment : Returns the n-th central moment about the mean for a sample. + + Notes + ----- + For a sample size n, the first few k-statistics are given by: + + .. math:: + + k_{1} = \mu + k_{2} = \frac{n}{n-1} m_{2} + k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3} + k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)} + + where :math:`\mu` is the sample mean, :math:`m_2` is the sample + variance, and :math:`m_i` is the i-th sample central moment. + + References + ---------- + http://mathworld.wolfram.com/k-Statistic.html + + http://mathworld.wolfram.com/Cumulant.html + + Examples + -------- + >>> from scipy import stats + >>> from numpy.random import default_rng + >>> rng = default_rng() + + As sample size increases, n-th moment and n-th k-statistic converge to the + same number (although they aren't identical). In the case of the normal + distribution, they converge to zero. + + >>> for n in [2, 3, 4, 5, 6, 7]: + ... x = rng.normal(size=10**n) + ... m, k = stats.moment(x, 3), stats.kstat(x, 3) + ... 
print("%.3g %.3g %.3g" % (m, k, m-k)) + -0.631 -0.651 0.0194 # random + 0.0282 0.0283 -8.49e-05 + -0.0454 -0.0454 1.36e-05 + 7.53e-05 7.53e-05 -2.26e-09 + 0.00166 0.00166 -4.99e-09 + -2.88e-06 -2.88e-06 8.63e-13 + """ + if n > 4 or n < 1: + raise ValueError("k-statistics only supported for 1<=n<=4") + n = int(n) + S = np.zeros(n + 1, np.float64) + data = ravel(data) + N = data.size + + # raise ValueError on empty input + if N == 0: + raise ValueError("Data input must not be empty") + + # on nan input, return nan without warning + if np.isnan(np.sum(data)): + return np.nan + + for k in range(1, n + 1): + S[k] = np.sum(data**k, axis=0) + if n == 1: + return S[1] * 1.0/N + elif n == 2: + return (N*S[2] - S[1]**2.0) / (N*(N - 1.0)) + elif n == 3: + return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0)) + elif n == 4: + return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 - + 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / + (N*(N-1.0)*(N-2.0)*(N-3.0))) + else: + raise ValueError("Should not be here.") + + +@_axis_nan_policy_factory( + lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None +) +def kstatvar(data, n=2): + r"""Return an unbiased estimator of the variance of the k-statistic. + + See `kstat` for more details of the k-statistic. + + Parameters + ---------- + data : array_like + Input array. Note that n-D input gets flattened. + n : int, {1, 2}, optional + Default is equal to 2. + + Returns + ------- + kstatvar : float + The nth k-statistic variance. + + See Also + -------- + kstat : Returns the n-th k-statistic. + moment : Returns the n-th central moment about the mean for a sample. + + Notes + ----- + The variances of the first few k-statistics are given by: + + .. math:: + + var(k_{1}) = \frac{\kappa^2}{n} + var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1} + var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} + + \frac{9 \kappa^2_{3}}{n - 1} + + \frac{6 n \kappa^3_{2}}{(n-1) (n-2)} + var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} + + \frac{48 \kappa_{3} \kappa_5}{n - 1} + + \frac{34 \kappa^2_{4}}{n-1} + + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} + + \frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} + + \frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)} + """ # noqa: E501 + data = ravel(data) + N = len(data) + if n == 1: + return kstat(data, n=2) * 1.0/N + elif n == 2: + k2 = kstat(data, n=2) + k4 = kstat(data, n=4) + return (2*N*k2**2 + (N-1)*k4) / (N*(N+1)) + else: + raise ValueError("Only n=1 or n=2 supported.") + + +def _calc_uniform_order_statistic_medians(n): + """Approximations of uniform order statistic medians. + + Parameters + ---------- + n : int + Sample size. + + Returns + ------- + v : 1d float array + Approximations of the order statistic medians. + + References + ---------- + .. [1] James J. Filliben, "The Probability Plot Correlation Coefficient + Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. + + Examples + -------- + Order statistics of the uniform distribution on the unit interval + are marginally distributed according to beta distributions. 
+ The expectations of these order statistic are evenly spaced across + the interval, but the distributions are skewed in a way that + pushes the medians slightly towards the endpoints of the unit interval: + + >>> import numpy as np + >>> n = 4 + >>> k = np.arange(1, n+1) + >>> from scipy.stats import beta + >>> a = k + >>> b = n-k+1 + >>> beta.mean(a, b) + array([0.2, 0.4, 0.6, 0.8]) + >>> beta.median(a, b) + array([0.15910358, 0.38572757, 0.61427243, 0.84089642]) + + The Filliben approximation uses the exact medians of the smallest + and greatest order statistics, and the remaining medians are approximated + by points spread evenly across a sub-interval of the unit interval: + + >>> from scipy.stats._morestats import _calc_uniform_order_statistic_medians + >>> _calc_uniform_order_statistic_medians(n) + array([0.15910358, 0.38545246, 0.61454754, 0.84089642]) + + This plot shows the skewed distributions of the order statistics + of a sample of size four from a uniform distribution on the unit interval: + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(0.0, 1.0, num=50, endpoint=True) + >>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)] + >>> plt.figure() + >>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3]) + + """ + v = np.empty(n, dtype=np.float64) + v[-1] = 0.5**(1.0 / n) + v[0] = 1 - v[-1] + i = np.arange(2, n) + v[1:-1] = (i - 0.3175) / (n + 0.365) + return v + + +def _parse_dist_kw(dist, enforce_subclass=True): + """Parse `dist` keyword. + + Parameters + ---------- + dist : str or stats.distributions instance. + Several functions take `dist` as a keyword, hence this utility + function. + enforce_subclass : bool, optional + If True (default), `dist` needs to be a + `_distn_infrastructure.rv_generic` instance. + It can sometimes be useful to set this keyword to False, if a function + wants to accept objects that just look somewhat like such an instance + (for example, they have a ``ppf`` method). + + """ + if isinstance(dist, rv_generic): + pass + elif isinstance(dist, str): + try: + dist = getattr(distributions, dist) + except AttributeError as e: + raise ValueError("%s is not a valid distribution name" % dist) from e + elif enforce_subclass: + msg = ("`dist` should be a stats.distributions instance or a string " + "with the name of such a distribution.") + raise ValueError(msg) + + return dist + + +def _add_axis_labels_title(plot, xlabel, ylabel, title): + """Helper function to add axes labels and a title to stats plots.""" + try: + if hasattr(plot, 'set_title'): + # Matplotlib Axes instance or something that looks like it + plot.set_title(title) + plot.set_xlabel(xlabel) + plot.set_ylabel(ylabel) + else: + # matplotlib.pyplot module + plot.title(title) + plot.xlabel(xlabel) + plot.ylabel(ylabel) + except Exception: + # Not an MPL object or something that looks (enough) like it. + # Don't crash on adding labels or title + pass + + +def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False): + """ + Calculate quantiles for a probability plot, and optionally show the plot. + + Generates a probability plot of sample data against the quantiles of a + specified theoretical distribution (the normal distribution by default). + `probplot` optionally calculates a best-fit line for the data and plots the + results using Matplotlib or a given plot function. + + Parameters + ---------- + x : array_like + Sample/response data from which `probplot` creates the plot. 
+ sparams : tuple, optional + Distribution-specific shape parameters (shape parameters plus location + and scale). + dist : str or stats.distributions instance, optional + Distribution or distribution function name. The default is 'norm' for a + normal probability plot. Objects that look enough like a + stats.distributions instance (i.e. they have a ``ppf`` method) are also + accepted. + fit : bool, optional + Fit a least-squares regression (best-fit) line to the sample data if + True (default). + plot : object, optional + If given, plots the quantiles. + If given and `fit` is True, also plots the least squares fit. + `plot` is an object that has to have methods "plot" and "text". + The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, + or a custom object with the same methods. + Default is None, which means that no plot is created. + rvalue : bool, optional + If `plot` is provided and `fit` is True, setting `rvalue` to True + includes the coefficient of determination on the plot. + Default is False. + + Returns + ------- + (osm, osr) : tuple of ndarrays + Tuple of theoretical quantiles (osm, or order statistic medians) and + ordered responses (osr). `osr` is simply sorted input `x`. + For details on how `osm` is calculated see the Notes section. + (slope, intercept, r) : tuple of floats, optional + Tuple containing the result of the least-squares fit, if that is + performed by `probplot`. `r` is the square root of the coefficient of + determination. If ``fit=False`` and ``plot=None``, this tuple is not + returned. + + Notes + ----- + Even if `plot` is given, the figure is not shown or saved by `probplot`; + ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after + calling `probplot`. + + `probplot` generates a probability plot, which should not be confused with + a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this + type, see ``statsmodels.api.ProbPlot``. + + The formula used for the theoretical quantiles (horizontal axis of the + probability plot) is Filliben's estimate:: + + quantiles = dist.ppf(val), for + + 0.5**(1/n), for i = n + val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1 + 1 - 0.5**(1/n), for i = 1 + + where ``i`` indicates the i-th ordered value and ``n`` is the total number + of values. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> nsample = 100 + >>> rng = np.random.default_rng() + + A t distribution with small degrees of freedom: + + >>> ax1 = plt.subplot(221) + >>> x = stats.t.rvs(3, size=nsample, random_state=rng) + >>> res = stats.probplot(x, plot=plt) + + A t distribution with larger degrees of freedom: + + >>> ax2 = plt.subplot(222) + >>> x = stats.t.rvs(25, size=nsample, random_state=rng) + >>> res = stats.probplot(x, plot=plt) + + A mixture of two normal distributions with broadcasting: + + >>> ax3 = plt.subplot(223) + >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], + ... 
size=(nsample//2,2), random_state=rng).ravel() + >>> res = stats.probplot(x, plot=plt) + + A standard normal distribution: + + >>> ax4 = plt.subplot(224) + >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample, random_state=rng) + >>> res = stats.probplot(x, plot=plt) + + Produce a new figure with a loggamma distribution, using the ``dist`` and + ``sparams`` keywords: + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> x = stats.loggamma.rvs(c=2.5, size=500, random_state=rng) + >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax) + >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5") + + Show the results with Matplotlib: + + >>> plt.show() + + """ + x = np.asarray(x) + if x.size == 0: + if fit: + return (x, x), (np.nan, np.nan, 0.0) + else: + return x, x + + osm_uniform = _calc_uniform_order_statistic_medians(len(x)) + dist = _parse_dist_kw(dist, enforce_subclass=False) + if sparams is None: + sparams = () + if isscalar(sparams): + sparams = (sparams,) + if not isinstance(sparams, tuple): + sparams = tuple(sparams) + + osm = dist.ppf(osm_uniform, *sparams) + osr = sort(x) + if fit: + # perform a linear least squares fit. + slope, intercept, r, prob, _ = _stats_py.linregress(osm, osr) + + if plot is not None: + plot.plot(osm, osr, 'bo') + if fit: + plot.plot(osm, slope*osm + intercept, 'r-') + _add_axis_labels_title(plot, xlabel='Theoretical quantiles', + ylabel='Ordered Values', + title='Probability Plot') + + # Add R^2 value to the plot as text + if fit and rvalue: + xmin = amin(osm) + xmax = amax(osm) + ymin = amin(x) + ymax = amax(x) + posx = xmin + 0.70 * (xmax - xmin) + posy = ymin + 0.01 * (ymax - ymin) + plot.text(posx, posy, "$R^2=%1.4f$" % r**2) + + if fit: + return (osm, osr), (slope, intercept, r) + else: + return osm, osr + + +def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'): + """Calculate the shape parameter that maximizes the PPCC. + + The probability plot correlation coefficient (PPCC) plot can be used + to determine the optimal shape parameter for a one-parameter family + of distributions. ``ppcc_max`` returns the shape parameter that would + maximize the probability plot correlation coefficient for the given + data to a one-parameter family of distributions. + + Parameters + ---------- + x : array_like + Input array. + brack : tuple, optional + Triple (a,b,c) where (a>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> c = 2.5 + >>> x = stats.weibull_min.rvs(c, scale=4, size=2000, random_state=rng) + + Generate the PPCC plot for this data with the Weibull distribution. + + >>> fig, ax = plt.subplots(figsize=(8, 6)) + >>> res = stats.ppcc_plot(x, c/2, 2*c, dist='weibull_min', plot=ax) + + We calculate the value where the shape should reach its maximum and a + red line is drawn there. The line should coincide with the highest + point in the PPCC graph. 
+ + >>> cmax = stats.ppcc_max(x, brack=(c/2, 2*c), dist='weibull_min') + >>> ax.axvline(cmax, color='r') + >>> plt.show() + + """ + dist = _parse_dist_kw(dist) + osm_uniform = _calc_uniform_order_statistic_medians(len(x)) + osr = sort(x) + + # this function computes the x-axis values of the probability plot + # and computes a linear regression (including the correlation) + # and returns 1-r so that a minimization function maximizes the + # correlation + def tempfunc(shape, mi, yvals, func): + xvals = func(mi, shape) + r, prob = _stats_py.pearsonr(xvals, yvals) + return 1 - r + + return optimize.brent(tempfunc, brack=brack, + args=(osm_uniform, osr, dist.ppf)) + + +def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80): + """Calculate and optionally plot probability plot correlation coefficient. + + The probability plot correlation coefficient (PPCC) plot can be used to + determine the optimal shape parameter for a one-parameter family of + distributions. It cannot be used for distributions without shape + parameters + (like the normal distribution) or with multiple shape parameters. + + By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A + Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed + distributions via an approximately normal one, and is therefore + particularly useful in practice. + + Parameters + ---------- + x : array_like + Input array. + a, b : scalar + Lower and upper bounds of the shape parameter to use. + dist : str or stats.distributions instance, optional + Distribution or distribution function name. Objects that look enough + like a stats.distributions instance (i.e. they have a ``ppf`` method) + are also accepted. The default is ``'tukeylambda'``. + plot : object, optional + If given, plots PPCC against the shape parameter. + `plot` is an object that has to have methods "plot" and "text". + The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, + or a custom object with the same methods. + Default is None, which means that no plot is created. + N : int, optional + Number of points on the horizontal axis (equally distributed from + `a` to `b`). + + Returns + ------- + svals : ndarray + The shape values for which `ppcc` was calculated. + ppcc : ndarray + The calculated probability plot correlation coefficient values. + + See Also + -------- + ppcc_max, probplot, boxcox_normplot, tukeylambda + + References + ---------- + J.J. Filliben, "The Probability Plot Correlation Coefficient Test for + Normality", Technometrics, Vol. 17, pp. 111-117, 1975. + + Examples + -------- + First we generate some random data from a Weibull distribution + with shape parameter 2.5, and plot the histogram of the data: + + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> c = 2.5 + >>> x = stats.weibull_min.rvs(c, scale=4, size=2000, random_state=rng) + + Take a look at the histogram of the data. + + >>> fig1, ax = plt.subplots(figsize=(9, 4)) + >>> ax.hist(x, bins=50) + >>> ax.set_title('Histogram of x') + >>> plt.show() + + Now we explore this data with a PPCC plot as well as the related + probability plot and Box-Cox normplot. 
A red line is drawn where we + expect the PPCC value to be maximal (at the shape parameter ``c`` + used above): + + >>> fig2 = plt.figure(figsize=(12, 4)) + >>> ax1 = fig2.add_subplot(1, 3, 1) + >>> ax2 = fig2.add_subplot(1, 3, 2) + >>> ax3 = fig2.add_subplot(1, 3, 3) + >>> res = stats.probplot(x, plot=ax1) + >>> res = stats.boxcox_normplot(x, -4, 4, plot=ax2) + >>> res = stats.ppcc_plot(x, c/2, 2*c, dist='weibull_min', plot=ax3) + >>> ax3.axvline(c, color='r') + >>> plt.show() + + """ + if b <= a: + raise ValueError("`b` has to be larger than `a`.") + + svals = np.linspace(a, b, num=N) + ppcc = np.empty_like(svals) + for k, sval in enumerate(svals): + _, r2 = probplot(x, sval, dist=dist, fit=True) + ppcc[k] = r2[-1] + + if plot is not None: + plot.plot(svals, ppcc, 'x') + _add_axis_labels_title(plot, xlabel='Shape Values', + ylabel='Prob Plot Corr. Coef.', + title='(%s) PPCC Plot' % dist) + + return svals, ppcc + + +def _log_mean(logx): + # compute log of mean of x from log(x) + return special.logsumexp(logx, axis=0) - np.log(len(logx)) + + +def _log_var(logx): + # compute log of variance of x from log(x) + logmean = _log_mean(logx) + pij = np.full_like(logx, np.pi * 1j, dtype=np.complex128) + logxmu = special.logsumexp([logx, logmean + pij], axis=0) + return np.real(special.logsumexp(2 * logxmu, axis=0)) - np.log(len(logx)) + + +def boxcox_llf(lmb, data): + r"""The boxcox log-likelihood function. + + Parameters + ---------- + lmb : scalar + Parameter for Box-Cox transformation. See `boxcox` for details. + data : array_like + Data to calculate Box-Cox log-likelihood for. If `data` is + multi-dimensional, the log-likelihood is calculated along the first + axis. + + Returns + ------- + llf : float or ndarray + Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`, + an array otherwise. + + See Also + -------- + boxcox, probplot, boxcox_normplot, boxcox_normmax + + Notes + ----- + The Box-Cox log-likelihood function is defined here as + + .. math:: + + llf = (\lambda - 1) \sum_i(\log(x_i)) - + N/2 \log(\sum_i (y_i - \bar{y})^2 / N), + + where ``y`` is the Box-Cox transformed input data ``x``. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes + + Generate some random variates and calculate Box-Cox log-likelihood values + for them for a range of ``lmbda`` values: + + >>> rng = np.random.default_rng() + >>> x = stats.loggamma.rvs(5, loc=10, size=1000, random_state=rng) + >>> lmbdas = np.linspace(-2, 10) + >>> llf = np.zeros(lmbdas.shape, dtype=float) + >>> for ii, lmbda in enumerate(lmbdas): + ... llf[ii] = stats.boxcox_llf(lmbda, x) + + Also find the optimal lmbda value with `boxcox`: + + >>> x_most_normal, lmbda_optimal = stats.boxcox(x) + + Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a + horizontal line to check that that's really the optimum: + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(lmbdas, llf, 'b.-') + >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r') + >>> ax.set_xlabel('lmbda parameter') + >>> ax.set_ylabel('Box-Cox log-likelihood') + + Now add some probability plots to show that where the log-likelihood is + maximized the data transformed with `boxcox` looks closest to normal: + + >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' + >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): + ... xt = stats.boxcox(x, lmbda=lmbda) + ... 
(osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) + ... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) + ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') + ... ax_inset.set_xticklabels([]) + ... ax_inset.set_yticklabels([]) + ... ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda) + + >>> plt.show() + + """ + data = np.asarray(data) + N = data.shape[0] + if N == 0: + return np.nan + + logdata = np.log(data) + + # Compute the variance of the transformed data. + if lmb == 0: + logvar = np.log(np.var(logdata, axis=0)) + else: + # Transform without the constant offset 1/lmb. The offset does + # not affect the variance, and the subtraction of the offset can + # lead to loss of precision. + # Division by lmb can be factored out to enhance numerical stability. + logx = lmb * logdata + logvar = _log_var(logx) - 2 * np.log(abs(lmb)) + + return (lmb - 1) * np.sum(logdata, axis=0) - N/2 * logvar + + +def _boxcox_conf_interval(x, lmax, alpha): + # Need to find the lambda for which + # f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1 + fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1) + target = boxcox_llf(lmax, x) - fac + + def rootfunc(lmbda, data, target): + return boxcox_llf(lmbda, data) - target + + # Find positive endpoint of interval in which answer is to be found + newlm = lmax + 0.5 + N = 0 + while (rootfunc(newlm, x, target) > 0.0) and (N < 500): + newlm += 0.1 + N += 1 + + if N == 500: + raise RuntimeError("Could not find endpoint.") + + lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target)) + + # Now find negative interval in the same way + newlm = lmax - 0.5 + N = 0 + while (rootfunc(newlm, x, target) > 0.0) and (N < 500): + newlm -= 0.1 + N += 1 + + if N == 500: + raise RuntimeError("Could not find endpoint.") + + lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target)) + return lmminus, lmplus + + +def boxcox(x, lmbda=None, alpha=None, optimizer=None): + r"""Return a dataset transformed by a Box-Cox power transformation. + + Parameters + ---------- + x : ndarray + Input array to be transformed. + + If `lmbda` is not None, this is an alias of + `scipy.special.boxcox`. + Returns nan if ``x < 0``; returns -inf if ``x == 0 and lmbda < 0``. + + If `lmbda` is None, array must be positive, 1-dimensional, and + non-constant. + + lmbda : scalar, optional + If `lmbda` is None (default), find the value of `lmbda` that maximizes + the log-likelihood function and return it as the second output + argument. + + If `lmbda` is not None, do the transformation for that value. + + alpha : float, optional + If `lmbda` is None and `alpha` is not None (default), return the + ``100 * (1-alpha)%`` confidence interval for `lmbda` as the third + output argument. Must be between 0.0 and 1.0. + + If `lmbda` is not None, `alpha` is ignored. + optimizer : callable, optional + If `lmbda` is None, `optimizer` is the scalar optimizer used to find + the value of `lmbda` that minimizes the negative log-likelihood + function. `optimizer` is a callable that accepts one argument: + + fun : callable + The objective function, which evaluates the negative + log-likelihood function at a provided value of `lmbda` + + and returns an object, such as an instance of + `scipy.optimize.OptimizeResult`, which holds the optimal value of + `lmbda` in an attribute `x`. + + See the example in `boxcox_normmax` or the documentation of + `scipy.optimize.minimize_scalar` for more information. + + If `lmbda` is not None, `optimizer` is ignored. 
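# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The Box-Cox log-likelihood used above can be cross-checked against a naive
# implementation of the docstring formula; data and lambda below are arbitrary.
import numpy as np
from scipy import stats, special

rng = np.random.default_rng(0)
x = rng.lognormal(size=500)          # positive data
lmb = 0.7

y = special.boxcox(x, lmb)           # transformed data
naive_llf = (lmb - 1) * np.log(x).sum() - len(x) / 2 * np.log(y.var())
print(np.isclose(naive_llf, stats.boxcox_llf(lmb, x)))   # expected: True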
+ + Returns + ------- + boxcox : ndarray + Box-Cox power transformed array. + maxlog : float, optional + If the `lmbda` parameter is None, the second returned argument is + the `lmbda` that maximizes the log-likelihood function. + (min_ci, max_ci) : tuple of float, optional + If `lmbda` parameter is None and `alpha` is not None, this returned + tuple of floats represents the minimum and maximum confidence limits + given `alpha`. + + See Also + -------- + probplot, boxcox_normplot, boxcox_normmax, boxcox_llf + + Notes + ----- + The Box-Cox transform is given by:: + + y = (x**lmbda - 1) / lmbda, for lmbda != 0 + log(x), for lmbda = 0 + + `boxcox` requires the input data to be positive. Sometimes a Box-Cox + transformation provides a shift parameter to achieve this; `boxcox` does + not. Such a shift parameter is equivalent to adding a positive constant to + `x` before calling `boxcox`. + + The confidence limits returned when `alpha` is provided give the interval + where: + + .. math:: + + llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1), + + with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared + function. + + References + ---------- + G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the + Royal Statistical Society B, 26, 211-252 (1964). + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + We generate some random variates from a non-normal distribution and make a + probability plot for it, to show it is non-normal in the tails: + + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(211) + >>> x = stats.loggamma.rvs(5, size=500) + 5 + >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1) + >>> ax1.set_xlabel('') + >>> ax1.set_title('Probplot against normal distribution') + + We now use `boxcox` to transform the data so it's closest to normal: + + >>> ax2 = fig.add_subplot(212) + >>> xt, _ = stats.boxcox(x) + >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) + >>> ax2.set_title('Probplot after Box-Cox transformation') + + >>> plt.show() + + """ + x = np.asarray(x) + + if lmbda is not None: # single transformation + return special.boxcox(x, lmbda) + + if x.ndim != 1: + raise ValueError("Data must be 1-dimensional.") + + if x.size == 0: + return x + + if np.all(x == x[0]): + raise ValueError("Data must not be constant.") + + if np.any(x <= 0): + raise ValueError("Data must be positive.") + + # If lmbda=None, find the lmbda that maximizes the log-likelihood function. + lmax = boxcox_normmax(x, method='mle', optimizer=optimizer) + y = boxcox(x, lmax) + + if alpha is None: + return y, lmax + else: + # Find confidence interval + interval = _boxcox_conf_interval(x, lmax, alpha) + return y, lmax, interval + + +def _boxcox_inv_lmbda(x, y): + # compute lmbda given x and y for Box-Cox transformation + num = special.lambertw(-(x ** (-1 / y)) * np.log(x) / y, k=-1) + return np.real(-num / np.log(x) - 1 / y) + + +class _BigFloat: + def __repr__(self): + return "BIG_FLOAT" + + +def boxcox_normmax( + x, brack=None, method='pearsonr', optimizer=None, *, ymax=_BigFloat() +): + """Compute optimal Box-Cox transform parameter for input data. + + Parameters + ---------- + x : array_like + Input array. All entries must be positive, finite, real numbers. + brack : 2-tuple, optional, default (-2.0, 2.0) + The starting interval for a downhill bracket search for the default + `optimize.brent` solver. Note that this is in most cases not + critical; the final result is allowed to be outside this bracket. 
+ If `optimizer` is passed, `brack` must be None. + method : str, optional + The method to determine the optimal transform parameter (`boxcox` + ``lmbda`` parameter). Options are: + + 'pearsonr' (default) + Maximizes the Pearson correlation coefficient between + ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be + normally-distributed. + + 'mle' + Maximizes the log-likelihood `boxcox_llf`. This is the method used + in `boxcox`. + + 'all' + Use all optimization methods available, and return all results. + Useful to compare different methods. + optimizer : callable, optional + `optimizer` is a callable that accepts one argument: + + fun : callable + The objective function to be minimized. `fun` accepts one argument, + the Box-Cox transform parameter `lmbda`, and returns the value of + the function (e.g., the negative log-likelihood) at the provided + argument. The job of `optimizer` is to find the value of `lmbda` + that *minimizes* `fun`. + + and returns an object, such as an instance of + `scipy.optimize.OptimizeResult`, which holds the optimal value of + `lmbda` in an attribute `x`. + + See the example below or the documentation of + `scipy.optimize.minimize_scalar` for more information. + ymax : float, optional + The unconstrained optimal transform parameter may cause Box-Cox + transformed data to have extreme magnitude or even overflow. + This parameter constrains MLE optimization such that the magnitude + of the transformed `x` does not exceed `ymax`. The default is + the maximum value of the input dtype. If set to infinity, + `boxcox_normmax` returns the unconstrained optimal lambda. + Ignored when ``method='pearsonr'``. + + Returns + ------- + maxlog : float or ndarray + The optimal transform parameter found. An array instead of a scalar + for ``method='all'``. + + See Also + -------- + boxcox, boxcox_llf, boxcox_normplot, scipy.optimize.minimize_scalar + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + We can generate some data and determine the optimal ``lmbda`` in various + ways: + + >>> rng = np.random.default_rng() + >>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5 + >>> y, lmax_mle = stats.boxcox(x) + >>> lmax_pearsonr = stats.boxcox_normmax(x) + + >>> lmax_mle + 2.217563431465757 + >>> lmax_pearsonr + 2.238318660200961 + >>> stats.boxcox_normmax(x, method='all') + array([2.23831866, 2.21756343]) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax) + >>> ax.axvline(lmax_mle, color='r') + >>> ax.axvline(lmax_pearsonr, color='g', ls='--') + + >>> plt.show() + + Alternatively, we can define our own `optimizer` function. Suppose we + are only interested in values of `lmbda` on the interval [6, 7], we + want to use `scipy.optimize.minimize_scalar` with ``method='bounded'``, + and we want to use tighter tolerances when optimizing the log-likelihood + function. To do this, we define a function that accepts positional argument + `fun` and uses `scipy.optimize.minimize_scalar` to minimize `fun` subject + to the provided bounds and tolerances: + + >>> from scipy import optimize + >>> options = {'xatol': 1e-12} # absolute tolerance on `x` + >>> def optimizer(fun): + ... return optimize.minimize_scalar(fun, bounds=(6, 7), + ... method="bounded", options=options) + >>> stats.boxcox_normmax(x, optimizer=optimizer) + 6.000... + """ + x = np.asarray(x) + end_msg = "exceed specified `ymax`." 
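# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The `ymax` constraint described above effectively caps lambda at the value for
# which the transform of the most extreme data point equals `ymax`.  One way to
# see this numerically (values are arbitrary; the bracket assumes x_extreme > 1):
import numpy as np
from scipy import optimize, special

x_extreme, ymax_demo = 50.0, 1e4
lam = optimize.brentq(lambda lmb: special.boxcox(x_extreme, lmb) - ymax_demo,
                      1e-8, 10.0)
print(np.isclose(special.boxcox(x_extreme, lam), ymax_demo))   # expected: True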
+ if isinstance(ymax, _BigFloat): + dtype = x.dtype if np.issubdtype(x.dtype, np.floating) else np.float64 + # 10000 is a safety factor because `special.boxcox` overflows prematurely. + ymax = np.finfo(dtype).max / 10000 + end_msg = f"overflow in {dtype}." + elif ymax <= 0: + raise ValueError("`ymax` must be strictly positive") + + # If optimizer is not given, define default 'brent' optimizer. + if optimizer is None: + + # Set default value for `brack`. + if brack is None: + brack = (-2.0, 2.0) + + def _optimizer(func, args): + return optimize.brent(func, args=args, brack=brack) + + # Otherwise check optimizer. + else: + if not callable(optimizer): + raise ValueError("`optimizer` must be a callable") + + if brack is not None: + raise ValueError("`brack` must be None if `optimizer` is given") + + # `optimizer` is expected to return a `OptimizeResult` object, we here + # get the solution to the optimization problem. + def _optimizer(func, args): + def func_wrapped(x): + return func(x, *args) + return getattr(optimizer(func_wrapped), 'x', None) + + def _pearsonr(x): + osm_uniform = _calc_uniform_order_statistic_medians(len(x)) + xvals = distributions.norm.ppf(osm_uniform) + + def _eval_pearsonr(lmbda, xvals, samps): + # This function computes the x-axis values of the probability plot + # and computes a linear regression (including the correlation) and + # returns ``1 - r`` so that a minimization function maximizes the + # correlation. + y = boxcox(samps, lmbda) + yvals = np.sort(y) + r, prob = _stats_py.pearsonr(xvals, yvals) + return 1 - r + + return _optimizer(_eval_pearsonr, args=(xvals, x)) + + def _mle(x): + def _eval_mle(lmb, data): + # function to minimize + return -boxcox_llf(lmb, data) + + return _optimizer(_eval_mle, args=(x,)) + + def _all(x): + maxlog = np.empty(2, dtype=float) + maxlog[0] = _pearsonr(x) + maxlog[1] = _mle(x) + return maxlog + + methods = {'pearsonr': _pearsonr, + 'mle': _mle, + 'all': _all} + if method not in methods.keys(): + raise ValueError("Method %s not recognized." 
% method) + + optimfunc = methods[method] + + try: + res = optimfunc(x) + except ValueError as e: + if "infs or NaNs" in str(e): + message = ("The `x` argument of `boxcox_normmax` must contain " + "only positive, finite, real numbers.") + raise ValueError(message) from e + else: + raise e + + if res is None: + message = ("The `optimizer` argument of `boxcox_normmax` must return " + "an object containing the optimal `lmbda` in attribute `x`.") + raise ValueError(message) + elif not np.isinf(ymax): # adjust the final lambda + # x > 1, boxcox(x) > 0; x < 1, boxcox(x) < 0 + xmax, xmin = np.max(x), np.min(x) + if xmin >= 1: + x_treme = xmax + elif xmax <= 1: + x_treme = xmin + else: # xmin < 1 < xmax + indicator = special.boxcox(xmax, res) > abs(special.boxcox(xmin, res)) + if isinstance(res, np.ndarray): + indicator = indicator[1] # select corresponds with 'mle' + x_treme = xmax if indicator else xmin + + mask = abs(special.boxcox(x_treme, res)) > ymax + if np.any(mask): + message = ( + f"The optimal lambda is {res}, but the returned lambda is the " + f"constrained optimum to ensure that the maximum or the minimum " + f"of the transformed data does not " + end_msg + ) + warnings.warn(message, stacklevel=2) + + # Return the constrained lambda to ensure the transformation + # does not cause overflow or exceed specified `ymax` + constrained_res = _boxcox_inv_lmbda(x_treme, ymax * np.sign(x_treme - 1)) + + if isinstance(res, np.ndarray): + res[mask] = constrained_res + else: + res = constrained_res + return res + + +def _normplot(method, x, la, lb, plot=None, N=80): + """Compute parameters for a Box-Cox or Yeo-Johnson normality plot, + optionally show it. + + See `boxcox_normplot` or `yeojohnson_normplot` for details. + """ + + if method == 'boxcox': + title = 'Box-Cox Normality Plot' + transform_func = boxcox + else: + title = 'Yeo-Johnson Normality Plot' + transform_func = yeojohnson + + x = np.asarray(x) + if x.size == 0: + return x + + if lb <= la: + raise ValueError("`lb` has to be larger than `la`.") + + if method == 'boxcox' and np.any(x <= 0): + raise ValueError("Data must be positive.") + + lmbdas = np.linspace(la, lb, num=N) + ppcc = lmbdas * 0.0 + for i, val in enumerate(lmbdas): + # Determine for each lmbda the square root of correlation coefficient + # of transformed x + z = transform_func(x, lmbda=val) + _, (_, _, r) = probplot(z, dist='norm', fit=True) + ppcc[i] = r + + if plot is not None: + plot.plot(lmbdas, ppcc, 'x') + _add_axis_labels_title(plot, xlabel='$\\lambda$', + ylabel='Prob Plot Corr. Coef.', + title=title) + + return lmbdas, ppcc + + +def boxcox_normplot(x, la, lb, plot=None, N=80): + """Compute parameters for a Box-Cox normality plot, optionally show it. + + A Box-Cox normality plot shows graphically what the best transformation + parameter is to use in `boxcox` to obtain a distribution that is close + to normal. + + Parameters + ---------- + x : array_like + Input array. + la, lb : scalar + The lower and upper bounds for the ``lmbda`` values to pass to `boxcox` + for Box-Cox transformations. These are also the limits of the + horizontal axis of the plot if that is generated. + plot : object, optional + If given, plots the quantiles and least squares fit. + `plot` is an object that has to have methods "plot" and "text". + The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, + or a custom object with the same methods. + Default is None, which means that no plot is created. 
+ N : int, optional + Number of points on the horizontal axis (equally distributed from + `la` to `lb`). + + Returns + ------- + lmbdas : ndarray + The ``lmbda`` values for which a Box-Cox transform was done. + ppcc : ndarray + Probability Plot Correlelation Coefficient, as obtained from `probplot` + when fitting the Box-Cox transformed input `x` against a normal + distribution. + + See Also + -------- + probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max + + Notes + ----- + Even if `plot` is given, the figure is not shown or saved by + `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` + should be used after calling `probplot`. + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Generate some non-normally distributed data, and create a Box-Cox plot: + + >>> x = stats.loggamma.rvs(5, size=500) + 5 + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax) + + Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in + the same plot: + + >>> _, maxlog = stats.boxcox(x) + >>> ax.axvline(maxlog, color='r') + + >>> plt.show() + + """ + return _normplot('boxcox', x, la, lb, plot, N) + + +def yeojohnson(x, lmbda=None): + r"""Return a dataset transformed by a Yeo-Johnson power transformation. + + Parameters + ---------- + x : ndarray + Input array. Should be 1-dimensional. + lmbda : float, optional + If ``lmbda`` is ``None``, find the lambda that maximizes the + log-likelihood function and return it as the second output argument. + Otherwise the transformation is done for the given value. + + Returns + ------- + yeojohnson: ndarray + Yeo-Johnson power transformed array. + maxlog : float, optional + If the `lmbda` parameter is None, the second returned argument is + the lambda that maximizes the log-likelihood function. + + See Also + -------- + probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox + + Notes + ----- + The Yeo-Johnson transform is given by:: + + y = ((x + 1)**lmbda - 1) / lmbda, for x >= 0, lmbda != 0 + log(x + 1), for x >= 0, lmbda = 0 + -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda), for x < 0, lmbda != 2 + -log(-x + 1), for x < 0, lmbda = 2 + + Unlike `boxcox`, `yeojohnson` does not require the input data to be + positive. + + .. versionadded:: 1.2.0 + + + References + ---------- + I. Yeo and R.A. 
Johnson, "A New Family of Power Transformations to + Improve Normality or Symmetry", Biometrika 87.4 (2000): + + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + We generate some random variates from a non-normal distribution and make a + probability plot for it, to show it is non-normal in the tails: + + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(211) + >>> x = stats.loggamma.rvs(5, size=500) + 5 + >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1) + >>> ax1.set_xlabel('') + >>> ax1.set_title('Probplot against normal distribution') + + We now use `yeojohnson` to transform the data so it's closest to normal: + + >>> ax2 = fig.add_subplot(212) + >>> xt, lmbda = stats.yeojohnson(x) + >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) + >>> ax2.set_title('Probplot after Yeo-Johnson transformation') + + >>> plt.show() + + """ + x = np.asarray(x) + if x.size == 0: + return x + + if np.issubdtype(x.dtype, np.complexfloating): + raise ValueError('Yeo-Johnson transformation is not defined for ' + 'complex numbers.') + + if np.issubdtype(x.dtype, np.integer): + x = x.astype(np.float64, copy=False) + + if lmbda is not None: + return _yeojohnson_transform(x, lmbda) + + # if lmbda=None, find the lmbda that maximizes the log-likelihood function. + lmax = yeojohnson_normmax(x) + y = _yeojohnson_transform(x, lmax) + + return y, lmax + + +def _yeojohnson_transform(x, lmbda): + """Returns `x` transformed by the Yeo-Johnson power transform with given + parameter `lmbda`. + """ + dtype = x.dtype if np.issubdtype(x.dtype, np.floating) else np.float64 + out = np.zeros_like(x, dtype=dtype) + pos = x >= 0 # binary mask + + # when x >= 0 + if abs(lmbda) < np.spacing(1.): + out[pos] = np.log1p(x[pos]) + else: # lmbda != 0 + # more stable version of: ((x + 1) ** lmbda - 1) / lmbda + out[pos] = np.expm1(lmbda * np.log1p(x[pos])) / lmbda + + # when x < 0 + if abs(lmbda - 2) > np.spacing(1.): + out[~pos] = -np.expm1((2 - lmbda) * np.log1p(-x[~pos])) / (2 - lmbda) + else: # lmbda == 2 + out[~pos] = -np.log1p(-x[~pos]) + + return out + + +def yeojohnson_llf(lmb, data): + r"""The yeojohnson log-likelihood function. + + Parameters + ---------- + lmb : scalar + Parameter for Yeo-Johnson transformation. See `yeojohnson` for + details. + data : array_like + Data to calculate Yeo-Johnson log-likelihood for. If `data` is + multi-dimensional, the log-likelihood is calculated along the first + axis. + + Returns + ------- + llf : float + Yeo-Johnson log-likelihood of `data` given `lmb`. + + See Also + -------- + yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax + + Notes + ----- + The Yeo-Johnson log-likelihood function is defined here as + + .. math:: + + llf = -N/2 \log(\hat{\sigma}^2) + (\lambda - 1) + \sum_i \text{ sign }(x_i)\log(|x_i| + 1) + + where :math:`\hat{\sigma}^2` is estimated variance of the Yeo-Johnson + transformed input data ``x``. + + .. versionadded:: 1.2.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes + + Generate some random variates and calculate Yeo-Johnson log-likelihood + values for them for a range of ``lmbda`` values: + + >>> x = stats.loggamma.rvs(5, loc=10, size=1000) + >>> lmbdas = np.linspace(-2, 10) + >>> llf = np.zeros(lmbdas.shape, dtype=float) + >>> for ii, lmbda in enumerate(lmbdas): + ... 
llf[ii] = stats.yeojohnson_llf(lmbda, x) + + Also find the optimal lmbda value with `yeojohnson`: + + >>> x_most_normal, lmbda_optimal = stats.yeojohnson(x) + + Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a + horizontal line to check that that's really the optimum: + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> ax.plot(lmbdas, llf, 'b.-') + >>> ax.axhline(stats.yeojohnson_llf(lmbda_optimal, x), color='r') + >>> ax.set_xlabel('lmbda parameter') + >>> ax.set_ylabel('Yeo-Johnson log-likelihood') + + Now add some probability plots to show that where the log-likelihood is + maximized the data transformed with `yeojohnson` looks closest to normal: + + >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' + >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): + ... xt = stats.yeojohnson(x, lmbda=lmbda) + ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) + ... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) + ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') + ... ax_inset.set_xticklabels([]) + ... ax_inset.set_yticklabels([]) + ... ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda) + + >>> plt.show() + + """ + data = np.asarray(data) + n_samples = data.shape[0] + + if n_samples == 0: + return np.nan + + trans = _yeojohnson_transform(data, lmb) + trans_var = trans.var(axis=0) + loglike = np.empty_like(trans_var) + + # Avoid RuntimeWarning raised by np.log when the variance is too low + tiny_variance = trans_var < np.finfo(trans_var.dtype).tiny + loglike[tiny_variance] = np.inf + + loglike[~tiny_variance] = ( + -n_samples / 2 * np.log(trans_var[~tiny_variance])) + loglike[~tiny_variance] += ( + (lmb - 1) * (np.sign(data) * np.log1p(np.abs(data))).sum(axis=0)) + return loglike + + +def yeojohnson_normmax(x, brack=None): + """Compute optimal Yeo-Johnson transform parameter. + + Compute optimal Yeo-Johnson transform parameter for input data, using + maximum likelihood estimation. + + Parameters + ---------- + x : array_like + Input array. + brack : 2-tuple, optional + The starting interval for a downhill bracket search with + `optimize.brent`. Note that this is in most cases not critical; the + final result is allowed to be outside this bracket. If None, + `optimize.fminbound` is used with bounds that avoid overflow. + + Returns + ------- + maxlog : float + The optimal transform parameter found. + + See Also + -------- + yeojohnson, yeojohnson_llf, yeojohnson_normplot + + Notes + ----- + .. 
versionadded:: 1.2.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Generate some data and determine optimal ``lmbda`` + + >>> rng = np.random.default_rng() + >>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5 + >>> lmax = stats.yeojohnson_normmax(x) + + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> prob = stats.yeojohnson_normplot(x, -10, 10, plot=ax) + >>> ax.axvline(lmax, color='r') + + >>> plt.show() + + """ + def _neg_llf(lmbda, data): + llf = yeojohnson_llf(lmbda, data) + # reject likelihoods that are inf which are likely due to small + # variance in the transformed space + llf[np.isinf(llf)] = -np.inf + return -llf + + with np.errstate(invalid='ignore'): + if not np.all(np.isfinite(x)): + raise ValueError('Yeo-Johnson input must be finite.') + if np.all(x == 0): + return 1.0 + if brack is not None: + return optimize.brent(_neg_llf, brack=brack, args=(x,)) + x = np.asarray(x) + dtype = x.dtype if np.issubdtype(x.dtype, np.floating) else np.float64 + # Allow values up to 20 times the maximum observed value to be safely + # transformed without over- or underflow. + log1p_max_x = np.log1p(20 * np.max(np.abs(x))) + # Use half of floating point's exponent range to allow safe computation + # of the variance of the transformed data. + log_eps = np.log(np.finfo(dtype).eps) + log_tiny_float = (np.log(np.finfo(dtype).tiny) - log_eps) / 2 + log_max_float = (np.log(np.finfo(dtype).max) + log_eps) / 2 + # Compute the bounds by approximating the inverse of the Yeo-Johnson + # transform on the smallest and largest floating point exponents, given + # the largest data we expect to observe. See [1] for further details. + # [1] https://github.com/scipy/scipy/pull/18852#issuecomment-1630286174 + lb = log_tiny_float / log1p_max_x + ub = log_max_float / log1p_max_x + # Convert the bounds if all or some of the data is negative. + if np.all(x < 0): + lb, ub = 2 - ub, 2 - lb + elif np.any(x < 0): + lb, ub = max(2 - ub, lb), min(2 - lb, ub) + # Match `optimize.brent`'s tolerance. + tol_brent = 1.48e-08 + return optimize.fminbound(_neg_llf, lb, ub, args=(x,), xtol=tol_brent) + + +def yeojohnson_normplot(x, la, lb, plot=None, N=80): + """Compute parameters for a Yeo-Johnson normality plot, optionally show it. + + A Yeo-Johnson normality plot shows graphically what the best + transformation parameter is to use in `yeojohnson` to obtain a + distribution that is close to normal. + + Parameters + ---------- + x : array_like + Input array. + la, lb : scalar + The lower and upper bounds for the ``lmbda`` values to pass to + `yeojohnson` for Yeo-Johnson transformations. These are also the + limits of the horizontal axis of the plot if that is generated. + plot : object, optional + If given, plots the quantiles and least squares fit. + `plot` is an object that has to have methods "plot" and "text". + The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, + or a custom object with the same methods. + Default is None, which means that no plot is created. + N : int, optional + Number of points on the horizontal axis (equally distributed from + `la` to `lb`). + + Returns + ------- + lmbdas : ndarray + The ``lmbda`` values for which a Yeo-Johnson transform was done. + ppcc : ndarray + Probability Plot Correlelation Coefficient, as obtained from `probplot` + when fitting the Box-Cox transformed input `x` against a normal + distribution. 
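# --- Illustrative sketch (editor's addition, not part of the original module) ---
# `yeojohnson_normmax` should agree with a brute-force grid search over
# `yeojohnson_llf`; the data and grid below are arbitrary.
import numpy as np
from scipy import stats

rng = np.random.default_rng(1)
x = rng.standard_gamma(2.0, size=300) - 1.0        # mixed-sign data is allowed
lmax = stats.yeojohnson_normmax(x)

grid = np.linspace(lmax - 1.0, lmax + 1.0, 2001)
llf = np.array([stats.yeojohnson_llf(lmb, x) for lmb in grid])
print(abs(grid[np.argmax(llf)] - lmax) < 1e-3)     # expected: True for a smooth,
                                                   # locally unimodal llf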
+ + See Also + -------- + probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max + + Notes + ----- + Even if `plot` is given, the figure is not shown or saved by + `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` + should be used after calling `probplot`. + + .. versionadded:: 1.2.0 + + Examples + -------- + >>> from scipy import stats + >>> import matplotlib.pyplot as plt + + Generate some non-normally distributed data, and create a Yeo-Johnson plot: + + >>> x = stats.loggamma.rvs(5, size=500) + 5 + >>> fig = plt.figure() + >>> ax = fig.add_subplot(111) + >>> prob = stats.yeojohnson_normplot(x, -20, 20, plot=ax) + + Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in + the same plot: + + >>> _, maxlog = stats.yeojohnson(x) + >>> ax.axvline(maxlog, color='r') + + >>> plt.show() + + """ + return _normplot('yeojohnson', x, la, lb, plot, N) + + +ShapiroResult = namedtuple('ShapiroResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(ShapiroResult, n_samples=1, too_small=2, default_axis=None) +def shapiro(x): + r"""Perform the Shapiro-Wilk test for normality. + + The Shapiro-Wilk test tests the null hypothesis that the + data was drawn from a normal distribution. + + Parameters + ---------- + x : array_like + Array of sample data. + + Returns + ------- + statistic : float + The test statistic. + p-value : float + The p-value for the hypothesis test. + + See Also + -------- + anderson : The Anderson-Darling test for normality + kstest : The Kolmogorov-Smirnov test for goodness of fit. + + Notes + ----- + The algorithm used is described in [4]_ but censoring parameters as + described are not implemented. For N > 5000 the W test statistic is + accurate, but the p-value may not be. + + References + ---------- + .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm + :doi:`10.18434/M32189` + .. [2] Shapiro, S. S. & Wilk, M.B, "An analysis of variance test for + normality (complete samples)", Biometrika, 1965, Vol. 52, + pp. 591-611, :doi:`10.2307/2333709` + .. [3] Razali, N. M. & Wah, Y. B., "Power comparisons of Shapiro-Wilk, + Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests", Journal + of Statistical Modeling and Analytics, 2011, Vol. 2, pp. 21-33. + .. [4] Royston P., "Remark AS R94: A Remark on Algorithm AS 181: The + W-test for Normality", 1995, Applied Statistics, Vol. 44, + :doi:`10.2307/2986146` + .. [5] Phipson B., and Smyth, G. K., "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn", Statistical Applications in Genetics and Molecular Biology, + 2010, Vol.9, :doi:`10.2202/1544-6115.1585` + .. [6] Panagiotakos, D. B., "The value of p-value in biomedical + research", The Open Cardiovascular Medicine Journal, 2008, Vol.2, + pp. 97-99, :doi:`10.2174/1874192400802010097` + + Examples + -------- + Suppose we wish to infer from measurements whether the weights of adult + human males in a medical study are not normally distributed [2]_. + The weights (lbs) are recorded in the array ``x`` below. + + >>> import numpy as np + >>> x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236]) + + The normality test of [1]_ and [2]_ begins by computing a statistic based + on the relationship between the observations and the expected order + statistics of a normal distribution. 
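# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Intuition for the statistic described above: correlate the sorted sample with
# approximate normal order-statistic scores (Blom's formula).  This is the
# Shapiro-Francia flavour, not the exact Shapiro-Wilk coefficients, so the value
# is close to but not identical to `stats.shapiro(x).statistic`.
import numpy as np
from scipy import stats

x = np.array([148, 154, 158, 160, 161, 162, 166, 170, 182, 195, 236], float)
n = len(x)
scores = stats.norm.ppf((np.arange(1, n + 1) - 0.375) / (n + 0.25))
r, _ = stats.pearsonr(np.sort(x), scores)
print(r**2)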
+ + >>> from scipy import stats + >>> res = stats.shapiro(x) + >>> res.statistic + 0.7888147830963135 + + The value of this statistic tends to be high (close to 1) for samples drawn + from a normal distribution. + + The test is performed by comparing the observed value of the statistic + against the null distribution: the distribution of statistic values formed + under the null hypothesis that the weights were drawn from a normal + distribution. For this normality test, the null distribution is not easy to + calculate exactly, so it is usually approximated by Monte Carlo methods, + that is, drawing many samples of the same size as ``x`` from a normal + distribution and computing the values of the statistic for each. + + >>> def statistic(x): + ... # Get only the `shapiro` statistic; ignore its p-value + ... return stats.shapiro(x).statistic + >>> ref = stats.monte_carlo_test(x, stats.norm.rvs, statistic, + ... alternative='less') + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> bins = np.linspace(0.65, 1, 50) + >>> def plot(ax): # we'll reuse this + ... ax.hist(ref.null_distribution, density=True, bins=bins) + ... ax.set_title("Shapiro-Wilk Test Null Distribution \n" + ... "(Monte Carlo Approximation, 11 Observations)") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution less than or equal to the observed value of the + statistic. + + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> annotation = (f'p-value={res.pvalue:.6f}\n(highlighted area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (0.75, 0.1), (0.68, 0.7), arrowprops=props) + >>> i_extreme = np.where(bins <= res.statistic)[0] + >>> for i in i_extreme: + ... ax.patches[i].set_color('C1') + >>> plt.xlim(0.65, 0.9) + >>> plt.ylim(0, 4) + >>> plt.show + >>> res.pvalue + 0.006703833118081093 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from a normally distributed population that produces such an + extreme value of the statistic - this may be taken as evidence against + the null hypothesis in favor of the alternative: the weights were not + drawn from a normal distribution. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence *for* the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [5]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + + """ + x = np.ravel(x).astype(np.float64) + + N = len(x) + if N < 3: + raise ValueError("Data must be at least length 3.") + + a = zeros(N//2, dtype=np.float64) + init = 0 + + y = sort(x) + y -= x[N//2] # subtract the median (or a nearby value); see gh-15777 + + w, pw, ifault = swilk(y, a, init) + if ifault not in [0, 2]: + warnings.warn("scipy.stats.shapiro: Input data has range zero. The" + " results may not be accurate.", stacklevel=2) + if N > 5000: + warnings.warn("scipy.stats.shapiro: For N > 5000, computed p-value " + f"may not be accurate. Current N is {N}.", + stacklevel=2) + + # `w` and `pw` are always Python floats, which are double precision. 
+ # We want to ensure that they are NumPy floats, so until dtypes are + # respected, we can explicitly convert each to float64 (faster than + # `np.array([w, pw])`). + return ShapiroResult(np.float64(w), np.float64(pw)) + + +# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and +# Some Comparisons", Journal of the American Statistical +# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737 +_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092]) +_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957]) +# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution", +# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588. +_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038]) +# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based +# on the Empirical Distribution Function.", Biometrika, +# Vol. 66, Issue 3, Dec. 1979, pp 591-595. +_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010]) +# From Richard A. Lockhart and Michael A. Stephens "Estimation and Tests of +# Fit for the Three-Parameter Weibull Distribution" +# Journal of the Royal Statistical Society.Series B(Methodological) +# Vol. 56, No. 3 (1994), pp. 491-500, table 1. Keys are c*100 +_Avals_weibull = [[0.292, 0.395, 0.467, 0.522, 0.617, 0.711, 0.836, 0.931], + [0.295, 0.399, 0.471, 0.527, 0.623, 0.719, 0.845, 0.941], + [0.298, 0.403, 0.476, 0.534, 0.631, 0.728, 0.856, 0.954], + [0.301, 0.408, 0.483, 0.541, 0.640, 0.738, 0.869, 0.969], + [0.305, 0.414, 0.490, 0.549, 0.650, 0.751, 0.885, 0.986], + [0.309, 0.421, 0.498, 0.559, 0.662, 0.765, 0.902, 1.007], + [0.314, 0.429, 0.508, 0.570, 0.676, 0.782, 0.923, 1.030], + [0.320, 0.438, 0.519, 0.583, 0.692, 0.802, 0.947, 1.057], + [0.327, 0.448, 0.532, 0.598, 0.711, 0.824, 0.974, 1.089], + [0.334, 0.469, 0.547, 0.615, 0.732, 0.850, 1.006, 1.125], + [0.342, 0.472, 0.563, 0.636, 0.757, 0.879, 1.043, 1.167]] +_Avals_weibull = np.array(_Avals_weibull) +_cvals_weibull = np.linspace(0, 0.5, 11) +_get_As_weibull = interpolate.interp1d(_cvals_weibull, _Avals_weibull.T, + kind='linear', bounds_error=False, + fill_value=_Avals_weibull[-1]) + + +def _weibull_fit_check(params, x): + # Refine the fit returned by `weibull_min.fit` to ensure that the first + # order necessary conditions are satisfied. If not, raise an error. + # Here, use `m` for the shape parameter to be consistent with [7] + # and avoid confusion with `c` as defined in [7]. + n = len(x) + m, u, s = params + + def dnllf_dm(m, u): + # Partial w.r.t. shape w/ optimal scale. See [7] Equation 5. + xu = x-u + return (1/m - (xu**m*np.log(xu)).sum()/(xu**m).sum() + + np.log(xu).sum()/n) + + def dnllf_du(m, u): + # Partial w.r.t. loc w/ optimal scale. See [7] Equation 6. + xu = x-u + return (m-1)/m*(xu**-1).sum() - n*(xu**(m-1)).sum()/(xu**m).sum() + + def get_scale(m, u): + # Partial w.r.t. scale solved in terms of shape and location. + # See [7] Equation 7. + return ((x-u)**m/n).sum()**(1/m) + + def dnllf(params): + # Partial derivatives of the NLLF w.r.t. parameters, i.e. + # first order necessary conditions for MLE fit. + return [dnllf_dm(*params), dnllf_du(*params)] + + suggestion = ("Maximum likelihood estimation is known to be challenging " + "for the three-parameter Weibull distribution. Consider " + "performing a custom goodness-of-fit test using " + "`scipy.stats.monte_carlo_test`.") + + if np.allclose(u, np.min(x)) or m < 1: + # The critical values provided by [7] don't seem to control the + # Type I error rate in this case. Error out. 
+ message = ("Maximum likelihood estimation has converged to " + "a solution in which the location is equal to the minimum " + "of the data, the shape parameter is less than 2, or both. " + "The table of critical values in [7] does not " + "include this case. " + suggestion) + raise ValueError(message) + + try: + # Refine the MLE / verify that first-order necessary conditions are + # satisfied. If so, the critical values provided in [7] seem reliable. + with np.errstate(over='raise', invalid='raise'): + res = optimize.root(dnllf, params[:-1]) + + message = ("Solution of MLE first-order conditions failed: " + f"{res.message}. `anderson` cannot continue. " + suggestion) + if not res.success: + raise ValueError(message) + + except (FloatingPointError, ValueError) as e: + message = ("An error occurred while fitting the Weibull distribution " + "to the data, so `anderson` cannot continue. " + suggestion) + raise ValueError(message) from e + + m, u = res.x + s = get_scale(m, u) + return m, u, s + + +AndersonResult = _make_tuple_bunch('AndersonResult', + ['statistic', 'critical_values', + 'significance_level'], ['fit_result']) + + +def anderson(x, dist='norm'): + """Anderson-Darling test for data coming from a particular distribution. + + The Anderson-Darling test tests the null hypothesis that a sample is + drawn from a population that follows a particular distribution. + For the Anderson-Darling test, the critical values depend on + which distribution is being tested against. This function works + for normal, exponential, logistic, weibull_min, or Gumbel (Extreme Value + Type I) distributions. + + Parameters + ---------- + x : array_like + Array of sample data. + dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r', 'extreme1', 'weibull_min'}, optional + The type of distribution to test against. The default is 'norm'. + The names 'extreme1', 'gumbel_l' and 'gumbel' are synonyms for the + same distribution. + + Returns + ------- + result : AndersonResult + An object with the following attributes: + + statistic : float + The Anderson-Darling test statistic. + critical_values : list + The critical values for this distribution. + significance_level : list + The significance levels for the corresponding critical values + in percents. The function returns critical values for a + differing set of significance levels depending on the + distribution that is being tested against. + fit_result : `~scipy.stats._result_classes.FitResult` + An object containing the results of fitting the distribution to + the data. + + See Also + -------- + kstest : The Kolmogorov-Smirnov test for goodness-of-fit. + + Notes + ----- + Critical values provided are for the following significance levels: + + normal/exponential + 15%, 10%, 5%, 2.5%, 1% + logistic + 25%, 10%, 5%, 2.5%, 1%, 0.5% + gumbel_l / gumbel_r + 25%, 10%, 5%, 2.5%, 1% + weibull_min + 50%, 25%, 15%, 10%, 5%, 2.5%, 1%, 0.5% + + If the returned statistic is larger than these critical values then + for the corresponding significance level, the null hypothesis that + the data come from the chosen distribution can be rejected. + The returned statistic is referred to as 'A2' in the references. + + For `weibull_min`, maximum likelihood estimation is known to be + challenging. If the test returns successfully, then the first order + conditions for a maximum likehood estimate have been verified and + the critical values correspond relatively well to the significance levels, + provided that the sample is sufficiently large (>10 observations [7]). 
+ However, for some data - especially data with no left tail - `anderson` + is likely to result in an error message. In this case, consider + performing a custom goodness of fit test using + `scipy.stats.monte_carlo_test`. + + References + ---------- + .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm + .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and + Some Comparisons, Journal of the American Statistical Association, + Vol. 69, pp. 730-737. + .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit + Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, + pp. 357-369. + .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value + Distribution, Biometrika, Vol. 64, pp. 583-588. + .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference + to Tests for Exponentiality , Technical Report No. 262, + Department of Statistics, Stanford University, Stanford, CA. + .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution + Based on the Empirical Distribution Function, Biometrika, Vol. 66, + pp. 591-595. + .. [7] Richard A. Lockhart and Michael A. Stephens "Estimation and Tests of + Fit for the Three-Parameter Weibull Distribution" + Journal of the Royal Statistical Society.Series B(Methodological) + Vol. 56, No. 3 (1994), pp. 491-500, Table 0. + + Examples + -------- + Test the null hypothesis that a random sample was drawn from a normal + distribution (with unspecified mean and standard deviation). + + >>> import numpy as np + >>> from scipy.stats import anderson + >>> rng = np.random.default_rng() + >>> data = rng.random(size=35) + >>> res = anderson(data) + >>> res.statistic + 0.8398018749744764 + >>> res.critical_values + array([0.527, 0.6 , 0.719, 0.839, 0.998]) + >>> res.significance_level + array([15. , 10. , 5. , 2.5, 1. ]) + + The value of the statistic (barely) exceeds the critical value associated + with a significance level of 2.5%, so the null hypothesis may be rejected + at a significance level of 2.5%, but not at a significance level of 1%. 
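# --- Illustrative sketch (editor's addition, not part of the original module) ---
# When `anderson` cannot handle a Weibull sample (see the note above), a Monte
# Carlo goodness-of-fit test with the same Anderson-Darling statistic is one
# alternative; `scipy.stats.goodness_of_fit` (available in recent SciPy releases)
# is used here with arbitrary data and a fixed location to keep the fit simple.
import numpy as np
from scipy import stats

rng = np.random.default_rng(2)
data = stats.weibull_min.rvs(2.5, scale=4, size=60, random_state=rng)
res = stats.goodness_of_fit(stats.weibull_min, data, known_params={'loc': 0},
                            statistic='ad', n_mc_samples=499, random_state=rng)
print(res.statistic, res.pvalue)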
+ + """ # numpy/numpydoc#87 # noqa: E501 + dist = dist.lower() + if dist in {'extreme1', 'gumbel'}: + dist = 'gumbel_l' + dists = {'norm', 'expon', 'gumbel_l', + 'gumbel_r', 'logistic', 'weibull_min'} + + if dist not in dists: + raise ValueError(f"Invalid distribution; dist must be in {dists}.") + y = sort(x) + xbar = np.mean(x, axis=0) + N = len(y) + if dist == 'norm': + s = np.std(x, ddof=1, axis=0) + w = (y - xbar) / s + fit_params = xbar, s + logcdf = distributions.norm.logcdf(w) + logsf = distributions.norm.logsf(w) + sig = array([15, 10, 5, 2.5, 1]) + critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3) + elif dist == 'expon': + w = y / xbar + fit_params = 0, xbar + logcdf = distributions.expon.logcdf(w) + logsf = distributions.expon.logsf(w) + sig = array([15, 10, 5, 2.5, 1]) + critical = around(_Avals_expon / (1.0 + 0.6/N), 3) + elif dist == 'logistic': + def rootfunc(ab, xj, N): + a, b = ab + tmp = (xj - a) / b + tmp2 = exp(tmp) + val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N, + np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N] + return array(val) + + sol0 = array([xbar, np.std(x, ddof=1, axis=0)]) + sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5) + w = (y - sol[0]) / sol[1] + fit_params = sol + logcdf = distributions.logistic.logcdf(w) + logsf = distributions.logistic.logsf(w) + sig = array([25, 10, 5, 2.5, 1, 0.5]) + critical = around(_Avals_logistic / (1.0 + 0.25/N), 3) + elif dist == 'gumbel_r': + xbar, s = distributions.gumbel_r.fit(x) + w = (y - xbar) / s + fit_params = xbar, s + logcdf = distributions.gumbel_r.logcdf(w) + logsf = distributions.gumbel_r.logsf(w) + sig = array([25, 10, 5, 2.5, 1]) + critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) + elif dist == 'gumbel_l': + xbar, s = distributions.gumbel_l.fit(x) + w = (y - xbar) / s + fit_params = xbar, s + logcdf = distributions.gumbel_l.logcdf(w) + logsf = distributions.gumbel_l.logsf(w) + sig = array([25, 10, 5, 2.5, 1]) + critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) + elif dist == 'weibull_min': + message = ("Critical values of the test statistic are given for the " + "asymptotic distribution. These may not be accurate for " + "samples with fewer than 10 observations. Consider using " + "`scipy.stats.monte_carlo_test`.") + if N < 10: + warnings.warn(message, stacklevel=2) + # [7] writes our 'c' as 'm', and they write `c = 1/m`. Use their names. + m, loc, scale = distributions.weibull_min.fit(y) + m, loc, scale = _weibull_fit_check((m, loc, scale), y) + fit_params = m, loc, scale + logcdf = stats.weibull_min(*fit_params).logcdf(y) + logsf = stats.weibull_min(*fit_params).logsf(y) + c = 1 / m # m and c are as used in [7] + sig = array([0.5, 0.75, 0.85, 0.9, 0.95, 0.975, 0.99, 0.995]) + critical = _get_As_weibull(c) + # Goodness-of-fit tests should only be used to provide evidence + # _against_ the null hypothesis. Be conservative and round up. + critical = np.round(critical + 0.0005, decimals=3) + + i = arange(1, N + 1) + A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0) + + # FitResult initializer expects an optimize result, so let's work with it + message = '`anderson` successfully fit the distribution to the data.' 
+ res = optimize.OptimizeResult(success=True, message=message) + res.x = np.array(fit_params) + fit_result = FitResult(getattr(distributions, dist), y, + discrete=False, res=res) + + return AndersonResult(A2, critical, sig, fit_result=fit_result) + + +def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N): + """Compute A2akN equation 7 of Scholz and Stephens. + + Parameters + ---------- + samples : sequence of 1-D array_like + Array of sample arrays. + Z : array_like + Sorted array of all observations. + Zstar : array_like + Sorted array of unique observations. + k : int + Number of samples. + n : array_like + Number of observations in each sample. + N : int + Total number of observations. + + Returns + ------- + A2aKN : float + The A2aKN statistics of Scholz and Stephens 1987. + + """ + A2akN = 0. + Z_ssorted_left = Z.searchsorted(Zstar, 'left') + if N == Zstar.size: + lj = 1. + else: + lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left + Bj = Z_ssorted_left + lj / 2. + for i in arange(0, k): + s = np.sort(samples[i]) + s_ssorted_right = s.searchsorted(Zstar, side='right') + Mij = s_ssorted_right.astype(float) + fij = s_ssorted_right - s.searchsorted(Zstar, 'left') + Mij -= fij / 2. + inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.) + A2akN += inner.sum() / n[i] + A2akN *= (N - 1.) / N + return A2akN + + +def _anderson_ksamp_right(samples, Z, Zstar, k, n, N): + """Compute A2akN equation 6 of Scholz & Stephens. + + Parameters + ---------- + samples : sequence of 1-D array_like + Array of sample arrays. + Z : array_like + Sorted array of all observations. + Zstar : array_like + Sorted array of unique observations. + k : int + Number of samples. + n : array_like + Number of observations in each sample. + N : int + Total number of observations. + + Returns + ------- + A2KN : float + The A2KN statistics of Scholz and Stephens 1987. + + """ + A2kN = 0. + lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1], + 'left') + Bj = lj.cumsum() + for i in arange(0, k): + s = np.sort(samples[i]) + Mij = s.searchsorted(Zstar[:-1], side='right') + inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj)) + A2kN += inner.sum() / n[i] + return A2kN + + +Anderson_ksampResult = _make_tuple_bunch( + 'Anderson_ksampResult', + ['statistic', 'critical_values', 'pvalue'], [] +) + + +def anderson_ksamp(samples, midrank=True, *, method=None): + """The Anderson-Darling test for k-samples. + + The k-sample Anderson-Darling test is a modification of the + one-sample Anderson-Darling test. It tests the null hypothesis + that k-samples are drawn from the same population without having + to specify the distribution function of that population. The + critical values depend on the number of samples. + + Parameters + ---------- + samples : sequence of 1-D array_like + Array of sample data in arrays. + midrank : bool, optional + Type of Anderson-Darling test which is computed. Default + (True) is the midrank test applicable to continuous and + discrete populations. If False, the right side empirical + distribution is used. + method : PermutationMethod, optional + Defines the method used to compute the p-value. If `method` is an + instance of `PermutationMethod`, the p-value is computed using + `scipy.stats.permutation_test` with the provided configuration options + and other appropriate settings. Otherwise, the p-value is interpolated + from tabulated values. 
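# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The two test versions selected by `midrank` (midrank vs. right-continuous ECDF)
# differ only slightly when ties are present; the rounded data below are arbitrary.
import numpy as np
from scipy import stats

rng = np.random.default_rng(3)
a = np.round(rng.normal(size=40), 1)              # rounding introduces ties
b = np.round(rng.normal(loc=0.3, size=30), 1)
res_mid = stats.anderson_ksamp([a, b], midrank=True)
res_right = stats.anderson_ksamp([a, b], midrank=False)
print(res_mid.statistic, res_right.statistic)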
+ + Returns + ------- + res : Anderson_ksampResult + An object containing attributes: + + statistic : float + Normalized k-sample Anderson-Darling test statistic. + critical_values : array + The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%, + 0.5%, 0.1%. + pvalue : float + The approximate p-value of the test. If `method` is not + provided, the value is floored / capped at 0.1% / 25%. + + Raises + ------ + ValueError + If fewer than 2 samples are provided, a sample is empty, or no + distinct observations are in the samples. + + See Also + -------- + ks_2samp : 2 sample Kolmogorov-Smirnov test + anderson : 1 sample Anderson-Darling test + + Notes + ----- + [1]_ defines three versions of the k-sample Anderson-Darling test: + one for continuous distributions and two for discrete + distributions, in which ties between samples may occur. The + default of this routine is to compute the version based on the + midrank empirical distribution function. This test is applicable + to continuous and discrete data. If midrank is set to False, the + right side empirical distribution is used for a test for discrete + data. According to [1]_, the two discrete test statistics differ + only slightly if a few collisions due to round-off errors occur in + the test not adjusted for ties between samples. + + The critical values corresponding to the significance levels from 0.01 + to 0.25 are taken from [1]_. p-values are floored / capped + at 0.1% / 25%. Since the range of critical values might be extended in + future releases, it is recommended not to test ``p == 0.25``, but rather + ``p >= 0.25`` (analogously for the lower bound). + + .. versionadded:: 0.14.0 + + References + ---------- + .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample + Anderson-Darling Tests, Journal of the American Statistical + Association, Vol. 82, pp. 918-924. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> res = stats.anderson_ksamp([rng.normal(size=50), + ... rng.normal(loc=0.5, size=30)]) + >>> res.statistic, res.pvalue + (1.974403288713695, 0.04991293614572478) + >>> res.critical_values + array([0.325, 1.226, 1.961, 2.718, 3.752, 4.592, 6.546]) + + The null hypothesis that the two random samples come from the same + distribution can be rejected at the 5% level because the returned + test value is greater than the critical value for 5% (1.961) but + not at the 2.5% level. The interpolation gives an approximate + p-value of 4.99%. + + >>> samples = [rng.normal(size=50), rng.normal(size=30), + ... rng.normal(size=20)] + >>> res = stats.anderson_ksamp(samples) + >>> res.statistic, res.pvalue + (-0.29103725200789504, 0.25) + >>> res.critical_values + array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856, + 4.07210043, 5.56419101]) + + The null hypothesis cannot be rejected for three samples from an + identical distribution. The reported p-value (25%) has been capped and + may not be very accurate (since it corresponds to the value 0.449 + whereas the statistic is -0.291). + + In such cases where the p-value is capped or when sample sizes are + small, a permutation test may be more accurate. 
+ + >>> method = stats.PermutationMethod(n_resamples=9999, random_state=rng) + >>> res = stats.anderson_ksamp(samples, method=method) + >>> res.pvalue + 0.5254 + + """ + k = len(samples) + if (k < 2): + raise ValueError("anderson_ksamp needs at least two samples") + + samples = list(map(np.asarray, samples)) + Z = np.sort(np.hstack(samples)) + N = Z.size + Zstar = np.unique(Z) + if Zstar.size < 2: + raise ValueError("anderson_ksamp needs more than one distinct " + "observation") + + n = np.array([sample.size for sample in samples]) + if np.any(n == 0): + raise ValueError("anderson_ksamp encountered sample without " + "observations") + + if midrank: + A2kN_fun = _anderson_ksamp_midrank + else: + A2kN_fun = _anderson_ksamp_right + A2kN = A2kN_fun(samples, Z, Zstar, k, n, N) + + def statistic(*samples): + return A2kN_fun(samples, Z, Zstar, k, n, N) + + if method is not None: + res = stats.permutation_test(samples, statistic, **method._asdict(), + alternative='greater') + + H = (1. / n).sum() + hs_cs = (1. / arange(N - 1, 1, -1)).cumsum() + h = hs_cs[-1] + 1 + g = (hs_cs / arange(2, N)).sum() + + a = (4*g - 6) * (k - 1) + (10 - 6*g)*H + b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6 + c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h + d = (2*h + 6)*k**2 - 4*h*k + sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.)) + m = k - 1 + A2 = (A2kN - m) / math.sqrt(sigmasq) + + # The b_i values are the interpolation coefficients from Table 2 + # of Scholz and Stephens 1987 + b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326, 2.573, 3.085]) + b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822, 2.364, 3.615]) + b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396, -0.345, -0.154]) + critical = b0 + b1 / math.sqrt(m) + b2 / m + + sig = np.array([0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001]) + + if A2 < critical.min() and method is None: + p = sig.max() + msg = (f"p-value capped: true value larger than {p}. Consider " + "specifying `method` " + "(e.g. `method=stats.PermutationMethod()`.)") + warnings.warn(msg, stacklevel=2) + elif A2 > critical.max() and method is None: + p = sig.min() + msg = (f"p-value floored: true value smaller than {p}. Consider " + "specifying `method` " + "(e.g. 
`method=stats.PermutationMethod()`.)") + warnings.warn(msg, stacklevel=2) + elif method is None: + # interpolation of probit of significance level + pf = np.polyfit(critical, log(sig), 2) + p = math.exp(np.polyval(pf, A2)) + else: + p = res.pvalue if method is not None else p + + # create result object with alias for backward compatibility + res = Anderson_ksampResult(A2, critical, p) + res.significance_level = p + return res + + +AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue')) + + +class _ABW: + """Distribution of Ansari-Bradley W-statistic under the null hypothesis.""" + # TODO: calculate exact distribution considering ties + # We could avoid summing over more than half the frequencies, + # but initially it doesn't seem worth the extra complexity + + def __init__(self): + """Minimal initializer.""" + self.m = None + self.n = None + self.astart = None + self.total = None + self.freqs = None + + def _recalc(self, n, m): + """When necessary, recalculate exact distribution.""" + if n != self.n or m != self.m: + self.n, self.m = n, m + # distribution is NOT symmetric when m + n is odd + # n is len(x), m is len(y), and ratio of scales is defined x/y + astart, a1, _ = gscale(n, m) + self.astart = astart # minimum value of statistic + # Exact distribution of test statistic under null hypothesis + # expressed as frequencies/counts/integers to maintain precision. + # Stored as floats to avoid overflow of sums. + self.freqs = a1.astype(np.float64) + self.total = self.freqs.sum() # could calculate from m and n + # probability mass is self.freqs / self.total; + + def pmf(self, k, n, m): + """Probability mass function.""" + self._recalc(n, m) + # The convention here is that PMF at k = 12.5 is the same as at k = 12, + # -> use `floor` in case of ties. + ind = np.floor(k - self.astart).astype(int) + return self.freqs[ind] / self.total + + def cdf(self, k, n, m): + """Cumulative distribution function.""" + self._recalc(n, m) + # Null distribution derived without considering ties is + # approximate. Round down to avoid Type I error. + ind = np.ceil(k - self.astart).astype(int) + return self.freqs[:ind+1].sum() / self.total + + def sf(self, k, n, m): + """Survival function.""" + self._recalc(n, m) + # Null distribution derived without considering ties is + # approximate. Round down to avoid Type I error. + ind = np.floor(k - self.astart).astype(int) + return self.freqs[ind:].sum() / self.total + + +# Maintain state for faster repeat calls to ansari w/ method='exact' +_abw_state = _ABW() + + +@_axis_nan_policy_factory(AnsariResult, n_samples=2) +def ansari(x, y, alternative='two-sided'): + """Perform the Ansari-Bradley test for equal scale parameters. + + The Ansari-Bradley test ([1]_, [2]_) is a non-parametric test + for the equality of the scale parameter of the distributions + from which two samples were drawn. The null hypothesis states that + the ratio of the scale of the distribution underlying `x` to the scale + of the distribution underlying `y` is 1. + + Parameters + ---------- + x, y : array_like + Arrays of sample data. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the ratio of scales is not equal to 1. + * 'less': the ratio of scales is less than 1. + * 'greater': the ratio of scales is greater than 1. + + .. versionadded:: 1.7.0 + + Returns + ------- + statistic : float + The Ansari-Bradley test statistic. 
+ pvalue : float + The p-value of the hypothesis test. + + See Also + -------- + fligner : A non-parametric test for the equality of k variances + mood : A non-parametric test for the equality of two scale parameters + + Notes + ----- + The p-value given is exact when the sample sizes are both less than + 55 and there are no ties, otherwise a normal approximation for the + p-value is used. + + References + ---------- + .. [1] Ansari, A. R. and Bradley, R. A. (1960) Rank-sum tests for + dispersions, Annals of Mathematical Statistics, 31, 1174-1189. + .. [2] Sprent, Peter and N.C. Smeeton. Applied nonparametric + statistical methods. 3rd ed. Chapman and Hall/CRC. 2001. + Section 5.8.2. + .. [3] Nathaniel E. Helwig "Nonparametric Dispersion and Equality + Tests" at http://users.stat.umn.edu/~helwig/notes/npde-Notes.pdf + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import ansari + >>> rng = np.random.default_rng() + + For these examples, we'll create three random data sets. The first + two, with sizes 35 and 25, are drawn from a normal distribution with + mean 0 and standard deviation 2. The third data set has size 25 and + is drawn from a normal distribution with standard deviation 1.25. + + >>> x1 = rng.normal(loc=0, scale=2, size=35) + >>> x2 = rng.normal(loc=0, scale=2, size=25) + >>> x3 = rng.normal(loc=0, scale=1.25, size=25) + + First we apply `ansari` to `x1` and `x2`. These samples are drawn + from the same distribution, so we expect the Ansari-Bradley test + should not lead us to conclude that the scales of the distributions + are different. + + >>> ansari(x1, x2) + AnsariResult(statistic=541.0, pvalue=0.9762532927399098) + + With a p-value close to 1, we cannot conclude that there is a + significant difference in the scales (as expected). + + Now apply the test to `x1` and `x3`: + + >>> ansari(x1, x3) + AnsariResult(statistic=425.0, pvalue=0.0003087020407974518) + + The probability of observing such an extreme value of the statistic + under the null hypothesis of equal scales is only 0.03087%. We take this + as evidence against the null hypothesis in favor of the alternative: + the scales of the distributions from which the samples were drawn + are not equal. + + We can use the `alternative` parameter to perform a one-tailed test. + In the above example, the scale of `x1` is greater than `x3` and so + the ratio of scales of `x1` and `x3` is greater than 1. This means + that the p-value when ``alternative='greater'`` should be near 0 and + hence we should be able to reject the null hypothesis: + + >>> ansari(x1, x3, alternative='greater') + AnsariResult(statistic=425.0, pvalue=0.0001543510203987259) + + As we can see, the p-value is indeed quite low. 
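+
+    For the exact null distribution (used here because both sample sizes
+    are below 55 and there are no ties), the two-sided p-value is twice the
+    smaller of the two one-sided p-values, capped at 1. A minimal sketch of
+    that relationship, reusing the samples defined above:
+
+    .. code-block:: python
+
+        res_two = ansari(x1, x3)
+        res_greater = ansari(x1, x3, alternative='greater')
+        res_less = ansari(x1, x3, alternative='less')
+        # min(1, 2 * smaller one-sided p-value) matches the two-sided p-value
+        np.isclose(res_two.pvalue,
+                   min(1.0, 2 * min(res_greater.pvalue, res_less.pvalue)))
+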
Use of + ``alternative='less'`` should thus yield a large p-value: + + >>> ansari(x1, x3, alternative='less') + AnsariResult(statistic=425.0, pvalue=0.9998643258449039) + + """ + if alternative not in {'two-sided', 'greater', 'less'}: + raise ValueError("'alternative' must be 'two-sided'," + " 'greater', or 'less'.") + x, y = asarray(x), asarray(y) + n = len(x) + m = len(y) + if m < 1: + raise ValueError("Not enough other observations.") + if n < 1: + raise ValueError("Not enough test observations.") + + N = m + n + xy = r_[x, y] # combine + rank = _stats_py.rankdata(xy) + symrank = amin(array((rank, N - rank + 1)), 0) + AB = np.sum(symrank[:n], axis=0) + uxy = unique(xy) + repeats = (len(uxy) != len(xy)) + exact = ((m < 55) and (n < 55) and not repeats) + if repeats and (m < 55 or n < 55): + warnings.warn("Ties preclude use of exact statistic.", stacklevel=2) + if exact: + if alternative == 'two-sided': + pval = 2.0 * np.minimum(_abw_state.cdf(AB, n, m), + _abw_state.sf(AB, n, m)) + elif alternative == 'greater': + # AB statistic is _smaller_ when ratio of scales is larger, + # so this is the opposite of the usual calculation + pval = _abw_state.cdf(AB, n, m) + else: + pval = _abw_state.sf(AB, n, m) + return AnsariResult(AB, min(1.0, pval)) + + # otherwise compute normal approximation + if N % 2: # N odd + mnAB = n * (N+1.0)**2 / 4.0 / N + varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2) + else: + mnAB = n * (N+2.0) / 4.0 + varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0) + if repeats: # adjust variance estimates + # compute np.sum(tj * rj**2,axis=0) + fac = np.sum(symrank**2, axis=0) + if N % 2: # N odd + varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1)) + else: # N even + varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1)) + + # Small values of AB indicate larger dispersion for the x sample. + # Large values of AB indicate larger dispersion for the y sample. + # This is opposite to the way we define the ratio of scales. see [1]_. + z = (mnAB - AB) / sqrt(varAB) + pvalue = _get_pvalue(z, distributions.norm, alternative) + return AnsariResult(AB[()], pvalue[()]) + + +BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(BartlettResult, n_samples=None) +def bartlett(*samples): + r"""Perform Bartlett's test for equal variances. + + Bartlett's test tests the null hypothesis that all input samples + are from populations with equal variances. For samples + from significantly non-normal populations, Levene's test + `levene` is more robust. + + Parameters + ---------- + sample1, sample2, ... : array_like + arrays of sample data. Only 1d arrays are accepted, they may have + different lengths. + + Returns + ------- + statistic : float + The test statistic. + pvalue : float + The p-value of the test. + + See Also + -------- + fligner : A non-parametric test for the equality of k variances + levene : A robust parametric test for equality of k variances + + Notes + ----- + Conover et al. (1981) examine many of the existing parametric and + nonparametric tests by extensive simulations and they conclude that the + tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be + superior in terms of robustness of departures from normality and power + ([3]_). + + References + ---------- + .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm + .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical + Methods, Eighth Edition, Iowa State University Press. + .. [3] Park, C. and Lindsay, B. G. 
(1999). Robust Scale Estimation and + Hypothesis Testing based on Quadratic Inference Function. Technical + Report #99-03, Center for Likelihood Studies, Pennsylvania State + University. + .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical + Tests. Proceedings of the Royal Society of London. Series A, + Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282. + .. [5] C.I. BLISS (1952), The Statistics of Bioassay: With Special + Reference to the Vitamins, pp 499-503, + :doi:`10.1016/C2013-0-12584-6`. + .. [6] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + .. [7] Ludbrook, J., & Dudley, H. (1998). Why permutation tests are + superior to t and F tests in biomedical research. The American + Statistician, 52(2), 127-132. + + Examples + -------- + In [5]_, the influence of vitamin C on the tooth growth of guinea pigs + was investigated. In a control study, 60 subjects were divided into + small dose, medium dose, and large dose groups that received + daily doses of 0.5, 1.0 and 2.0 mg of vitamin C, respectively. + After 42 days, the tooth growth was measured. + + The ``small_dose``, ``medium_dose``, and ``large_dose`` arrays below record + tooth growth measurements of the three groups in microns. + + >>> import numpy as np + >>> small_dose = np.array([ + ... 4.2, 11.5, 7.3, 5.8, 6.4, 10, 11.2, 11.2, 5.2, 7, + ... 15.2, 21.5, 17.6, 9.7, 14.5, 10, 8.2, 9.4, 16.5, 9.7 + ... ]) + >>> medium_dose = np.array([ + ... 16.5, 16.5, 15.2, 17.3, 22.5, 17.3, 13.6, 14.5, 18.8, 15.5, + ... 19.7, 23.3, 23.6, 26.4, 20, 25.2, 25.8, 21.2, 14.5, 27.3 + ... ]) + >>> large_dose = np.array([ + ... 23.6, 18.5, 33.9, 25.5, 26.4, 32.5, 26.7, 21.5, 23.3, 29.5, + ... 25.5, 26.4, 22.4, 24.5, 24.8, 30.9, 26.4, 27.3, 29.4, 23 + ... ]) + + The `bartlett` statistic is sensitive to differences in variances + between the samples. + + >>> from scipy import stats + >>> res = stats.bartlett(small_dose, medium_dose, large_dose) + >>> res.statistic + 0.6654670663030519 + + The value of the statistic tends to be high when there is a large + difference in variances. + + We can test for inequality of variance among the groups by comparing the + observed value of the statistic against the null distribution: the + distribution of statistic values derived under the null hypothesis that + the population variances of the three groups are equal. + + For this test, the null distribution follows the chi-square distribution + as shown below. + + >>> import matplotlib.pyplot as plt + >>> k = 3 # number of samples + >>> dist = stats.chi2(df=k-1) + >>> val = np.linspace(0, 5, 100) + >>> pdf = dist.pdf(val) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def plot(ax): # we'll reuse this + ... ax.plot(val, pdf, color='C0') + ... ax.set_title("Bartlett Test Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + ... ax.set_xlim(0, 5) + ... ax.set_ylim(0, 1) + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution greater than or equal to the observed value of the + statistic. 
+ + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> pvalue = dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.3f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (1.5, 0.22), (2.25, 0.3), arrowprops=props) + >>> i = val >= res.statistic + >>> ax.fill_between(val[i], y1=0, y2=pdf[i], color='C0') + >>> plt.show() + + >>> res.pvalue + 0.71696121509966 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from distributions with identical variances that produces + such an extreme value of the statistic - this may be taken as evidence + against the null hypothesis in favor of the alternative: the variances of + the groups are not equal. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [6]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + - Small p-values are not evidence for a *large* effect; rather, they can + only provide evidence for a "significant" effect, meaning that they are + unlikely to have occurred under the null hypothesis. + + Note that the chi-square distribution provides the null distribution + when the observations are normally distributed. For small samples + drawn from non-normal populations, it may be more appropriate to + perform a + permutation test: Under the null hypothesis that all three samples were + drawn from the same population, each of the measurements is equally likely + to have been observed in any of the three samples. Therefore, we can form + a randomized null distribution by calculating the statistic under many + randomly-generated partitionings of the observations into the three + samples. + + >>> def statistic(*samples): + ... return stats.bartlett(*samples).statistic + >>> ref = stats.permutation_test( + ... (small_dose, medium_dose, large_dose), statistic, + ... permutation_type='independent', alternative='greater' + ... ) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> bins = np.linspace(0, 5, 25) + >>> ax.hist( + ... ref.null_distribution, bins=bins, density=True, facecolor="C1" + ... ) + >>> ax.legend(['aymptotic approximation\n(many observations)', + ... 'randomized null distribution']) + >>> plot(ax) + >>> plt.show() + + >>> ref.pvalue # randomized test p-value + 0.5387 # may vary + + Note that there is significant disagreement between the p-value calculated + here and the asymptotic approximation returned by `bartlett` above. + The statistical inferences that can be drawn rigorously from a permutation + test are limited; nonetheless, they may be the preferred approach in many + circumstances [7]_. + + Following is another generic example where the null hypothesis would be + rejected. + + Test whether the lists `a`, `b` and `c` come from populations + with equal variances. + + >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99] + >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05] + >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98] + >>> stat, p = stats.bartlett(a, b, c) + >>> p + 1.1254782518834628e-05 + + The very small p-value suggests that the populations do not have equal + variances. 
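+
+    As in the dose example above, this p-value is the chi-square survival
+    function evaluated at the statistic, here with ``k - 1 = 2`` degrees of
+    freedom for the three samples. A quick sketch of that relationship,
+    reusing ``stat`` from the call above:
+
+    .. code-block:: python
+
+        from scipy import stats
+        # reproduces the p-value returned above (up to floating-point error)
+        stats.chi2.sf(stat, df=2)
+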
+ + This is not surprising, given that the sample variance of `b` is much + larger than that of `a` and `c`: + + >>> [np.var(x, ddof=1) for x in [a, b, c]] + [0.007054444444444413, 0.13073888888888888, 0.008890000000000002] + + """ + k = len(samples) + if k < 2: + raise ValueError("Must enter at least two input sample vectors.") + + # Handle empty input and input that is not 1d + for sample in samples: + if np.asanyarray(sample).size == 0: + NaN = _get_nan(*samples) # get NaN of result_dtype of all samples + return BartlettResult(NaN, NaN) + + Ni = np.empty(k) + ssq = np.empty(k, 'd') + for j in range(k): + Ni[j] = len(samples[j]) + ssq[j] = np.var(samples[j], ddof=1) + Ntot = np.sum(Ni, axis=0) + spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k)) + numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0) + denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) - + 1.0/(Ntot - k)) + T = numer / denom + pval = distributions.chi2.sf(T, k - 1) # 1 - cdf + + return BartlettResult(T, pval) + + +LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(LeveneResult, n_samples=None) +def levene(*samples, center='median', proportiontocut=0.05): + r"""Perform Levene test for equal variances. + + The Levene test tests the null hypothesis that all input samples + are from populations with equal variances. Levene's test is an + alternative to Bartlett's test `bartlett` in the case where + there are significant deviations from normality. + + Parameters + ---------- + sample1, sample2, ... : array_like + The sample data, possibly with different lengths. Only one-dimensional + samples are accepted. + center : {'mean', 'median', 'trimmed'}, optional + Which function of the data to use in the test. The default + is 'median'. + proportiontocut : float, optional + When `center` is 'trimmed', this gives the proportion of data points + to cut from each end. (See `scipy.stats.trim_mean`.) + Default is 0.05. + + Returns + ------- + statistic : float + The test statistic. + pvalue : float + The p-value for the test. + + See Also + -------- + fligner : A non-parametric test for the equality of k variances + bartlett : A parametric test for equality of k variances in normal samples + + Notes + ----- + Three variations of Levene's test are possible. The possibilities + and their recommended usages are: + + * 'median' : Recommended for skewed (non-normal) distributions> + * 'mean' : Recommended for symmetric, moderate-tailed distributions. + * 'trimmed' : Recommended for heavy-tailed distributions. + + The test version using the mean was proposed in the original article + of Levene ([2]_) while the median and trimmed mean have been studied by + Brown and Forsythe ([3]_), sometimes also referred to as Brown-Forsythe + test. + + References + ---------- + .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm + .. [2] Levene, H. (1960). In Contributions to Probability and Statistics: + Essays in Honor of Harold Hotelling, I. Olkin et al. eds., + Stanford University Press, pp. 278-292. + .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American + Statistical Association, 69, 364-367 + .. [4] C.I. BLISS (1952), The Statistics of Bioassay: With Special + Reference to the Vitamins, pp 499-503, + :doi:`10.1016/C2013-0-12584-6`. + .. [5] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." 
Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + .. [6] Ludbrook, J., & Dudley, H. (1998). Why permutation tests are + superior to t and F tests in biomedical research. The American + Statistician, 52(2), 127-132. + + Examples + -------- + In [4]_, the influence of vitamin C on the tooth growth of guinea pigs + was investigated. In a control study, 60 subjects were divided into + small dose, medium dose, and large dose groups that received + daily doses of 0.5, 1.0 and 2.0 mg of vitamin C, respectively. + After 42 days, the tooth growth was measured. + + The ``small_dose``, ``medium_dose``, and ``large_dose`` arrays below record + tooth growth measurements of the three groups in microns. + + >>> import numpy as np + >>> small_dose = np.array([ + ... 4.2, 11.5, 7.3, 5.8, 6.4, 10, 11.2, 11.2, 5.2, 7, + ... 15.2, 21.5, 17.6, 9.7, 14.5, 10, 8.2, 9.4, 16.5, 9.7 + ... ]) + >>> medium_dose = np.array([ + ... 16.5, 16.5, 15.2, 17.3, 22.5, 17.3, 13.6, 14.5, 18.8, 15.5, + ... 19.7, 23.3, 23.6, 26.4, 20, 25.2, 25.8, 21.2, 14.5, 27.3 + ... ]) + >>> large_dose = np.array([ + ... 23.6, 18.5, 33.9, 25.5, 26.4, 32.5, 26.7, 21.5, 23.3, 29.5, + ... 25.5, 26.4, 22.4, 24.5, 24.8, 30.9, 26.4, 27.3, 29.4, 23 + ... ]) + + The `levene` statistic is sensitive to differences in variances + between the samples. + + >>> from scipy import stats + >>> res = stats.levene(small_dose, medium_dose, large_dose) + >>> res.statistic + 0.6457341109631506 + + The value of the statistic tends to be high when there is a large + difference in variances. + + We can test for inequality of variance among the groups by comparing the + observed value of the statistic against the null distribution: the + distribution of statistic values derived under the null hypothesis that + the population variances of the three groups are equal. + + For this test, the null distribution follows the F distribution as shown + below. + + >>> import matplotlib.pyplot as plt + >>> k, n = 3, 60 # number of samples, total number of observations + >>> dist = stats.f(dfn=k-1, dfd=n-k) + >>> val = np.linspace(0, 5, 100) + >>> pdf = dist.pdf(val) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def plot(ax): # we'll reuse this + ... ax.plot(val, pdf, color='C0') + ... ax.set_title("Levene Test Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + ... ax.set_xlim(0, 5) + ... ax.set_ylim(0, 1) + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution greater than or equal to the observed value of the + statistic. + + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> pvalue = dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.3f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (1.5, 0.22), (2.25, 0.3), arrowprops=props) + >>> i = val >= res.statistic + >>> ax.fill_between(val[i], y1=0, y2=pdf[i], color='C0') + >>> plt.show() + + >>> res.pvalue + 0.5280694573759905 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from distributions with identical variances that produces + such an extreme value of the statistic - this may be taken as evidence + against the null hypothesis in favor of the alternative: the variances of + the groups are not equal. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. 
+ - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [5]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + - Small p-values are not evidence for a *large* effect; rather, they can + only provide evidence for a "significant" effect, meaning that they are + unlikely to have occurred under the null hypothesis. + + Note that the F distribution provides an asymptotic approximation of the + null distribution. + For small samples, it may be more appropriate to perform a permutation + test: Under the null hypothesis that all three samples were drawn from + the same population, each of the measurements is equally likely to have + been observed in any of the three samples. Therefore, we can form a + randomized null distribution by calculating the statistic under many + randomly-generated partitionings of the observations into the three + samples. + + >>> def statistic(*samples): + ... return stats.levene(*samples).statistic + >>> ref = stats.permutation_test( + ... (small_dose, medium_dose, large_dose), statistic, + ... permutation_type='independent', alternative='greater' + ... ) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> bins = np.linspace(0, 5, 25) + >>> ax.hist( + ... ref.null_distribution, bins=bins, density=True, facecolor="C1" + ... ) + >>> ax.legend(['aymptotic approximation\n(many observations)', + ... 'randomized null distribution']) + >>> plot(ax) + >>> plt.show() + + >>> ref.pvalue # randomized test p-value + 0.4559 # may vary + + Note that there is significant disagreement between the p-value calculated + here and the asymptotic approximation returned by `levene` above. + The statistical inferences that can be drawn rigorously from a permutation + test are limited; nonetheless, they may be the preferred approach in many + circumstances [6]_. + + Following is another generic example where the null hypothesis would be + rejected. + + Test whether the lists `a`, `b` and `c` come from populations + with equal variances. + + >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99] + >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05] + >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98] + >>> stat, p = stats.levene(a, b, c) + >>> p + 0.002431505967249681 + + The small p-value suggests that the populations do not have equal + variances. 
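+
+    Here the null distribution is approximated by the F distribution with
+    ``k - 1 = 2`` and ``N - k = 27`` degrees of freedom (three samples of
+    ten observations each). A quick sketch of that relationship, reusing
+    ``stat`` from the call above:
+
+    .. code-block:: python
+
+        from scipy import stats
+        # reproduces the p-value returned above (up to floating-point error)
+        stats.f.sf(stat, dfn=2, dfd=27)
+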
+ + This is not surprising, given that the sample variance of `b` is much + larger than that of `a` and `c`: + + >>> [np.var(x, ddof=1) for x in [a, b, c]] + [0.007054444444444413, 0.13073888888888888, 0.008890000000000002] + + """ + if center not in ['mean', 'median', 'trimmed']: + raise ValueError("center must be 'mean', 'median' or 'trimmed'.") + + k = len(samples) + if k < 2: + raise ValueError("Must enter at least two input sample vectors.") + + Ni = np.empty(k) + Yci = np.empty(k, 'd') + + if center == 'median': + + def func(x): + return np.median(x, axis=0) + + elif center == 'mean': + + def func(x): + return np.mean(x, axis=0) + + else: # center == 'trimmed' + samples = tuple(_stats_py.trimboth(np.sort(sample), proportiontocut) + for sample in samples) + + def func(x): + return np.mean(x, axis=0) + + for j in range(k): + Ni[j] = len(samples[j]) + Yci[j] = func(samples[j]) + Ntot = np.sum(Ni, axis=0) + + # compute Zij's + Zij = [None] * k + for i in range(k): + Zij[i] = abs(asarray(samples[i]) - Yci[i]) + + # compute Zbari + Zbari = np.empty(k, 'd') + Zbar = 0.0 + for i in range(k): + Zbari[i] = np.mean(Zij[i], axis=0) + Zbar += Zbari[i] * Ni[i] + + Zbar /= Ntot + numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0) + + # compute denom_variance + dvar = 0.0 + for i in range(k): + dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0) + + denom = (k - 1.0) * dvar + + W = numer / denom + pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf + return LeveneResult(W, pval) + + +def _apply_func(x, g, func): + # g is list of indices into x + # separating x into different groups + # func should be applied over the groups + g = unique(r_[0, g, len(x)]) + output = [func(x[g[k]:g[k+1]]) for k in range(len(g) - 1)] + + return asarray(output) + + +FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue')) + + +@_axis_nan_policy_factory(FlignerResult, n_samples=None) +def fligner(*samples, center='median', proportiontocut=0.05): + r"""Perform Fligner-Killeen test for equality of variance. + + Fligner's test tests the null hypothesis that all input samples + are from populations with equal variances. Fligner-Killeen's test is + distribution free when populations are identical [2]_. + + Parameters + ---------- + sample1, sample2, ... : array_like + Arrays of sample data. Need not be the same length. + center : {'mean', 'median', 'trimmed'}, optional + Keyword argument controlling which function of the data is used in + computing the test statistic. The default is 'median'. + proportiontocut : float, optional + When `center` is 'trimmed', this gives the proportion of data points + to cut from each end. (See `scipy.stats.trim_mean`.) + Default is 0.05. + + Returns + ------- + statistic : float + The test statistic. + pvalue : float + The p-value for the hypothesis test. + + See Also + -------- + bartlett : A parametric test for equality of k variances in normal samples + levene : A robust parametric test for equality of k variances + + Notes + ----- + As with Levene's test there are three variants of Fligner's test that + differ by the measure of central tendency used in the test. See `levene` + for more information. + + Conover et al. (1981) examine many of the existing parametric and + nonparametric tests by extensive simulations and they conclude that the + tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be + superior in terms of robustness of departures from normality and power + [3]_. + + References + ---------- + .. [1] Park, C. and Lindsay, B. G. (1999). 
Robust Scale Estimation and + Hypothesis Testing based on Quadratic Inference Function. Technical + Report #99-03, Center for Likelihood Studies, Pennsylvania State + University. + https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf + .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample + tests for scale. 'Journal of the American Statistical Association.' + 71(353), 210-213. + .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and + Hypothesis Testing based on Quadratic Inference Function. Technical + Report #99-03, Center for Likelihood Studies, Pennsylvania State + University. + .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A + comparative study of tests for homogeneity of variances, with + applications to the outer continental shelf bidding data. + Technometrics, 23(4), 351-361. + .. [5] C.I. BLISS (1952), The Statistics of Bioassay: With Special + Reference to the Vitamins, pp 499-503, + :doi:`10.1016/C2013-0-12584-6`. + .. [6] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly + Drawn." Statistical Applications in Genetics and Molecular Biology + 9.1 (2010). + .. [7] Ludbrook, J., & Dudley, H. (1998). Why permutation tests are + superior to t and F tests in biomedical research. The American + Statistician, 52(2), 127-132. + + Examples + -------- + In [5]_, the influence of vitamin C on the tooth growth of guinea pigs + was investigated. In a control study, 60 subjects were divided into + small dose, medium dose, and large dose groups that received + daily doses of 0.5, 1.0 and 2.0 mg of vitamin C, respectively. + After 42 days, the tooth growth was measured. + + The ``small_dose``, ``medium_dose``, and ``large_dose`` arrays below record + tooth growth measurements of the three groups in microns. + + >>> import numpy as np + >>> small_dose = np.array([ + ... 4.2, 11.5, 7.3, 5.8, 6.4, 10, 11.2, 11.2, 5.2, 7, + ... 15.2, 21.5, 17.6, 9.7, 14.5, 10, 8.2, 9.4, 16.5, 9.7 + ... ]) + >>> medium_dose = np.array([ + ... 16.5, 16.5, 15.2, 17.3, 22.5, 17.3, 13.6, 14.5, 18.8, 15.5, + ... 19.7, 23.3, 23.6, 26.4, 20, 25.2, 25.8, 21.2, 14.5, 27.3 + ... ]) + >>> large_dose = np.array([ + ... 23.6, 18.5, 33.9, 25.5, 26.4, 32.5, 26.7, 21.5, 23.3, 29.5, + ... 25.5, 26.4, 22.4, 24.5, 24.8, 30.9, 26.4, 27.3, 29.4, 23 + ... ]) + + The `fligner` statistic is sensitive to differences in variances + between the samples. + + >>> from scipy import stats + >>> res = stats.fligner(small_dose, medium_dose, large_dose) + >>> res.statistic + 1.3878943408857916 + + The value of the statistic tends to be high when there is a large + difference in variances. + + We can test for inequality of variance among the groups by comparing the + observed value of the statistic against the null distribution: the + distribution of statistic values derived under the null hypothesis that + the population variances of the three groups are equal. + + For this test, the null distribution follows the chi-square distribution + as shown below. + + >>> import matplotlib.pyplot as plt + >>> k = 3 # number of samples + >>> dist = stats.chi2(df=k-1) + >>> val = np.linspace(0, 8, 100) + >>> pdf = dist.pdf(val) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> def plot(ax): # we'll reuse this + ... ax.plot(val, pdf, color='C0') + ... ax.set_title("Fligner Test Null Distribution") + ... ax.set_xlabel("statistic") + ... ax.set_ylabel("probability density") + ... ax.set_xlim(0, 8) + ... 
ax.set_ylim(0, 0.5) + >>> plot(ax) + >>> plt.show() + + The comparison is quantified by the p-value: the proportion of values in + the null distribution greater than or equal to the observed value of the + statistic. + + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> pvalue = dist.sf(res.statistic) + >>> annotation = (f'p-value={pvalue:.4f}\n(shaded area)') + >>> props = dict(facecolor='black', width=1, headwidth=5, headlength=8) + >>> _ = ax.annotate(annotation, (1.5, 0.22), (2.25, 0.3), arrowprops=props) + >>> i = val >= res.statistic + >>> ax.fill_between(val[i], y1=0, y2=pdf[i], color='C0') + >>> plt.show() + + >>> res.pvalue + 0.49960016501182125 + + If the p-value is "small" - that is, if there is a low probability of + sampling data from distributions with identical variances that produces + such an extreme value of the statistic - this may be taken as evidence + against the null hypothesis in favor of the alternative: the variances of + the groups are not equal. Note that: + + - The inverse is not true; that is, the test is not used to provide + evidence for the null hypothesis. + - The threshold for values that will be considered "small" is a choice that + should be made before the data is analyzed [6]_ with consideration of the + risks of both false positives (incorrectly rejecting the null hypothesis) + and false negatives (failure to reject a false null hypothesis). + - Small p-values are not evidence for a *large* effect; rather, they can + only provide evidence for a "significant" effect, meaning that they are + unlikely to have occurred under the null hypothesis. + + Note that the chi-square distribution provides an asymptotic approximation + of the null distribution. + For small samples, it may be more appropriate to perform a + permutation test: Under the null hypothesis that all three samples were + drawn from the same population, each of the measurements is equally likely + to have been observed in any of the three samples. Therefore, we can form + a randomized null distribution by calculating the statistic under many + randomly-generated partitionings of the observations into the three + samples. + + >>> def statistic(*samples): + ... return stats.fligner(*samples).statistic + >>> ref = stats.permutation_test( + ... (small_dose, medium_dose, large_dose), statistic, + ... permutation_type='independent', alternative='greater' + ... ) + >>> fig, ax = plt.subplots(figsize=(8, 5)) + >>> plot(ax) + >>> bins = np.linspace(0, 8, 25) + >>> ax.hist( + ... ref.null_distribution, bins=bins, density=True, facecolor="C1" + ... ) + >>> ax.legend(['aymptotic approximation\n(many observations)', + ... 'randomized null distribution']) + >>> plot(ax) + >>> plt.show() + + >>> ref.pvalue # randomized test p-value + 0.4332 # may vary + + Note that there is significant disagreement between the p-value calculated + here and the asymptotic approximation returned by `fligner` above. + The statistical inferences that can be drawn rigorously from a permutation + test are limited; nonetheless, they may be the preferred approach in many + circumstances [7]_. + + Following is another generic example where the null hypothesis would be + rejected. + + Test whether the lists `a`, `b` and `c` come from populations + with equal variances. 
+ + >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99] + >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05] + >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98] + >>> stat, p = stats.fligner(a, b, c) + >>> p + 0.00450826080004775 + + The small p-value suggests that the populations do not have equal + variances. + + This is not surprising, given that the sample variance of `b` is much + larger than that of `a` and `c`: + + >>> [np.var(x, ddof=1) for x in [a, b, c]] + [0.007054444444444413, 0.13073888888888888, 0.008890000000000002] + + """ + if center not in ['mean', 'median', 'trimmed']: + raise ValueError("center must be 'mean', 'median' or 'trimmed'.") + + k = len(samples) + if k < 2: + raise ValueError("Must enter at least two input sample vectors.") + + # Handle empty input + for sample in samples: + if sample.size == 0: + NaN = _get_nan(*samples) + return FlignerResult(NaN, NaN) + + if center == 'median': + + def func(x): + return np.median(x, axis=0) + + elif center == 'mean': + + def func(x): + return np.mean(x, axis=0) + + else: # center == 'trimmed' + samples = tuple(_stats_py.trimboth(sample, proportiontocut) + for sample in samples) + + def func(x): + return np.mean(x, axis=0) + + Ni = asarray([len(samples[j]) for j in range(k)]) + Yci = asarray([func(samples[j]) for j in range(k)]) + Ntot = np.sum(Ni, axis=0) + # compute Zij's + Zij = [abs(asarray(samples[i]) - Yci[i]) for i in range(k)] + allZij = [] + g = [0] + for i in range(k): + allZij.extend(list(Zij[i])) + g.append(len(allZij)) + + ranks = _stats_py.rankdata(allZij) + sample = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5) + + # compute Aibar + Aibar = _apply_func(sample, g, np.sum) / Ni + anbar = np.mean(sample, axis=0) + varsq = np.var(sample, axis=0, ddof=1) + Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq + pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf + return FlignerResult(Xsq, pval) + + +@_axis_nan_policy_factory(lambda x1: (x1,), n_samples=4, n_outputs=1) +def _mood_inner_lc(xy, x, diffs, sorted_xy, n, m, N) -> float: + # Obtain the unique values and their frequencies from the pooled samples. + # "a_j, + b_j, = t_j, for j = 1, ... k" where `k` is the number of unique + # classes, and "[t]he number of values associated with the x's and y's in + # the jth class will be denoted by a_j, and b_j respectively." + # (Mielke, 312) + # Reuse previously computed sorted array and `diff` arrays to obtain the + # unique values and counts. Prepend `diffs` with a non-zero to indicate + # that the first element should be marked as not matching what preceded it. + diffs_prep = np.concatenate(([1], diffs)) + # Unique elements are where the was a difference between elements in the + # sorted array + uniques = sorted_xy[diffs_prep != 0] + # The count of each element is the bin size for each set of consecutive + # differences where the difference is zero. Replace nonzero differences + # with 1 and then use the cumulative sum to count the indices. + t = np.bincount(np.cumsum(np.asarray(diffs_prep != 0, dtype=int)))[1:] + k = len(uniques) + js = np.arange(1, k + 1, dtype=int) + # the `b` array mentioned in the paper is not used, outside of the + # calculation of `t`, so we do not need to calculate it separately. Here + # we calculate `a`. In plain language, `a[j]` is the number of values in + # `x` that equal `uniques[j]`. 
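+    # Worked illustration of the bookkeeping above (hypothetical values,
+    # not from the paper): with x = [1, 2] and y = [1, 3], the pooled
+    # sorted values are [1, 1, 2, 3], giving uniques = [1, 2, 3] and
+    # t = [2, 1, 1]; the construction below then yields a = [1, 1, 0].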
+ sorted_xyx = np.sort(np.concatenate((xy, x))) + diffs = np.diff(sorted_xyx) + diffs_prep = np.concatenate(([1], diffs)) + diff_is_zero = np.asarray(diffs_prep != 0, dtype=int) + xyx_counts = np.bincount(np.cumsum(diff_is_zero))[1:] + a = xyx_counts - t + # "Define .. a_0 = b_0 = t_0 = S_0 = 0" (Mielke 312) so we shift `a` + # and `t` arrays over 1 to allow a first element of 0 to accommodate this + # indexing. + t = np.concatenate(([0], t)) + a = np.concatenate(([0], a)) + # S is built from `t`, so it does not need a preceding zero added on. + S = np.cumsum(t) + # define a copy of `S` with a prepending zero for later use to avoid + # the need for indexing. + S_i_m1 = np.concatenate(([0], S[:-1])) + + # Psi, as defined by the 6th unnumbered equation on page 313 (Mielke). + # Note that in the paper there is an error where the denominator `2` is + # squared when it should be the entire equation. + def psi(indicator): + return (indicator - (N + 1)/2)**2 + + # define summation range for use in calculation of phi, as seen in sum + # in the unnumbered equation on the bottom of page 312 (Mielke). + s_lower = S[js - 1] + 1 + s_upper = S[js] + 1 + phi_J = [np.arange(s_lower[idx], s_upper[idx]) for idx in range(k)] + + # for every range in the above array, determine the sum of psi(I) for + # every element in the range. Divide all the sums by `t`. Following the + # last unnumbered equation on page 312. + phis = [np.sum(psi(I_j)) for I_j in phi_J] / t[js] + + # `T` is equal to a[j] * phi[j], per the first unnumbered equation on + # page 312. `phis` is already in the order based on `js`, so we index + # into `a` with `js` as well. + T = sum(phis * a[js]) + + # The approximate statistic + E_0_T = n * (N * N - 1) / 12 + + varM = (m * n * (N + 1.0) * (N ** 2 - 4) / 180 - + m * n / (180 * N * (N - 1)) * np.sum( + t * (t**2 - 1) * (t**2 - 4 + (15 * (N - S - S_i_m1) ** 2)) + )) + + return ((T - E_0_T) / np.sqrt(varM),) + + +def _mood_too_small(samples, kwargs, axis=-1): + x, y = samples + n = x.shape[axis] + m = y.shape[axis] + N = m + n + return N < 3 + + +@_axis_nan_policy_factory(SignificanceResult, n_samples=2, too_small=_mood_too_small) +def mood(x, y, axis=0, alternative="two-sided"): + """Perform Mood's test for equal scale parameters. + + Mood's two-sample test for scale parameters is a non-parametric + test for the null hypothesis that two samples are drawn from the + same distribution with the same scale parameter. + + Parameters + ---------- + x, y : array_like + Arrays of sample data. + axis : int, optional + The axis along which the samples are tested. `x` and `y` can be of + different length along `axis`. + If `axis` is None, `x` and `y` are flattened and the test is done on + all values in the flattened arrays. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. Default is 'two-sided'. + The following options are available: + + * 'two-sided': the scales of the distributions underlying `x` and `y` + are different. + * 'less': the scale of the distribution underlying `x` is less than + the scale of the distribution underlying `y`. + * 'greater': the scale of the distribution underlying `x` is greater + than the scale of the distribution underlying `y`. + + .. versionadded:: 1.7.0 + + Returns + ------- + res : SignificanceResult + An object containing attributes: + + statistic : scalar or ndarray + The z-score for the hypothesis test. For 1-D inputs a scalar is + returned. + pvalue : scalar ndarray + The p-value for the hypothesis test. 
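+
+    For the default two-sided alternative, the returned p-value is the
+    two-tailed standard normal probability of the z-score. A minimal sketch
+    of that relationship (the names and sample sizes below are arbitrary):
+
+    .. code-block:: python
+
+        import numpy as np
+        from scipy import stats
+
+        rng = np.random.default_rng()
+        x = rng.standard_normal(30)
+        y = 2.0 * rng.standard_normal(40)
+        res = stats.mood(x, y)
+        # two-sided p-value equals 2 * norm.sf(|z|)
+        np.isclose(res.pvalue, 2 * stats.norm.sf(abs(res.statistic)))
+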
+ + See Also + -------- + fligner : A non-parametric test for the equality of k variances + ansari : A non-parametric test for the equality of 2 variances + bartlett : A parametric test for equality of k variances in normal samples + levene : A parametric test for equality of k variances + + Notes + ----- + The data are assumed to be drawn from probability distributions ``f(x)`` + and ``f(x/s) / s`` respectively, for some probability density function f. + The null hypothesis is that ``s == 1``. + + For multi-dimensional arrays, if the inputs are of shapes + ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the + resulting z and p values will have shape ``(n0, n2, n3)``. Note that + ``n1`` and ``m1`` don't have to be equal, but the other dimensions do. + + References + ---------- + [1] Mielke, Paul W. "Note on Some Squared Rank Tests with Existing Ties." + Technometrics, vol. 9, no. 2, 1967, pp. 312-14. JSTOR, + https://doi.org/10.2307/1266427. Accessed 18 May 2022. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> rng = np.random.default_rng() + >>> x2 = rng.standard_normal((2, 45, 6, 7)) + >>> x1 = rng.standard_normal((2, 30, 6, 7)) + >>> res = stats.mood(x1, x2, axis=1) + >>> res.pvalue.shape + (2, 6, 7) + + Find the number of points where the difference in scale is not significant: + + >>> (res.pvalue > 0.1).sum() + 78 + + Perform the test with different scales: + + >>> x1 = rng.standard_normal((2, 30)) + >>> x2 = rng.standard_normal((2, 35)) * 10.0 + >>> stats.mood(x1, x2, axis=1) + SignificanceResult(statistic=array([-5.76174136, -6.12650783]), + pvalue=array([8.32505043e-09, 8.98287869e-10])) + + """ + x = np.asarray(x, dtype=float) + y = np.asarray(y, dtype=float) + + if axis < 0: + axis = x.ndim + axis + + # Determine shape of the result arrays + res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis]) + if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if + ax != axis])): + raise ValueError("Dimensions of x and y on all axes except `axis` " + "should match") + + n = x.shape[axis] + m = y.shape[axis] + N = m + n + if N < 3: + raise ValueError("Not enough observations.") + + xy = np.concatenate((x, y), axis=axis) + # determine if any of the samples contain ties + sorted_xy = np.sort(xy, axis=axis) + diffs = np.diff(sorted_xy, axis=axis) + if 0 in diffs: + z = np.asarray(_mood_inner_lc(xy, x, diffs, sorted_xy, n, m, N, + axis=axis)) + else: + if axis != 0: + xy = np.moveaxis(xy, axis, 0) + + xy = xy.reshape(xy.shape[0], -1) + # Generalized to the n-dimensional case by adding the axis argument, + # and using for loops, since rankdata is not vectorized. For improving + # performance consider vectorizing rankdata function. + all_ranks = np.empty_like(xy) + for j in range(xy.shape[1]): + all_ranks[:, j] = _stats_py.rankdata(xy[:, j]) + + Ri = all_ranks[:n] + M = np.sum((Ri - (N + 1.0) / 2) ** 2, axis=0) + # Approx stat. 
+ mnM = n * (N * N - 1.0) / 12 + varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180 + z = (M - mnM) / sqrt(varM) + pval = _get_pvalue(z, distributions.norm, alternative) + + if res_shape == (): + # Return scalars, not 0-D arrays + z = z[0] + pval = pval[0] + else: + z.shape = res_shape + pval.shape = res_shape + return SignificanceResult(z[()], pval[()]) + + +WilcoxonResult = _make_tuple_bunch('WilcoxonResult', ['statistic', 'pvalue']) + + +def wilcoxon_result_unpacker(res): + if hasattr(res, 'zstatistic'): + return res.statistic, res.pvalue, res.zstatistic + else: + return res.statistic, res.pvalue + + +def wilcoxon_result_object(statistic, pvalue, zstatistic=None): + res = WilcoxonResult(statistic, pvalue) + if zstatistic is not None: + res.zstatistic = zstatistic + return res + + +def wilcoxon_outputs(kwds): + method = kwds.get('method', 'auto') + if method == 'approx': + return 3 + return 2 + + +@_rename_parameter("mode", "method") +@_axis_nan_policy_factory( + wilcoxon_result_object, paired=True, + n_samples=lambda kwds: 2 if kwds.get('y', None) is not None else 1, + result_to_tuple=wilcoxon_result_unpacker, n_outputs=wilcoxon_outputs, +) +def wilcoxon(x, y=None, zero_method="wilcox", correction=False, + alternative="two-sided", method='auto', *, axis=0): + """Calculate the Wilcoxon signed-rank test. + + The Wilcoxon signed-rank test tests the null hypothesis that two + related paired samples come from the same distribution. In particular, + it tests whether the distribution of the differences ``x - y`` is symmetric + about zero. It is a non-parametric version of the paired T-test. + + Parameters + ---------- + x : array_like + Either the first set of measurements (in which case ``y`` is the second + set of measurements), or the differences between two sets of + measurements (in which case ``y`` is not to be specified.) Must be + one-dimensional. + y : array_like, optional + Either the second set of measurements (if ``x`` is the first set of + measurements), or not specified (if ``x`` is the differences between + two sets of measurements.) Must be one-dimensional. + + .. warning:: + When `y` is provided, `wilcoxon` calculates the test statistic + based on the ranks of the absolute values of ``d = x - y``. + Roundoff error in the subtraction can result in elements of ``d`` + being assigned different ranks even when they would be tied with + exact arithmetic. Rather than passing `x` and `y` separately, + consider computing the difference ``x - y``, rounding as needed to + ensure that only truly unique elements are numerically distinct, + and passing the result as `x`, leaving `y` at the default (None). + + zero_method : {"wilcox", "pratt", "zsplit"}, optional + There are different conventions for handling pairs of observations + with equal values ("zero-differences", or "zeros"). + + * "wilcox": Discards all zero-differences (default); see [4]_. + * "pratt": Includes zero-differences in the ranking process, + but drops the ranks of the zeros (more conservative); see [3]_. + In this case, the normal approximation is adjusted as in [5]_. + * "zsplit": Includes zero-differences in the ranking process and + splits the zero rank between positive and negative ones. + + correction : bool, optional + If True, apply continuity correction by adjusting the Wilcoxon rank + statistic by 0.5 towards the mean value when computing the + z-statistic if a normal approximation is used. Default is False. + alternative : {"two-sided", "greater", "less"}, optional + Defines the alternative hypothesis. 
Default is 'two-sided'. + In the following, let ``d`` represent the difference between the paired + samples: ``d = x - y`` if both ``x`` and ``y`` are provided, or + ``d = x`` otherwise. + + * 'two-sided': the distribution underlying ``d`` is not symmetric + about zero. + * 'less': the distribution underlying ``d`` is stochastically less + than a distribution symmetric about zero. + * 'greater': the distribution underlying ``d`` is stochastically + greater than a distribution symmetric about zero. + + method : {"auto", "exact", "approx"} or `PermutationMethod` instance, optional + Method to calculate the p-value, see Notes. Default is "auto". + + axis : int or None, default: 0 + If an int, the axis of the input along which to compute the statistic. + The statistic of each axis-slice (e.g. row) of the input will appear + in a corresponding element of the output. If ``None``, the input will + be raveled before computing the statistic. + + Returns + ------- + An object with the following attributes. + + statistic : array_like + If `alternative` is "two-sided", the sum of the ranks of the + differences above or below zero, whichever is smaller. + Otherwise the sum of the ranks of the differences above zero. + pvalue : array_like + The p-value for the test depending on `alternative` and `method`. + zstatistic : array_like + When ``method = 'approx'``, this is the normalized z-statistic:: + + z = (T - mn - d) / se + + where ``T`` is `statistic` as defined above, ``mn`` is the mean of the + distribution under the null hypothesis, ``d`` is a continuity + correction, and ``se`` is the standard error. + When ``method != 'approx'``, this attribute is not available. + + See Also + -------- + kruskal, mannwhitneyu + + Notes + ----- + In the following, let ``d`` represent the difference between the paired + samples: ``d = x - y`` if both ``x`` and ``y`` are provided, or ``d = x`` + otherwise. Assume that all elements of ``d`` are independent and + identically distributed observations, and all are distinct and nonzero. + + - When ``len(d)`` is sufficiently large, the null distribution of the + normalized test statistic (`zstatistic` above) is approximately normal, + and ``method = 'approx'`` can be used to compute the p-value. + + - When ``len(d)`` is small, the normal approximation may not be accurate, + and ``method='exact'`` is preferred (at the cost of additional + execution time). + + - The default, ``method='auto'``, selects between the two: when + ``len(d) <= 50`` and there are no zeros, the exact method is used; + otherwise, the approximate method is used. + + The presence of "ties" (i.e. not all elements of ``d`` are unique) or + "zeros" (i.e. elements of ``d`` are zero) changes the null distribution + of the test statistic, and ``method='exact'`` no longer calculates + the exact p-value. If ``method='approx'``, the z-statistic is adjusted + for more accurate comparison against the standard normal, but still, + for finite sample sizes, the standard normal is only an approximation of + the true null distribution of the z-statistic. For such situations, the + `method` parameter also accepts instances `PermutationMethod`. In this + case, the p-value is computed using `permutation_test` with the provided + configuration options and other appropriate settings. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test + .. [2] Conover, W.J., Practical Nonparametric Statistics, 1971. + .. 
[3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed + Rank Procedures, Journal of the American Statistical Association, + Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526` + .. [4] Wilcoxon, F., Individual Comparisons by Ranking Methods, + Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968` + .. [5] Cureton, E.E., The Normal Approximation to the Signed-Rank + Sampling Distribution When Zero Differences are Present, + Journal of the American Statistical Association, Vol. 62, 1967, + pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917` + + Examples + -------- + In [4]_, the differences in height between cross- and self-fertilized + corn plants is given as follows: + + >>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75] + + Cross-fertilized plants appear to be higher. To test the null + hypothesis that there is no height difference, we can apply the + two-sided test: + + >>> from scipy.stats import wilcoxon + >>> res = wilcoxon(d) + >>> res.statistic, res.pvalue + (24.0, 0.041259765625) + + Hence, we would reject the null hypothesis at a confidence level of 5%, + concluding that there is a difference in height between the groups. + To confirm that the median of the differences can be assumed to be + positive, we use: + + >>> res = wilcoxon(d, alternative='greater') + >>> res.statistic, res.pvalue + (96.0, 0.0206298828125) + + This shows that the null hypothesis that the median is negative can be + rejected at a confidence level of 5% in favor of the alternative that + the median is greater than zero. The p-values above are exact. Using the + normal approximation gives very similar values: + + >>> res = wilcoxon(d, method='approx') + >>> res.statistic, res.pvalue + (24.0, 0.04088813291185591) + + Note that the statistic changed to 96 in the one-sided case (the sum + of ranks of positive differences) whereas it is 24 in the two-sided + case (the minimum of sum of ranks above and below zero). + + In the example above, the differences in height between paired plants are + provided to `wilcoxon` directly. Alternatively, `wilcoxon` accepts two + samples of equal length, calculates the differences between paired + elements, then performs the test. Consider the samples ``x`` and ``y``: + + >>> import numpy as np + >>> x = np.array([0.5, 0.825, 0.375, 0.5]) + >>> y = np.array([0.525, 0.775, 0.325, 0.55]) + >>> res = wilcoxon(x, y, alternative='greater') + >>> res + WilcoxonResult(statistic=5.0, pvalue=0.5625) + + Note that had we calculated the differences by hand, the test would have + produced different results: + + >>> d = [-0.025, 0.05, 0.05, -0.05] + >>> ref = wilcoxon(d, alternative='greater') + >>> ref + WilcoxonResult(statistic=6.0, pvalue=0.4375) + + The substantial difference is due to roundoff error in the results of + ``x-y``: + + >>> d - (x-y) + array([2.08166817e-17, 6.93889390e-17, 1.38777878e-17, 4.16333634e-17]) + + Even though we expected all the elements of ``(x-y)[1:]`` to have the same + magnitude ``0.05``, they have slightly different magnitudes in practice, + and therefore are assigned different ranks in the test. Before performing + the test, consider calculating ``d`` and adjusting it as necessary to + ensure that theoretically identically values are not numerically distinct. 
+ For example: + + >>> d2 = np.around(x - y, decimals=3) + >>> wilcoxon(d2, alternative='greater') + WilcoxonResult(statistic=6.0, pvalue=0.4375) + + """ + return _wilcoxon._wilcoxon_nd(x, y, zero_method, correction, alternative, + method, axis) + + +MedianTestResult = _make_tuple_bunch( + 'MedianTestResult', + ['statistic', 'pvalue', 'median', 'table'], [] +) + + +def median_test(*samples, ties='below', correction=True, lambda_=1, + nan_policy='propagate'): + """Perform a Mood's median test. + + Test that two or more samples come from populations with the same median. + + Let ``n = len(samples)`` be the number of samples. The "grand median" of + all the data is computed, and a contingency table is formed by + classifying the values in each sample as being above or below the grand + median. The contingency table, along with `correction` and `lambda_`, + are passed to `scipy.stats.chi2_contingency` to compute the test statistic + and p-value. + + Parameters + ---------- + sample1, sample2, ... : array_like + The set of samples. There must be at least two samples. + Each sample must be a one-dimensional sequence containing at least + one value. The samples are not required to have the same length. + ties : str, optional + Determines how values equal to the grand median are classified in + the contingency table. The string must be one of:: + + "below": + Values equal to the grand median are counted as "below". + "above": + Values equal to the grand median are counted as "above". + "ignore": + Values equal to the grand median are not counted. + + The default is "below". + correction : bool, optional + If True, *and* there are just two samples, apply Yates' correction + for continuity when computing the test statistic associated with + the contingency table. Default is True. + lambda_ : float or str, optional + By default, the statistic computed in this test is Pearson's + chi-squared statistic. `lambda_` allows a statistic from the + Cressie-Read power divergence family to be used instead. See + `power_divergence` for details. + Default is 1 (Pearson's chi-squared statistic). + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' returns nan, + 'raise' throws an error, 'omit' performs the calculations ignoring nan + values. Default is 'propagate'. + + Returns + ------- + res : MedianTestResult + An object containing attributes: + + statistic : float + The test statistic. The statistic that is returned is determined + by `lambda_`. The default is Pearson's chi-squared statistic. + pvalue : float + The p-value of the test. + median : float + The grand median. + table : ndarray + The contingency table. The shape of the table is (2, n), where + n is the number of samples. The first row holds the counts of the + values above the grand median, and the second row holds the counts + of the values below the grand median. The table allows further + analysis with, for example, `scipy.stats.chi2_contingency`, or with + `scipy.stats.fisher_exact` if there are two samples, without having + to recompute the table. If ``nan_policy`` is "propagate" and there + are nans in the input, the return value for ``table`` is ``None``. + + See Also + -------- + kruskal : Compute the Kruskal-Wallis H-test for independent samples. + mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y. + + Notes + ----- + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Mood, A. M., Introduction to the Theory of Statistics. 
McGraw-Hill + (1950), pp. 394-399. + .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010). + See Sections 8.12 and 10.15. + + Examples + -------- + A biologist runs an experiment in which there are three groups of plants. + Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants. + Each plant produces a number of seeds. The seed counts for each group + are:: + + Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49 + Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99 + Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84 + + The following code applies Mood's median test to these samples. + + >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49] + >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99] + >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84] + >>> from scipy.stats import median_test + >>> res = median_test(g1, g2, g3) + + The median is + + >>> res.median + 34.0 + + and the contingency table is + + >>> res.table + array([[ 5, 10, 7], + [11, 5, 10]]) + + `p` is too large to conclude that the medians are not the same: + + >>> res.pvalue + 0.12609082774093244 + + The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to + `median_test`. + + >>> res = median_test(g1, g2, g3, lambda_="log-likelihood") + >>> res.pvalue + 0.12224779737117837 + + The median occurs several times in the data, so we'll get a different + result if, for example, ``ties="above"`` is used: + + >>> res = median_test(g1, g2, g3, ties="above") + >>> res.pvalue + 0.063873276069553273 + + >>> res.table + array([[ 5, 11, 9], + [11, 4, 8]]) + + This example demonstrates that if the data set is not large and there + are values equal to the median, the p-value can be sensitive to the + choice of `ties`. + + """ + if len(samples) < 2: + raise ValueError('median_test requires two or more samples.') + + ties_options = ['below', 'above', 'ignore'] + if ties not in ties_options: + raise ValueError(f"invalid 'ties' option '{ties}'; 'ties' must be one " + f"of: {str(ties_options)[1:-1]}") + + data = [np.asarray(sample) for sample in samples] + + # Validate the sizes and shapes of the arguments. + for k, d in enumerate(data): + if d.size == 0: + raise ValueError("Sample %d is empty. All samples must " + "contain at least one value." % (k + 1)) + if d.ndim != 1: + raise ValueError("Sample %d has %d dimensions. All " + "samples must be one-dimensional sequences." % + (k + 1, d.ndim)) + + cdata = np.concatenate(data) + contains_nan, nan_policy = _contains_nan(cdata, nan_policy) + if contains_nan and nan_policy == 'propagate': + return MedianTestResult(np.nan, np.nan, np.nan, None) + + if contains_nan: + grand_median = np.median(cdata[~np.isnan(cdata)]) + else: + grand_median = np.median(cdata) + # When the minimum version of numpy supported by scipy is 1.9.0, + # the above if/else statement can be replaced by the single line: + # grand_median = np.nanmedian(cdata) + + # Create the contingency table. + table = np.zeros((2, len(data)), dtype=np.int64) + for k, sample in enumerate(data): + sample = sample[~np.isnan(sample)] + + nabove = count_nonzero(sample > grand_median) + nbelow = count_nonzero(sample < grand_median) + nequal = sample.size - (nabove + nbelow) + table[0, k] += nabove + table[1, k] += nbelow + if ties == "below": + table[1, k] += nequal + elif ties == "above": + table[0, k] += nequal + + # Check that no row or column of the table is all zero. 
+ # Such a table can not be given to chi2_contingency, because it would have + # a zero in the table of expected frequencies. + rowsums = table.sum(axis=1) + if rowsums[0] == 0: + raise ValueError("All values are below the grand median (%r)." % + grand_median) + if rowsums[1] == 0: + raise ValueError("All values are above the grand median (%r)." % + grand_median) + if ties == "ignore": + # We already checked that each sample has at least one value, but it + # is possible that all those values equal the grand median. If `ties` + # is "ignore", that would result in a column of zeros in `table`. We + # check for that case here. + zero_cols = np.nonzero((table == 0).all(axis=0))[0] + if len(zero_cols) > 0: + msg = ("All values in sample %d are equal to the grand " + "median (%r), so they are ignored, resulting in an " + "empty sample." % (zero_cols[0] + 1, grand_median)) + raise ValueError(msg) + + stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_, + correction=correction) + return MedianTestResult(stat, p, grand_median, table) + + +def _circfuncs_common(samples, high, low): + # Ensure samples are array-like and size is not zero + if samples.size == 0: + NaN = _get_nan(samples) + return NaN, NaN, NaN + + # Recast samples as radians that range between 0 and 2 pi and calculate + # the sine and cosine + sin_samp = sin((samples - low)*2.*pi / (high - low)) + cos_samp = cos((samples - low)*2.*pi / (high - low)) + + return samples, sin_samp, cos_samp + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, default_axis=None, + result_to_tuple=lambda x: (x,) +) +def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'): + """Compute the circular mean for samples in a range. + + Parameters + ---------- + samples : array_like + Input array. + high : float or int, optional + High boundary for the sample range. Default is ``2*pi``. + low : float or int, optional + Low boundary for the sample range. Default is 0. + + Returns + ------- + circmean : float + Circular mean. + + See Also + -------- + circstd : Circular standard deviation. + circvar : Circular variance. + + Examples + -------- + For simplicity, all angles are printed out in degrees. + + >>> import numpy as np + >>> from scipy.stats import circmean + >>> import matplotlib.pyplot as plt + >>> angles = np.deg2rad(np.array([20, 30, 330])) + >>> circmean = circmean(angles) + >>> np.rad2deg(circmean) + 7.294976657784009 + + >>> mean = angles.mean() + >>> np.rad2deg(mean) + 126.66666666666666 + + Plot and compare the circular mean against the arithmetic mean. + + >>> plt.plot(np.cos(np.linspace(0, 2*np.pi, 500)), + ... np.sin(np.linspace(0, 2*np.pi, 500)), + ... c='k') + >>> plt.scatter(np.cos(angles), np.sin(angles), c='k') + >>> plt.scatter(np.cos(circmean), np.sin(circmean), c='b', + ... label='circmean') + >>> plt.scatter(np.cos(mean), np.sin(mean), c='r', label='mean') + >>> plt.legend() + >>> plt.axis('equal') + >>> plt.show() + + """ + samples, sin_samp, cos_samp = _circfuncs_common(samples, high, low) + sin_sum = sin_samp.sum(axis) + cos_sum = cos_samp.sum(axis) + res = arctan2(sin_sum, cos_sum) + + res = np.asarray(res) + res[res < 0] += 2*pi + res = res[()] + + return res*(high - low)/2.0/pi + low + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, default_axis=None, + result_to_tuple=lambda x: (x,) +) +def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'): + """Compute the circular variance for samples assumed to be in a range. 
+ + Parameters + ---------- + samples : array_like + Input array. + high : float or int, optional + High boundary for the sample range. Default is ``2*pi``. + low : float or int, optional + Low boundary for the sample range. Default is 0. + + Returns + ------- + circvar : float + Circular variance. + + See Also + -------- + circmean : Circular mean. + circstd : Circular standard deviation. + + Notes + ----- + This uses the following definition of circular variance: ``1-R``, where + ``R`` is the mean resultant vector. The + returned value is in the range [0, 1], 0 standing for no variance, and 1 + for a large variance. In the limit of small angles, this value is similar + to half the 'linear' variance. + + References + ---------- + .. [1] Fisher, N.I. *Statistical analysis of circular data*. Cambridge + University Press, 1993. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import circvar + >>> import matplotlib.pyplot as plt + >>> samples_1 = np.array([0.072, -0.158, 0.077, 0.108, 0.286, + ... 0.133, -0.473, -0.001, -0.348, 0.131]) + >>> samples_2 = np.array([0.111, -0.879, 0.078, 0.733, 0.421, + ... 0.104, -0.136, -0.867, 0.012, 0.105]) + >>> circvar_1 = circvar(samples_1) + >>> circvar_2 = circvar(samples_2) + + Plot the samples. + + >>> fig, (left, right) = plt.subplots(ncols=2) + >>> for image in (left, right): + ... image.plot(np.cos(np.linspace(0, 2*np.pi, 500)), + ... np.sin(np.linspace(0, 2*np.pi, 500)), + ... c='k') + ... image.axis('equal') + ... image.axis('off') + >>> left.scatter(np.cos(samples_1), np.sin(samples_1), c='k', s=15) + >>> left.set_title(f"circular variance: {np.round(circvar_1, 2)!r}") + >>> right.scatter(np.cos(samples_2), np.sin(samples_2), c='k', s=15) + >>> right.set_title(f"circular variance: {np.round(circvar_2, 2)!r}") + >>> plt.show() + + """ + samples, sin_samp, cos_samp = _circfuncs_common(samples, high, low) + sin_mean = sin_samp.mean(axis) + cos_mean = cos_samp.mean(axis) + # hypot can go slightly above 1 due to rounding errors + with np.errstate(invalid='ignore'): + R = np.minimum(1, hypot(sin_mean, cos_mean)) + + res = 1. - R + return res + + +@_axis_nan_policy_factory( + lambda x: x, n_outputs=1, default_axis=None, + result_to_tuple=lambda x: (x,) +) +def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate', *, + normalize=False): + """ + Compute the circular standard deviation for samples assumed to be in the + range [low to high]. + + Parameters + ---------- + samples : array_like + Input array. + high : float or int, optional + High boundary for the sample range. Default is ``2*pi``. + low : float or int, optional + Low boundary for the sample range. Default is 0. + normalize : boolean, optional + If True, the returned value is equal to ``sqrt(-2*log(R))`` and does + not depend on the variable units. If False (default), the returned + value is scaled by ``((high-low)/(2*pi))``. + + Returns + ------- + circstd : float + Circular standard deviation. + + See Also + -------- + circmean : Circular mean. + circvar : Circular variance. + + Notes + ----- + This uses a definition of circular standard deviation from [1]_. + Essentially, the calculation is as follows. + + .. code-block:: python + + import numpy as np + C = np.cos(samples).mean() + S = np.sin(samples).mean() + R = np.sqrt(C**2 + S**2) + l = 2*np.pi / (high-low) + circstd = np.sqrt(-2*np.log(R)) / l + + In the limit of small angles, it returns a number close to the 'linear' + standard deviation. + + References + ---------- + .. [1] Mardia, K. V. 
(1972). 2. In *Statistics of Directional Data* + (pp. 18-24). Academic Press. :doi:`10.1016/C2013-0-07425-7`. + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import circstd + >>> import matplotlib.pyplot as plt + >>> samples_1 = np.array([0.072, -0.158, 0.077, 0.108, 0.286, + ... 0.133, -0.473, -0.001, -0.348, 0.131]) + >>> samples_2 = np.array([0.111, -0.879, 0.078, 0.733, 0.421, + ... 0.104, -0.136, -0.867, 0.012, 0.105]) + >>> circstd_1 = circstd(samples_1) + >>> circstd_2 = circstd(samples_2) + + Plot the samples. + + >>> fig, (left, right) = plt.subplots(ncols=2) + >>> for image in (left, right): + ... image.plot(np.cos(np.linspace(0, 2*np.pi, 500)), + ... np.sin(np.linspace(0, 2*np.pi, 500)), + ... c='k') + ... image.axis('equal') + ... image.axis('off') + >>> left.scatter(np.cos(samples_1), np.sin(samples_1), c='k', s=15) + >>> left.set_title(f"circular std: {np.round(circstd_1, 2)!r}") + >>> right.plot(np.cos(np.linspace(0, 2*np.pi, 500)), + ... np.sin(np.linspace(0, 2*np.pi, 500)), + ... c='k') + >>> right.scatter(np.cos(samples_2), np.sin(samples_2), c='k', s=15) + >>> right.set_title(f"circular std: {np.round(circstd_2, 2)!r}") + >>> plt.show() + + """ + samples, sin_samp, cos_samp = _circfuncs_common(samples, high, low) + sin_mean = sin_samp.mean(axis) # [1] (2.2.3) + cos_mean = cos_samp.mean(axis) # [1] (2.2.3) + # hypot can go slightly above 1 due to rounding errors + with np.errstate(invalid='ignore'): + R = np.minimum(1, hypot(sin_mean, cos_mean)) # [1] (2.2.4) + + res = sqrt(-2*log(R)) + if not normalize: + res *= (high-low)/(2.*pi) # [1] (2.3.14) w/ (2.3.7) + return res + + +class DirectionalStats: + def __init__(self, mean_direction, mean_resultant_length): + self.mean_direction = mean_direction + self.mean_resultant_length = mean_resultant_length + + def __repr__(self): + return (f"DirectionalStats(mean_direction={self.mean_direction}," + f" mean_resultant_length={self.mean_resultant_length})") + + +def directional_stats(samples, *, axis=0, normalize=True): + """ + Computes sample statistics for directional data. + + Computes the directional mean (also called the mean direction vector) and + mean resultant length of a sample of vectors. + + The directional mean is a measure of "preferred direction" of vector data. + It is analogous to the sample mean, but it is for use when the length of + the data is irrelevant (e.g. unit vectors). + + The mean resultant length is a value between 0 and 1 used to quantify the + dispersion of directional data: the smaller the mean resultant length, the + greater the dispersion. Several definitions of directional variance + involving the mean resultant length are given in [1]_ and [2]_. + + Parameters + ---------- + samples : array_like + Input array. Must be at least two-dimensional, and the last axis of the + input must correspond with the dimensionality of the vector space. + When the input is exactly two dimensional, this means that each row + of the data is a vector observation. + axis : int, default: 0 + Axis along which the directional mean is computed. + normalize: boolean, default: True + If True, normalize the input to ensure that each observation is a + unit vector. It the observations are already unit vectors, consider + setting this to False to avoid unnecessary computation. + + Returns + ------- + res : DirectionalStats + An object containing attributes: + + mean_direction : ndarray + Directional mean. + mean_resultant_length : ndarray + The mean resultant length [1]_. 
+ + See Also + -------- + circmean: circular mean; i.e. directional mean for 2D *angles* + circvar: circular variance; i.e. directional variance for 2D *angles* + + Notes + ----- + This uses a definition of directional mean from [1]_. + Assuming the observations are unit vectors, the calculation is as follows. + + .. code-block:: python + + mean = samples.mean(axis=0) + mean_resultant_length = np.linalg.norm(mean) + mean_direction = mean / mean_resultant_length + + This definition is appropriate for *directional* data (i.e. vector data + for which the magnitude of each observation is irrelevant) but not + for *axial* data (i.e. vector data for which the magnitude and *sign* of + each observation is irrelevant). + + Several definitions of directional variance involving the mean resultant + length ``R`` have been proposed, including ``1 - R`` [1]_, ``1 - R**2`` + [2]_, and ``2 * (1 - R)`` [2]_. Rather than choosing one, this function + returns ``R`` as attribute `mean_resultant_length` so the user can compute + their preferred measure of dispersion. + + References + ---------- + .. [1] Mardia, Jupp. (2000). *Directional Statistics* + (p. 163). Wiley. + + .. [2] https://en.wikipedia.org/wiki/Directional_statistics + + Examples + -------- + >>> import numpy as np + >>> from scipy.stats import directional_stats + >>> data = np.array([[3, 4], # first observation, 2D vector space + ... [6, -8]]) # second observation + >>> dirstats = directional_stats(data) + >>> dirstats.mean_direction + array([1., 0.]) + + In contrast, the regular sample mean of the vectors would be influenced + by the magnitude of each observation. Furthermore, the result would not be + a unit vector. + + >>> data.mean(axis=0) + array([4.5, -2.]) + + An exemplary use case for `directional_stats` is to find a *meaningful* + center for a set of observations on a sphere, e.g. geographical locations. + + >>> data = np.array([[0.8660254, 0.5, 0.], + ... [0.8660254, -0.5, 0.]]) + >>> dirstats = directional_stats(data) + >>> dirstats.mean_direction + array([1., 0., 0.]) + + The regular sample mean on the other hand yields a result which does not + lie on the surface of the sphere. + + >>> data.mean(axis=0) + array([0.8660254, 0., 0.]) + + The function also returns the mean resultant length, which + can be used to calculate a directional variance. For example, using the + definition ``Var(z) = 1 - R`` from [2]_ where ``R`` is the + mean resultant length, we can calculate the directional variance of the + vectors in the above example as: + + >>> 1 - dirstats.mean_resultant_length + 0.13397459716167093 + """ + samples = np.asarray(samples) + if samples.ndim < 2: + raise ValueError("samples must at least be two-dimensional. " + f"Instead samples has shape: {samples.shape!r}") + samples = np.moveaxis(samples, axis, 0) + if normalize: + vectornorms = np.linalg.norm(samples, axis=-1, keepdims=True) + samples = samples/vectornorms + mean = np.mean(samples, axis=0) + mean_resultant_length = np.linalg.norm(mean, axis=-1, keepdims=True) + mean_direction = mean / mean_resultant_length + return DirectionalStats(mean_direction, + mean_resultant_length.squeeze(-1)[()]) + + +def false_discovery_control(ps, *, axis=0, method='bh'): + """Adjust p-values to control the false discovery rate. + + The false discovery rate (FDR) is the expected proportion of rejected null + hypotheses that are actually true. + If the null hypothesis is rejected when the *adjusted* p-value falls below + a specified level, the false discovery rate is controlled at that level. 
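+
+ Concretely, for a one-dimensional array of p-values, the default
+ Benjamini-Hochberg (``'bh'``) adjustment performed by this function can be
+ sketched as follows. This is a simplified, unvectorized illustration; the
+ helper name ``bh_adjust_sketch`` is purely illustrative, and the actual
+ implementation below also handles the `axis` argument and the ``'by'``
+ method.
+
+ .. code-block:: python
+
+     import numpy as np
+
+     def bh_adjust_sketch(ps):
+         ps = np.asarray(ps, dtype=float)
+         m = ps.size
+         order = np.argsort(ps)
+         # p_(i) * m / i for the sorted p-values
+         adjusted = ps[order] * m / np.arange(1, m + 1)
+         # enforce monotonicity, working down from the largest p-value
+         adjusted = np.minimum.accumulate(adjusted[::-1])[::-1]
+         out = np.empty_like(adjusted)
+         out[order] = adjusted  # undo the sorting
+         return np.clip(out, 0, 1)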
+
+ Parameters
+ ----------
+ ps : 1D array_like
+ The p-values to adjust. Elements must be real numbers between 0 and 1.
+ axis : int
+ The axis along which to perform the adjustment. The adjustment is
+ performed independently along each axis-slice. If `axis` is None, `ps`
+ is raveled before performing the adjustment.
+ method : {'bh', 'by'}
+ The false discovery rate control procedure to apply: ``'bh'`` is for
+ Benjamini-Hochberg [1]_ (Eq. 1), ``'by'`` is for Benjamini-Yekutieli
+ [2]_ (Theorem 1.3). The latter is more conservative, but it is
+ guaranteed to control the FDR even when the p-values are not from
+ independent tests.
+
+ Returns
+ -------
+ ps_adjusted : array_like
+ The adjusted p-values. If the null hypothesis is rejected where these
+ fall below a specified level, the false discovery rate is controlled
+ at that level.
+
+ See Also
+ --------
+ combine_pvalues
+ statsmodels.stats.multitest.multipletests
+
+ Notes
+ -----
+ In multiple hypothesis testing, false discovery control procedures tend to
+ offer higher power than familywise error rate control procedures (e.g.
+ Bonferroni correction [1]_).
+
+ If the p-values correspond with independent tests (or tests with
+ "positive regression dependencies" [2]_), rejecting null hypotheses
+ corresponding with Benjamini-Hochberg-adjusted p-values below :math:`q`
+ controls the false discovery rate at a level less than or equal to
+ :math:`q m_0 / m`, where :math:`m_0` is the number of true null hypotheses
+ and :math:`m` is the total number of null hypotheses tested. The same is
+ true even for dependent tests when the p-values are adjusted according to
+ the more conservative Benjamini-Yekutieli procedure.
+
+ The adjusted p-values produced by this function are comparable to those
+ produced by the R function ``p.adjust`` and the statsmodels function
+ `statsmodels.stats.multitest.multipletests`. Please consider the latter
+ for more advanced methods of multiple comparison correction.
+
+ References
+ ----------
+ .. [1] Benjamini, Yoav, and Yosef Hochberg. "Controlling the false
+ discovery rate: a practical and powerful approach to multiple
+ testing." Journal of the Royal Statistical Society: Series B
+ (Methodological) 57.1 (1995): 289-300.
+
+ .. [2] Benjamini, Yoav, and Daniel Yekutieli. "The control of the false
+ discovery rate in multiple testing under dependency." Annals of
+ Statistics (2001): 1165-1188.
+
+ .. [3] TileStats. FDR - Benjamini-Hochberg explained - YouTube.
+ https://www.youtube.com/watch?v=rZKa4tW2NKs.
+
+ .. [4] Neuhaus, Karl-Ludwig, et al. "Improved thrombolysis in acute
+ myocardial infarction with front-loaded administration of alteplase:
+ results of the rt-PA-APSAC patency study (TAPS)." Journal of the
+ American College of Cardiology 19.5 (1992): 885-891.
+
+ Examples
+ --------
+ We follow the example from [1]_.
+
+ Thrombolysis with recombinant tissue-type plasminogen activator (rt-PA)
+ and anisoylated plasminogen streptokinase activator (APSAC) in
+ myocardial infarction has been proved to reduce mortality. [4]_
+ investigated the effects of a new front-loaded administration of rt-PA
+ versus those obtained with a standard regimen of APSAC, in a randomized
+ multicentre trial in 421 patients with acute myocardial infarction.
+
+ There were four families of hypotheses tested in the study, the last of
+ which was "cardiac and other events after the start of thrombolitic
+ treatment".
FDR control may be desired in this family of hypotheses + because it would not be appropriate to conclude that the front-loaded + treatment is better if it is merely equivalent to the previous treatment. + + The p-values corresponding with the 15 hypotheses in this family were + + >>> ps = [0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, + ... 0.0459, 0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000] + + If the chosen significance level is 0.05, we may be tempted to reject the + null hypotheses for the tests corresponding with the first nine p-values, + as the first nine p-values fall below the chosen significance level. + However, this would ignore the problem of "multiplicity": if we fail to + correct for the fact that multiple comparisons are being performed, we + are more likely to incorrectly reject true null hypotheses. + + One approach to the multiplicity problem is to control the family-wise + error rate (FWER), that is, the rate at which the null hypothesis is + rejected when it is actually true. A common procedure of this kind is the + Bonferroni correction [1]_. We begin by multiplying the p-values by the + number of hypotheses tested. + + >>> import numpy as np + >>> np.array(ps) * len(ps) + array([1.5000e-03, 6.0000e-03, 2.8500e-02, 1.4250e-01, 3.0150e-01, + 4.1700e-01, 4.4700e-01, 5.1600e-01, 6.8850e-01, 4.8600e+00, + 6.3930e+00, 8.5785e+00, 9.7920e+00, 1.1385e+01, 1.5000e+01]) + + To control the FWER at 5%, we reject only the hypotheses corresponding + with adjusted p-values less than 0.05. In this case, only the hypotheses + corresponding with the first three p-values can be rejected. According to + [1]_, these three hypotheses concerned "allergic reaction" and "two + different aspects of bleeding." + + An alternative approach is to control the false discovery rate: the + expected fraction of rejected null hypotheses that are actually true. The + advantage of this approach is that it typically affords greater power: an + increased rate of rejecting the null hypothesis when it is indeed false. To + control the false discovery rate at 5%, we apply the Benjamini-Hochberg + p-value adjustment. + + >>> from scipy import stats + >>> stats.false_discovery_control(ps) + array([0.0015 , 0.003 , 0.0095 , 0.035625 , 0.0603 , + 0.06385714, 0.06385714, 0.0645 , 0.0765 , 0.486 , + 0.58118182, 0.714875 , 0.75323077, 0.81321429, 1. ]) + + Now, the first *four* adjusted p-values fall below 0.05, so we would reject + the null hypotheses corresponding with these *four* p-values. Rejection + of the fourth null hypothesis was particularly important to the original + study as it led to the conclusion that the new treatment had a + "substantially lower in-hospital mortality rate." + + """ + # Input Validation and Special Cases + ps = np.asarray(ps) + + ps_in_range = (np.issubdtype(ps.dtype, np.number) + and np.all(ps == np.clip(ps, 0, 1))) + if not ps_in_range: + raise ValueError("`ps` must include only numbers between 0 and 1.") + + methods = {'bh', 'by'} + if method.lower() not in methods: + raise ValueError(f"Unrecognized `method` '{method}'." 
+ f"Method must be one of {methods}.") + method = method.lower() + + if axis is None: + axis = 0 + ps = ps.ravel() + + axis = np.asarray(axis)[()] + if not np.issubdtype(axis.dtype, np.integer) or axis.size != 1: + raise ValueError("`axis` must be an integer or `None`") + + if ps.size <= 1 or ps.shape[axis] <= 1: + return ps[()] + + ps = np.moveaxis(ps, axis, -1) + m = ps.shape[-1] + + # Main Algorithm + # Equivalent to the ideas of [1] and [2], except that this adjusts the + # p-values as described in [3]. The results are similar to those produced + # by R's p.adjust. + + # "Let [ps] be the ordered observed p-values..." + order = np.argsort(ps, axis=-1) + ps = np.take_along_axis(ps, order, axis=-1) # this copies ps + + # Equation 1 of [1] rearranged to reject when p is less than specified q + i = np.arange(1, m+1) + ps *= m / i + + # Theorem 1.3 of [2] + if method == 'by': + ps *= np.sum(1 / i) + + # accounts for rejecting all null hypotheses i for i < k, where k is + # defined in Eq. 1 of either [1] or [2]. See [3]. Starting with the index j + # of the second to last element, we replace element j with element j+1 if + # the latter is smaller. + np.minimum.accumulate(ps[..., ::-1], out=ps[..., ::-1], axis=-1) + + # Restore original order of axes and data + np.put_along_axis(ps, order, values=ps.copy(), axis=-1) + ps = np.moveaxis(ps, -1, axis) + + return np.clip(ps, 0, 1) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_multicomp.py b/venv/lib/python3.10/site-packages/scipy/stats/_multicomp.py new file mode 100644 index 0000000000000000000000000000000000000000..c12ce65a91dbb0a6fed48e06127f8902ca71b9bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_multicomp.py @@ -0,0 +1,459 @@ +from __future__ import annotations + +import warnings +from dataclasses import dataclass, field +from typing import TYPE_CHECKING + +import numpy as np + +from scipy import stats +from scipy.optimize import minimize_scalar +from scipy.stats._common import ConfidenceInterval +from scipy.stats._qmc import check_random_state +from scipy.stats._stats_py import _var + +if TYPE_CHECKING: + import numpy.typing as npt + from scipy._lib._util import DecimalNumber, SeedType + from typing import Literal, Sequence # noqa: UP035 + + +__all__ = [ + 'dunnett' +] + + +@dataclass +class DunnettResult: + """Result object returned by `scipy.stats.dunnett`. + + Attributes + ---------- + statistic : float ndarray + The computed statistic of the test for each comparison. The element + at index ``i`` is the statistic for the comparison between + groups ``i`` and the control. + pvalue : float ndarray + The computed p-value of the test for each comparison. The element + at index ``i`` is the p-value for the comparison between + group ``i`` and the control. + """ + statistic: np.ndarray + pvalue: np.ndarray + _alternative: Literal['two-sided', 'less', 'greater'] = field(repr=False) + _rho: np.ndarray = field(repr=False) + _df: int = field(repr=False) + _std: float = field(repr=False) + _mean_samples: np.ndarray = field(repr=False) + _mean_control: np.ndarray = field(repr=False) + _n_samples: np.ndarray = field(repr=False) + _n_control: int = field(repr=False) + _rng: SeedType = field(repr=False) + _ci: ConfidenceInterval | None = field(default=None, repr=False) + _ci_cl: DecimalNumber | None = field(default=None, repr=False) + + def __str__(self): + # Note: `__str__` prints the confidence intervals from the most + # recent call to `confidence_interval`. 
If it has not been called, + # it will be called with the default CL of .95. + if self._ci is None: + self.confidence_interval(confidence_level=.95) + s = ( + "Dunnett's test" + f" ({self._ci_cl*100:.1f}% Confidence Interval)\n" + "Comparison Statistic p-value Lower CI Upper CI\n" + ) + for i in range(self.pvalue.size): + s += (f" (Sample {i} - Control) {self.statistic[i]:>10.3f}" + f"{self.pvalue[i]:>10.3f}" + f"{self._ci.low[i]:>10.3f}" + f"{self._ci.high[i]:>10.3f}\n") + + return s + + def _allowance( + self, confidence_level: DecimalNumber = 0.95, tol: DecimalNumber = 1e-3 + ) -> float: + """Allowance. + + It is the quantity to add/subtract from the observed difference + between the means of observed groups and the mean of the control + group. The result gives confidence limits. + + Parameters + ---------- + confidence_level : float, optional + Confidence level for the computed confidence interval. + Default is .95. + tol : float, optional + A tolerance for numerical optimization: the allowance will produce + a confidence within ``10*tol*(1 - confidence_level)`` of the + specified level, or a warning will be emitted. Tight tolerances + may be impractical due to noisy evaluation of the objective. + Default is 1e-3. + + Returns + ------- + allowance : float + Allowance around the mean. + """ + alpha = 1 - confidence_level + + def pvalue_from_stat(statistic): + statistic = np.array(statistic) + sf = _pvalue_dunnett( + rho=self._rho, df=self._df, + statistic=statistic, alternative=self._alternative, + rng=self._rng + ) + return abs(sf - alpha)/alpha + + # Evaluation of `pvalue_from_stat` is noisy due to the use of RQMC to + # evaluate `multivariate_t.cdf`. `minimize_scalar` is not designed + # to tolerate a noisy objective function and may fail to find the + # minimum accurately. We mitigate this possibility with the validation + # step below, but implementation of a noise-tolerant root finder or + # minimizer would be a welcome enhancement. See gh-18150. + res = minimize_scalar(pvalue_from_stat, method='brent', tol=tol) + critical_value = res.x + + # validation + # tol*10 because tol=1e-3 means we tolerate a 1% change at most + if res.success is False or res.fun >= tol*10: + warnings.warn( + "Computation of the confidence interval did not converge to " + "the desired level. The confidence level corresponding with " + f"the returned interval is approximately {alpha*(1+res.fun)}.", + stacklevel=3 + ) + + # From [1] p. 1101 between (1) and (3) + allowance = critical_value*self._std*np.sqrt( + 1/self._n_samples + 1/self._n_control + ) + return abs(allowance) + + def confidence_interval( + self, confidence_level: DecimalNumber = 0.95 + ) -> ConfidenceInterval: + """Compute the confidence interval for the specified confidence level. + + Parameters + ---------- + confidence_level : float, optional + Confidence level for the computed confidence interval. + Default is .95. + + Returns + ------- + ci : ``ConfidenceInterval`` object + The object has attributes ``low`` and ``high`` that hold the + lower and upper bounds of the confidence intervals for each + comparison. The high and low values are accessible for each + comparison at index ``i`` for each group ``i``. + + """ + # check to see if the supplied confidence level matches that of the + # previously computed CI. 
+ if (self._ci is not None) and (confidence_level == self._ci_cl): + return self._ci + + if not (0 < confidence_level < 1): + raise ValueError("Confidence level must be between 0 and 1.") + + allowance = self._allowance(confidence_level=confidence_level) + diff_means = self._mean_samples - self._mean_control + + low = diff_means-allowance + high = diff_means+allowance + + if self._alternative == 'greater': + high = [np.inf] * len(diff_means) + elif self._alternative == 'less': + low = [-np.inf] * len(diff_means) + + self._ci_cl = confidence_level + self._ci = ConfidenceInterval( + low=low, + high=high + ) + return self._ci + + +def dunnett( + *samples: npt.ArrayLike, # noqa: D417 + control: npt.ArrayLike, + alternative: Literal['two-sided', 'less', 'greater'] = "two-sided", + random_state: SeedType = None +) -> DunnettResult: + """Dunnett's test: multiple comparisons of means against a control group. + + This is an implementation of Dunnett's original, single-step test as + described in [1]_. + + Parameters + ---------- + sample1, sample2, ... : 1D array_like + The sample measurements for each experimental group. + control : 1D array_like + The sample measurements for the control group. + alternative : {'two-sided', 'less', 'greater'}, optional + Defines the alternative hypothesis. + + The null hypothesis is that the means of the distributions underlying + the samples and control are equal. The following alternative + hypotheses are available (default is 'two-sided'): + + * 'two-sided': the means of the distributions underlying the samples + and control are unequal. + * 'less': the means of the distributions underlying the samples + are less than the mean of the distribution underlying the control. + * 'greater': the means of the distributions underlying the + samples are greater than the mean of the distribution underlying + the control. + random_state : {None, int, `numpy.random.Generator`}, optional + If `random_state` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(random_state)``. + If `random_state` is already a ``Generator`` instance, then the + provided instance is used. + + The random number generator is used to control the randomized + Quasi-Monte Carlo integration of the multivariate-t distribution. + + Returns + ------- + res : `~scipy.stats._result_classes.DunnettResult` + An object containing attributes: + + statistic : float ndarray + The computed statistic of the test for each comparison. The element + at index ``i`` is the statistic for the comparison between + groups ``i`` and the control. + pvalue : float ndarray + The computed p-value of the test for each comparison. The element + at index ``i`` is the p-value for the comparison between + group ``i`` and the control. + + And the following method: + + confidence_interval(confidence_level=0.95) : + Compute the difference in means of the groups + with the control +- the allowance. + + See Also + -------- + tukey_hsd : performs pairwise comparison of means. + + Notes + ----- + Like the independent-sample t-test, Dunnett's test [1]_ is used to make + inferences about the means of distributions from which samples were drawn. + However, when multiple t-tests are performed at a fixed significance level, + the "family-wise error rate" - the probability of incorrectly rejecting the + null hypothesis in at least one test - will exceed the significance level. + Dunnett's test is designed to perform multiple comparisons while + controlling the family-wise error rate. 
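+
+ As a rough illustration of why such control matters (assuming, only for
+ this back-of-the-envelope calculation, that the individual tests were
+ independent, which Dunnett's procedure does not require), the family-wise
+ error rate of ``k`` uncorrected tests each run at level ``alpha`` grows
+ quickly with ``k``:
+
+ .. code-block:: python
+
+     alpha, k = 0.05, 10
+     familywise_error_rate = 1 - (1 - alpha) ** k  # roughly 0.40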
+ + Dunnett's test compares the means of multiple experimental groups + against a single control group. Tukey's Honestly Significant Difference Test + is another multiple-comparison test that controls the family-wise error + rate, but `tukey_hsd` performs *all* pairwise comparisons between groups. + When pairwise comparisons between experimental groups are not needed, + Dunnett's test is preferable due to its higher power. + + + The use of this test relies on several assumptions. + + 1. The observations are independent within and among groups. + 2. The observations within each group are normally distributed. + 3. The distributions from which the samples are drawn have the same finite + variance. + + References + ---------- + .. [1] Charles W. Dunnett. "A Multiple Comparison Procedure for Comparing + Several Treatments with a Control." + Journal of the American Statistical Association, 50:272, 1096-1121, + :doi:`10.1080/01621459.1955.10501294`, 1955. + + Examples + -------- + In [1]_, the influence of drugs on blood count measurements on three groups + of animal is investigated. + + The following table summarizes the results of the experiment in which + two groups received different drugs, and one group acted as a control. + Blood counts (in millions of cells per cubic millimeter) were recorded:: + + >>> import numpy as np + >>> control = np.array([7.40, 8.50, 7.20, 8.24, 9.84, 8.32]) + >>> drug_a = np.array([9.76, 8.80, 7.68, 9.36]) + >>> drug_b = np.array([12.80, 9.68, 12.16, 9.20, 10.55]) + + We would like to see if the means between any of the groups are + significantly different. First, visually examine a box and whisker plot. + + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots(1, 1) + >>> ax.boxplot([control, drug_a, drug_b]) + >>> ax.set_xticklabels(["Control", "Drug A", "Drug B"]) # doctest: +SKIP + >>> ax.set_ylabel("mean") # doctest: +SKIP + >>> plt.show() + + Note the overlapping interquartile ranges of the drug A group and control + group and the apparent separation between the drug B group and control + group. + + Next, we will use Dunnett's test to assess whether the difference + between group means is significant while controlling the family-wise error + rate: the probability of making any false discoveries. + Let the null hypothesis be that the experimental groups have the same + mean as the control and the alternative be that an experimental group does + not have the same mean as the control. We will consider a 5% family-wise + error rate to be acceptable, and therefore we choose 0.05 as the threshold + for significance. + + >>> from scipy.stats import dunnett + >>> res = dunnett(drug_a, drug_b, control=control) + >>> res.pvalue + array([0.62004941, 0.0059035 ]) # may vary + + The p-value corresponding with the comparison between group A and control + exceeds 0.05, so we do not reject the null hypothesis for that comparison. + However, the p-value corresponding with the comparison between group B + and control is less than 0.05, so we consider the experimental results + to be evidence against the null hypothesis in favor of the alternative: + group B has a different mean than the control group. 
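+
+ The same result object can also report confidence intervals for the
+ difference between each experimental group's mean and the control group's
+ mean; exact bounds are not reproduced here because they depend on the
+ randomized quasi-Monte Carlo integration used internally.
+
+ >>> ci = res.confidence_interval(confidence_level=0.95)
+
+ ``ci.low`` and ``ci.high`` each contain one entry per comparison with the
+ control.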
+ + """ + samples_, control_, rng = _iv_dunnett( + samples=samples, control=control, + alternative=alternative, random_state=random_state + ) + + rho, df, n_group, n_samples, n_control = _params_dunnett( + samples=samples_, control=control_ + ) + + statistic, std, mean_control, mean_samples = _statistic_dunnett( + samples_, control_, df, n_samples, n_control + ) + + pvalue = _pvalue_dunnett( + rho=rho, df=df, statistic=statistic, alternative=alternative, rng=rng + ) + + return DunnettResult( + statistic=statistic, pvalue=pvalue, + _alternative=alternative, + _rho=rho, _df=df, _std=std, + _mean_samples=mean_samples, + _mean_control=mean_control, + _n_samples=n_samples, + _n_control=n_control, + _rng=rng + ) + + +def _iv_dunnett( + samples: Sequence[npt.ArrayLike], + control: npt.ArrayLike, + alternative: Literal['two-sided', 'less', 'greater'], + random_state: SeedType +) -> tuple[list[np.ndarray], np.ndarray, SeedType]: + """Input validation for Dunnett's test.""" + rng = check_random_state(random_state) + + if alternative not in {'two-sided', 'less', 'greater'}: + raise ValueError( + "alternative must be 'less', 'greater' or 'two-sided'" + ) + + ndim_msg = "Control and samples groups must be 1D arrays" + n_obs_msg = "Control and samples groups must have at least 1 observation" + + control = np.asarray(control) + samples_ = [np.asarray(sample) for sample in samples] + + # samples checks + samples_control: list[np.ndarray] = samples_ + [control] + for sample in samples_control: + if sample.ndim > 1: + raise ValueError(ndim_msg) + + if sample.size < 1: + raise ValueError(n_obs_msg) + + return samples_, control, rng + + +def _params_dunnett( + samples: list[np.ndarray], control: np.ndarray +) -> tuple[np.ndarray, int, int, np.ndarray, int]: + """Specific parameters for Dunnett's test. + + Degree of freedom is the number of observations minus the number of groups + including the control. + """ + n_samples = np.array([sample.size for sample in samples]) + + # From [1] p. 1100 d.f. = (sum N)-(p+1) + n_sample = n_samples.sum() + n_control = control.size + n = n_sample + n_control + n_groups = len(samples) + df = n - n_groups - 1 + + # From [1] p. 1103 rho_ij = 1/sqrt((N0/Ni+1)(N0/Nj+1)) + rho = n_control/n_samples + 1 + rho = 1/np.sqrt(rho[:, None] * rho[None, :]) + np.fill_diagonal(rho, 1) + + return rho, df, n_groups, n_samples, n_control + + +def _statistic_dunnett( + samples: list[np.ndarray], control: np.ndarray, df: int, + n_samples: np.ndarray, n_control: int +) -> tuple[np.ndarray, float, np.ndarray, np.ndarray]: + """Statistic of Dunnett's test. + + Computation based on the original single-step test from [1]. + """ + mean_control = np.mean(control) + mean_samples = np.array([np.mean(sample) for sample in samples]) + all_samples = [control] + samples + all_means = np.concatenate([[mean_control], mean_samples]) + + # Variance estimate s^2 from [1] Eq. 1 + s2 = np.sum([_var(sample, mean=mean)*sample.size + for sample, mean in zip(all_samples, all_means)]) / df + std = np.sqrt(s2) + + # z score inferred from [1] unlabeled equation after Eq. 1 + z = (mean_samples - mean_control) / np.sqrt(1/n_samples + 1/n_control) + + return z / std, std, mean_control, mean_samples + + +def _pvalue_dunnett( + rho: np.ndarray, df: int, statistic: np.ndarray, + alternative: Literal['two-sided', 'less', 'greater'], + rng: SeedType = None +) -> np.ndarray: + """pvalue from the multivariate t-distribution. + + Critical values come from the multivariate student-t distribution. 
+ """ + statistic = statistic.reshape(-1, 1) + + mvt = stats.multivariate_t(shape=rho, df=df, seed=rng) + if alternative == "two-sided": + statistic = abs(statistic) + pvalue = 1 - mvt.cdf(statistic, lower_limit=-statistic) + elif alternative == "greater": + pvalue = 1 - mvt.cdf(statistic, lower_limit=-np.inf) + else: + pvalue = 1 - mvt.cdf(np.inf, lower_limit=statistic) + + return np.atleast_1d(pvalue) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py b/venv/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py new file mode 100644 index 0000000000000000000000000000000000000000..3dd588c4972b39e720a4359099ff8157460f5497 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_odds_ratio.py @@ -0,0 +1,482 @@ +import numpy as np + +from scipy.special import ndtri +from scipy.optimize import brentq +from ._discrete_distns import nchypergeom_fisher +from ._common import ConfidenceInterval + + +def _sample_odds_ratio(table): + """ + Given a table [[a, b], [c, d]], compute a*d/(b*c). + + Return nan if the numerator and denominator are 0. + Return inf if just the denominator is 0. + """ + # table must be a 2x2 numpy array. + if table[1, 0] > 0 and table[0, 1] > 0: + oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1]) + elif table[0, 0] == 0 or table[1, 1] == 0: + oddsratio = np.nan + else: + oddsratio = np.inf + return oddsratio + + +def _solve(func): + """ + Solve func(nc) = 0. func must be an increasing function. + """ + # We could just as well call the variable `x` instead of `nc`, but we + # always call this function with functions for which nc (the noncentrality + # parameter) is the variable for which we are solving. + nc = 1.0 + value = func(nc) + if value == 0: + return nc + + # Multiplicative factor by which to increase or decrease nc when + # searching for a bracketing interval. + factor = 2.0 + # Find a bracketing interval. + if value > 0: + nc /= factor + while func(nc) > 0: + nc /= factor + lo = nc + hi = factor*nc + else: + nc *= factor + while func(nc) < 0: + nc *= factor + lo = nc/factor + hi = nc + + # lo and hi bracket the solution for nc. + nc = brentq(func, lo, hi, xtol=1e-13) + return nc + + +def _nc_hypergeom_mean_inverse(x, M, n, N): + """ + For the given noncentral hypergeometric parameters x, M, n,and N + (table[0,0], total, row 0 sum and column 0 sum, resp., of a 2x2 + contingency table), find the noncentrality parameter of Fisher's + noncentral hypergeometric distribution whose mean is x. + """ + nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x) + return nc + + +def _hypergeom_params_from_table(table): + # The notation M, n and N is consistent with stats.hypergeom and + # stats.nchypergeom_fisher. + x = table[0, 0] + M = table.sum() + n = table[0].sum() + N = table[:, 0].sum() + return x, M, n, N + + +def _ci_upper(table, alpha): + """ + Compute the upper end of the confidence interval. + """ + if _sample_odds_ratio(table) == np.inf: + return np.inf + + x, M, n, N = _hypergeom_params_from_table(table) + + # nchypergeom_fisher.cdf is a decreasing function of nc, so we negate + # it in the lambda expression. + nc = _solve(lambda nc: -nchypergeom_fisher.cdf(x, M, n, N, nc) + alpha) + return nc + + +def _ci_lower(table, alpha): + """ + Compute the lower end of the confidence interval. 
+ """ + if _sample_odds_ratio(table) == 0: + return 0 + + x, M, n, N = _hypergeom_params_from_table(table) + + nc = _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha) + return nc + + +def _conditional_oddsratio(table): + """ + Conditional MLE of the odds ratio for the 2x2 contingency table. + """ + x, M, n, N = _hypergeom_params_from_table(table) + # Get the bounds of the support. The support of the noncentral + # hypergeometric distribution with parameters M, n, and N is the same + # for all values of the noncentrality parameter, so we can use 1 here. + lo, hi = nchypergeom_fisher.support(M, n, N, 1) + + # Check if x is at one of the extremes of the support. If so, we know + # the odds ratio is either 0 or inf. + if x == lo: + # x is at the low end of the support. + return 0 + if x == hi: + # x is at the high end of the support. + return np.inf + + nc = _nc_hypergeom_mean_inverse(x, M, n, N) + return nc + + +def _conditional_oddsratio_ci(table, confidence_level=0.95, + alternative='two-sided'): + """ + Conditional exact confidence interval for the odds ratio. + """ + if alternative == 'two-sided': + alpha = 0.5*(1 - confidence_level) + lower = _ci_lower(table, alpha) + upper = _ci_upper(table, alpha) + elif alternative == 'less': + lower = 0.0 + upper = _ci_upper(table, 1 - confidence_level) + else: + # alternative == 'greater' + lower = _ci_lower(table, 1 - confidence_level) + upper = np.inf + + return lower, upper + + +def _sample_odds_ratio_ci(table, confidence_level=0.95, + alternative='two-sided'): + oddsratio = _sample_odds_ratio(table) + log_or = np.log(oddsratio) + se = np.sqrt((1/table).sum()) + if alternative == 'less': + z = ndtri(confidence_level) + loglow = -np.inf + loghigh = log_or + z*se + elif alternative == 'greater': + z = ndtri(confidence_level) + loglow = log_or - z*se + loghigh = np.inf + else: + # alternative is 'two-sided' + z = ndtri(0.5*confidence_level + 0.5) + loglow = log_or - z*se + loghigh = log_or + z*se + + return np.exp(loglow), np.exp(loghigh) + + +class OddsRatioResult: + """ + Result of `scipy.stats.contingency.odds_ratio`. See the + docstring for `odds_ratio` for more details. + + Attributes + ---------- + statistic : float + The computed odds ratio. + + * If `kind` is ``'sample'``, this is sample (or unconditional) + estimate, given by + ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``. + * If `kind` is ``'conditional'``, this is the conditional + maximum likelihood estimate for the odds ratio. It is + the noncentrality parameter of Fisher's noncentral + hypergeometric distribution with the same hypergeometric + parameters as `table` and whose mean is ``table[0, 0]``. + + Methods + ------- + confidence_interval : + Confidence interval for the odds ratio. + """ + + def __init__(self, _table, _kind, statistic): + # for now, no need to make _table and _kind public, since this sort of + # information is returned in very few `scipy.stats` results + self._table = _table + self._kind = _kind + self.statistic = statistic + + def __repr__(self): + return f"OddsRatioResult(statistic={self.statistic})" + + def confidence_interval(self, confidence_level=0.95, + alternative='two-sided'): + """ + Confidence interval for the odds ratio. + + Parameters + ---------- + confidence_level: float + Desired confidence level for the confidence interval. + The value must be given as a fraction between 0 and 1. + Default is 0.95 (meaning 95%). 
+ + alternative : {'two-sided', 'less', 'greater'}, optional + The alternative hypothesis of the hypothesis test to which the + confidence interval corresponds. That is, suppose the null + hypothesis is that the true odds ratio equals ``OR`` and the + confidence interval is ``(low, high)``. Then the following options + for `alternative` are available (default is 'two-sided'): + + * 'two-sided': the true odds ratio is not equal to ``OR``. There + is evidence against the null hypothesis at the chosen + `confidence_level` if ``high < OR`` or ``low > OR``. + * 'less': the true odds ratio is less than ``OR``. The ``low`` end + of the confidence interval is 0, and there is evidence against + the null hypothesis at the chosen `confidence_level` if + ``high < OR``. + * 'greater': the true odds ratio is greater than ``OR``. The + ``high`` end of the confidence interval is ``np.inf``, and there + is evidence against the null hypothesis at the chosen + `confidence_level` if ``low > OR``. + + Returns + ------- + ci : ``ConfidenceInterval`` instance + The confidence interval, represented as an object with + attributes ``low`` and ``high``. + + Notes + ----- + When `kind` is ``'conditional'``, the limits of the confidence + interval are the conditional "exact confidence limits" as described + by Fisher [1]_. The conditional odds ratio and confidence interval are + also discussed in Section 4.1.2 of the text by Sahai and Khurshid [2]_. + + When `kind` is ``'sample'``, the confidence interval is computed + under the assumption that the logarithm of the odds ratio is normally + distributed with standard error given by:: + + se = sqrt(1/a + 1/b + 1/c + 1/d) + + where ``a``, ``b``, ``c`` and ``d`` are the elements of the + contingency table. (See, for example, [2]_, section 3.1.3.2, + or [3]_, section 2.3.3). + + References + ---------- + .. [1] R. A. Fisher (1935), The logic of inductive inference, + Journal of the Royal Statistical Society, Vol. 98, No. 1, + pp. 39-82. + .. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology: + Methods, Techniques, and Applications, CRC Press LLC, Boca + Raton, Florida. + .. [3] Alan Agresti, An Introduction to Categorical Data Analysis + (second edition), Wiley, Hoboken, NJ, USA (2007). + """ + if alternative not in ['two-sided', 'less', 'greater']: + raise ValueError("`alternative` must be 'two-sided', 'less' or " + "'greater'.") + + if confidence_level < 0 or confidence_level > 1: + raise ValueError('confidence_level must be between 0 and 1') + + if self._kind == 'conditional': + ci = self._conditional_odds_ratio_ci(confidence_level, alternative) + else: + ci = self._sample_odds_ratio_ci(confidence_level, alternative) + return ci + + def _conditional_odds_ratio_ci(self, confidence_level=0.95, + alternative='two-sided'): + """ + Confidence interval for the conditional odds ratio. + """ + + table = self._table + if 0 in table.sum(axis=0) or 0 in table.sum(axis=1): + # If both values in a row or column are zero, the p-value is 1, + # the odds ratio is NaN and the confidence interval is (0, inf). + ci = (0, np.inf) + else: + ci = _conditional_oddsratio_ci(table, + confidence_level=confidence_level, + alternative=alternative) + return ConfidenceInterval(low=ci[0], high=ci[1]) + + def _sample_odds_ratio_ci(self, confidence_level=0.95, + alternative='two-sided'): + """ + Confidence interval for the sample odds ratio. 
+ """ + if confidence_level < 0 or confidence_level > 1: + raise ValueError('confidence_level must be between 0 and 1') + + table = self._table + if 0 in table.sum(axis=0) or 0 in table.sum(axis=1): + # If both values in a row or column are zero, the p-value is 1, + # the odds ratio is NaN and the confidence interval is (0, inf). + ci = (0, np.inf) + else: + ci = _sample_odds_ratio_ci(table, + confidence_level=confidence_level, + alternative=alternative) + return ConfidenceInterval(low=ci[0], high=ci[1]) + + +def odds_ratio(table, *, kind='conditional'): + r""" + Compute the odds ratio for a 2x2 contingency table. + + Parameters + ---------- + table : array_like of ints + A 2x2 contingency table. Elements must be non-negative integers. + kind : str, optional + Which kind of odds ratio to compute, either the sample + odds ratio (``kind='sample'``) or the conditional odds ratio + (``kind='conditional'``). Default is ``'conditional'``. + + Returns + ------- + result : `~scipy.stats._result_classes.OddsRatioResult` instance + The returned object has two computed attributes: + + statistic : float + * If `kind` is ``'sample'``, this is sample (or unconditional) + estimate, given by + ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``. + * If `kind` is ``'conditional'``, this is the conditional + maximum likelihood estimate for the odds ratio. It is + the noncentrality parameter of Fisher's noncentral + hypergeometric distribution with the same hypergeometric + parameters as `table` and whose mean is ``table[0, 0]``. + + The object has the method `confidence_interval` that computes + the confidence interval of the odds ratio. + + See Also + -------- + scipy.stats.fisher_exact + relative_risk + + Notes + ----- + The conditional odds ratio was discussed by Fisher (see "Example 1" + of [1]_). Texts that cover the odds ratio include [2]_ and [3]_. + + .. versionadded:: 1.10.0 + + References + ---------- + .. [1] R. A. Fisher (1935), The logic of inductive inference, + Journal of the Royal Statistical Society, Vol. 98, No. 1, + pp. 39-82. + .. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research. + Volume I - The analysis of case-control studies. IARC Sci Publ. + (32):5-338. PMID: 7216345. (See section 4.2.) + .. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology: + Methods, Techniques, and Applications, CRC Press LLC, Boca + Raton, Florida. + .. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of + Cardiovascular Events in Women and Men: A Sex-Specific + Meta-analysis of Randomized Controlled Trials." + JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006. + + Examples + -------- + In epidemiology, individuals are classified as "exposed" or + "unexposed" to some factor or treatment. If the occurrence of some + illness is under study, those who have the illness are often + classified as "cases", and those without it are "noncases". The + counts of the occurrences of these classes gives a contingency + table:: + + exposed unexposed + cases a b + noncases c d + + The sample odds ratio may be written ``(a/c) / (b/d)``. ``a/c`` can + be interpreted as the odds of a case occurring in the exposed group, + and ``b/d`` as the odds of a case occurring in the unexposed group. + The sample odds ratio is the ratio of these odds. If the odds ratio + is greater than 1, it suggests that there is a positive association + between being exposed and being a case. 
+
+ Interchanging the rows or columns of the contingency table inverts
+ the odds ratio, so it is important to understand the meaning of labels
+ given to the rows and columns of the table when interpreting the
+ odds ratio.
+
+ In [4]_, the use of aspirin to prevent cardiovascular events in women
+ and men was investigated. The study notably concluded:
+
+ ...aspirin therapy reduced the risk of a composite of
+ cardiovascular events due to its effect on reducing the risk of
+ ischemic stroke in women [...]
+
+ The article lists studies of various cardiovascular events. Let's
+ focus on ischemic stroke in women.
+
+ The following table summarizes the results of the experiment in which
+ participants took aspirin or a placebo on a regular basis for several
+ years. Cases of ischemic stroke were recorded::
+
+                     Aspirin   Control/Placebo
+ Ischemic stroke       176         230
+ No stroke           21035       21018
+
+ The question we ask is "Is there evidence that aspirin reduces the
+ risk of ischemic stroke?"
+
+ Compute the odds ratio:
+
+ >>> from scipy.stats.contingency import odds_ratio
+ >>> res = odds_ratio([[176, 230], [21035, 21018]])
+ >>> res.statistic
+ 0.7646037659999126
+
+ For this sample, the odds of getting an ischemic stroke for those who have
+ been taking aspirin are 0.76 times the odds of those
+ who have received the placebo.
+
+ To make statistical inferences about the population under study,
+ we can compute the 95% confidence interval for the odds ratio:
+
+ >>> res.confidence_interval(confidence_level=0.95)
+ ConfidenceInterval(low=0.6241234078749812, high=0.9354102892100372)
+
+ The 95% confidence interval for the conditional odds ratio is
+ approximately (0.62, 0.94).
+
+ The fact that the entire 95% confidence interval falls below 1 supports
+ the authors' conclusion that aspirin was associated with a
+ statistically significant reduction in ischemic stroke.
+ """
+ if kind not in ['conditional', 'sample']:
+ raise ValueError("`kind` must be 'conditional' or 'sample'.")
+
+ c = np.asarray(table)
+
+ if c.shape != (2, 2):
+ raise ValueError(f"Invalid shape {c.shape}. The input `table` must be "
+ "of shape (2, 2).")
+
+ if not np.issubdtype(c.dtype, np.integer):
+ raise ValueError("`table` must be an array of integers, but got "
+ f"type {c.dtype}")
+ c = c.astype(np.int64)
+
+ if np.any(c < 0):
+ raise ValueError("All values in `table` must be nonnegative.")
+
+ if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
+ # If both values in a row or column are zero, the p-value is NaN and
+ # the odds ratio is NaN.
+ result = OddsRatioResult(_table=c, _kind=kind, statistic=np.nan) + return result + + if kind == 'sample': + oddsratio = _sample_odds_ratio(c) + else: # kind is 'conditional' + oddsratio = _conditional_oddsratio(c) + + result = OddsRatioResult(_table=c, _kind=kind, statistic=oddsratio) + return result diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_qmc.py b/venv/lib/python3.10/site-packages/scipy/stats/_qmc.py new file mode 100644 index 0000000000000000000000000000000000000000..dfdd3fc233fc2c078a31a48327c042e0b0c36ae5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_qmc.py @@ -0,0 +1,2786 @@ +"""Quasi-Monte Carlo engines and helpers.""" +from __future__ import annotations + +import copy +import math +import numbers +import os +import warnings +from abc import ABC, abstractmethod +from functools import partial +from typing import ( + Callable, + ClassVar, + Literal, + overload, + TYPE_CHECKING, +) + +import numpy as np + +if TYPE_CHECKING: + import numpy.typing as npt + from scipy._lib._util import ( + DecimalNumber, GeneratorType, IntNumber, SeedType + ) + +import scipy.stats as stats +from scipy._lib._util import rng_integers, _rng_spawn +from scipy.sparse.csgraph import minimum_spanning_tree +from scipy.spatial import distance, Voronoi +from scipy.special import gammainc +from ._sobol import ( + _initialize_v, _cscramble, _fill_p_cumulative, _draw, _fast_forward, + _categorize, _MAXDIM +) +from ._qmc_cy import ( + _cy_wrapper_centered_discrepancy, + _cy_wrapper_wrap_around_discrepancy, + _cy_wrapper_mixture_discrepancy, + _cy_wrapper_l2_star_discrepancy, + _cy_wrapper_update_discrepancy, + _cy_van_der_corput_scrambled, + _cy_van_der_corput, +) + + +__all__ = ['scale', 'discrepancy', 'geometric_discrepancy', 'update_discrepancy', + 'QMCEngine', 'Sobol', 'Halton', 'LatinHypercube', 'PoissonDisk', + 'MultinomialQMC', 'MultivariateNormalQMC'] + + +@overload +def check_random_state(seed: IntNumber | None = ...) -> np.random.Generator: + ... + + +@overload +def check_random_state(seed: GeneratorType) -> GeneratorType: + ... + + +# Based on scipy._lib._util.check_random_state +def check_random_state(seed=None): + """Turn `seed` into a `numpy.random.Generator` instance. + + Parameters + ---------- + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + If `seed` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(seed)``. + If `seed` is already a ``Generator`` or ``RandomState`` instance, then + the provided instance is used. + + Returns + ------- + seed : {`numpy.random.Generator`, `numpy.random.RandomState`} + Random number generator. + + """ + if seed is None or isinstance(seed, (numbers.Integral, np.integer)): + return np.random.default_rng(seed) + elif isinstance(seed, (np.random.RandomState, np.random.Generator)): + return seed + else: + raise ValueError(f'{seed!r} cannot be used to seed a' + ' numpy.random.Generator instance') + + +def scale( + sample: npt.ArrayLike, + l_bounds: npt.ArrayLike, + u_bounds: npt.ArrayLike, + *, + reverse: bool = False +) -> np.ndarray: + r"""Sample scaling from unit hypercube to different bounds. + + To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`, + with :math:`a` the lower bounds and :math:`b` the upper bounds. + The following transformation is used: + + .. math:: + + (b - a) \cdot \text{sample} + a + + Parameters + ---------- + sample : array_like (n, d) + Sample to scale. 
+ l_bounds, u_bounds : array_like (d,) + Lower and upper bounds (resp. :math:`a`, :math:`b`) of transformed + data. If `reverse` is True, range of the original data to transform + to the unit hypercube. + reverse : bool, optional + Reverse the transformation from different bounds to the unit hypercube. + Default is False. + + Returns + ------- + sample : array_like (n, d) + Scaled sample. + + Examples + -------- + Transform 3 samples in the unit hypercube to bounds: + + >>> from scipy.stats import qmc + >>> l_bounds = [-2, 0] + >>> u_bounds = [6, 5] + >>> sample = [[0.5 , 0.75], + ... [0.5 , 0.5], + ... [0.75, 0.25]] + >>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds) + >>> sample_scaled + array([[2. , 3.75], + [2. , 2.5 ], + [4. , 1.25]]) + + And convert back to the unit hypercube: + + >>> sample_ = qmc.scale(sample_scaled, l_bounds, u_bounds, reverse=True) + >>> sample_ + array([[0.5 , 0.75], + [0.5 , 0.5 ], + [0.75, 0.25]]) + + """ + sample = np.asarray(sample) + + # Checking bounds and sample + if not sample.ndim == 2: + raise ValueError('Sample is not a 2D array') + + lower, upper = _validate_bounds( + l_bounds=l_bounds, u_bounds=u_bounds, d=sample.shape[1] + ) + + if not reverse: + # Checking that sample is within the hypercube + if (sample.max() > 1.) or (sample.min() < 0.): + raise ValueError('Sample is not in unit hypercube') + + return sample * (upper - lower) + lower + else: + # Checking that sample is within the bounds + if not (np.all(sample >= lower) and np.all(sample <= upper)): + raise ValueError('Sample is out of bounds') + + return (sample - lower) / (upper - lower) + + +def _ensure_in_unit_hypercube(sample: npt.ArrayLike) -> np.ndarray: + """Ensure that sample is a 2D array and is within a unit hypercube + + Parameters + ---------- + sample : array_like (n, d) + A 2D array of points. + + Returns + ------- + np.ndarray + The array interpretation of the input sample + + Raises + ------ + ValueError + If the input is not a 2D array or contains points outside of + a unit hypercube. + """ + sample = np.asarray(sample, dtype=np.float64, order="C") + + if not sample.ndim == 2: + raise ValueError("Sample is not a 2D array") + + if (sample.max() > 1.) or (sample.min() < 0.): + raise ValueError("Sample is not in unit hypercube") + + return sample + + +def discrepancy( + sample: npt.ArrayLike, + *, + iterative: bool = False, + method: Literal["CD", "WD", "MD", "L2-star"] = "CD", + workers: IntNumber = 1) -> float: + """Discrepancy of a given sample. + + Parameters + ---------- + sample : array_like (n, d) + The sample to compute the discrepancy from. + iterative : bool, optional + Must be False if not using it for updating the discrepancy. + Default is False. Refer to the notes for more details. + method : str, optional + Type of discrepancy, can be ``CD``, ``WD``, ``MD`` or ``L2-star``. + Refer to the notes for more details. Default is ``CD``. + workers : int, optional + Number of workers to use for parallel processing. If -1 is given all + CPU threads are used. Default is 1. + + Returns + ------- + discrepancy : float + Discrepancy. + + See Also + -------- + geometric_discrepancy + + Notes + ----- + The discrepancy is a uniformity criterion used to assess the space filling + of a number of samples in a hypercube. A discrepancy quantifies the + distance between the continuous uniform distribution on a hypercube and the + discrete uniform distribution on :math:`n` distinct sample points. + + The lower the value is, the better the coverage of the parameter space is. 
+ + For a collection of subsets of the hypercube, the discrepancy is the + difference between the fraction of sample points in one of those + subsets and the volume of that subset. There are different definitions of + discrepancy corresponding to different collections of subsets. Some + versions take a root mean square difference over subsets instead of + a maximum. + + A measure of uniformity is reasonable if it satisfies the following + criteria [1]_: + + 1. It is invariant under permuting factors and/or runs. + 2. It is invariant under rotation of the coordinates. + 3. It can measure not only uniformity of the sample over the hypercube, + but also the projection uniformity of the sample over non-empty + subset of lower dimension hypercubes. + 4. There is some reasonable geometric meaning. + 5. It is easy to compute. + 6. It satisfies the Koksma-Hlawka-like inequality. + 7. It is consistent with other criteria in experimental design. + + Four methods are available: + + * ``CD``: Centered Discrepancy - subspace involves a corner of the + hypercube + * ``WD``: Wrap-around Discrepancy - subspace can wrap around bounds + * ``MD``: Mixture Discrepancy - mix between CD/WD covering more criteria + * ``L2-star``: L2-star discrepancy - like CD BUT variant to rotation + + See [2]_ for precise definitions of each method. + + Lastly, using ``iterative=True``, it is possible to compute the + discrepancy as if we had :math:`n+1` samples. This is useful if we want + to add a point to a sampling and check the candidate which would give the + lowest discrepancy. Then you could just update the discrepancy with + each candidate using `update_discrepancy`. This method is faster than + computing the discrepancy for a large number of candidates. + + References + ---------- + .. [1] Fang et al. "Design and modeling for computer experiments". + Computer Science and Data Analysis Series, 2006. + .. [2] Zhou Y.-D. et al. "Mixture discrepancy for quasi-random point sets." + Journal of Complexity, 29 (3-4) , pp. 283-301, 2013. + .. [3] T. T. Warnock. "Computational investigations of low discrepancy + point sets." Applications of Number Theory to Numerical + Analysis, Academic Press, pp. 319-343, 1972. + + Examples + -------- + Calculate the quality of the sample using the discrepancy: + + >>> import numpy as np + >>> from scipy.stats import qmc + >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]]) + >>> l_bounds = [0.5, 0.5] + >>> u_bounds = [6.5, 6.5] + >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True) + >>> space + array([[0.08333333, 0.41666667], + [0.25 , 0.91666667], + [0.41666667, 0.25 ], + [0.58333333, 0.75 ], + [0.75 , 0.08333333], + [0.91666667, 0.58333333]]) + >>> qmc.discrepancy(space) + 0.008142039609053464 + + We can also compute iteratively the ``CD`` discrepancy by using + ``iterative=True``. + + >>> disc_init = qmc.discrepancy(space[:-1], iterative=True) + >>> disc_init + 0.04769081147119336 + >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init) + 0.008142039609053513 + + """ + sample = _ensure_in_unit_hypercube(sample) + + workers = _validate_workers(workers) + + methods = { + "CD": _cy_wrapper_centered_discrepancy, + "WD": _cy_wrapper_wrap_around_discrepancy, + "MD": _cy_wrapper_mixture_discrepancy, + "L2-star": _cy_wrapper_l2_star_discrepancy, + } + + if method in methods: + return methods[method](sample, iterative, workers=workers) + else: + raise ValueError(f"{method!r} is not a valid method. 
It must be one of" + f" {set(methods)!r}") + + +def geometric_discrepancy( + sample: npt.ArrayLike, + method: Literal["mindist", "mst"] = "mindist", + metric: str = "euclidean") -> float: + """Discrepancy of a given sample based on its geometric properties. + + Parameters + ---------- + sample : array_like (n, d) + The sample to compute the discrepancy from. + method : {"mindist", "mst"}, optional + The method to use. One of ``mindist`` for minimum distance (default) + or ``mst`` for minimum spanning tree. + metric : str or callable, optional + The distance metric to use. See the documentation + for `scipy.spatial.distance.pdist` for the available metrics and + the default. + + Returns + ------- + discrepancy : float + Discrepancy (higher values correspond to greater sample uniformity). + + See Also + -------- + discrepancy + + Notes + ----- + The discrepancy can serve as a simple measure of quality of a random sample. + This measure is based on the geometric properties of the distribution of points + in the sample, such as the minimum distance between any pair of points, or + the mean edge length in a minimum spanning tree. + + The higher the value is, the better the coverage of the parameter space is. + Note that this is different from `scipy.stats.qmc.discrepancy`, where lower + values correspond to higher quality of the sample. + + Also note that when comparing different sampling strategies using this function, + the sample size must be kept constant. + + It is possible to calculate two metrics from the minimum spanning tree: + the mean edge length and the standard deviation of edges lengths. Using + both metrics offers a better picture of uniformity than either metric alone, + with higher mean and lower standard deviation being preferable (see [1]_ + for a brief discussion). This function currently only calculates the mean + edge length. + + References + ---------- + .. [1] Franco J. et al. "Minimum Spanning Tree: A new approach to assess the quality + of the design of computer experiments." Chemometrics and Intelligent Laboratory + Systems, 97 (2), pp. 164-169, 2009. + + Examples + -------- + Calculate the quality of the sample using the minimum euclidean distance + (the defaults): + + >>> import numpy as np + >>> from scipy.stats import qmc + >>> rng = np.random.default_rng(191468432622931918890291693003068437394) + >>> sample = qmc.LatinHypercube(d=2, seed=rng).random(50) + >>> qmc.geometric_discrepancy(sample) + 0.03708161435687876 + + Calculate the quality using the mean edge length in the minimum + spanning tree: + + >>> qmc.geometric_discrepancy(sample, method='mst') + 0.1105149978798376 + + Display the minimum spanning tree and the points with + the smallest distance: + + >>> import matplotlib.pyplot as plt + >>> from matplotlib.lines import Line2D + >>> from scipy.sparse.csgraph import minimum_spanning_tree + >>> from scipy.spatial.distance import pdist, squareform + >>> dist = pdist(sample) + >>> mst = minimum_spanning_tree(squareform(dist)) + >>> edges = np.where(mst.toarray() > 0) + >>> edges = np.asarray(edges).T + >>> min_dist = np.min(dist) + >>> min_idx = np.argwhere(squareform(dist) == min_dist)[0] + >>> fig, ax = plt.subplots(figsize=(10, 5)) + >>> _ = ax.set(aspect='equal', xlabel=r'$x_1$', ylabel=r'$x_2$', + ... xlim=[0, 1], ylim=[0, 1]) + >>> for edge in edges: + ... 
ax.plot(sample[edge, 0], sample[edge, 1], c='k') + >>> ax.scatter(sample[:, 0], sample[:, 1]) + >>> ax.add_patch(plt.Circle(sample[min_idx[0]], min_dist, color='red', fill=False)) + >>> markers = [ + ... Line2D([0], [0], marker='o', lw=0, label='Sample points'), + ... Line2D([0], [0], color='k', label='Minimum spanning tree'), + ... Line2D([0], [0], marker='o', lw=0, markerfacecolor='w', markeredgecolor='r', + ... label='Minimum point-to-point distance'), + ... ] + >>> ax.legend(handles=markers, loc='center left', bbox_to_anchor=(1, 0.5)); + >>> plt.show() + + """ + sample = _ensure_in_unit_hypercube(sample) + if sample.shape[0] < 2: + raise ValueError("Sample must contain at least two points") + + distances = distance.pdist(sample, metric=metric) # type: ignore[call-overload] + + if np.any(distances == 0.0): + warnings.warn("Sample contains duplicate points.", stacklevel=2) + + if method == "mindist": + return np.min(distances[distances.nonzero()]) + elif method == "mst": + fully_connected_graph = distance.squareform(distances) + mst = minimum_spanning_tree(fully_connected_graph) + distances = mst[mst.nonzero()] + # TODO consider returning both the mean and the standard deviation + # see [1] for a discussion + return np.mean(distances) + else: + raise ValueError(f"{method!r} is not a valid method. " + f"It must be one of {{'mindist', 'mst'}}") + + +def update_discrepancy( + x_new: npt.ArrayLike, + sample: npt.ArrayLike, + initial_disc: DecimalNumber) -> float: + """Update the centered discrepancy with a new sample. + + Parameters + ---------- + x_new : array_like (1, d) + The new sample to add in `sample`. + sample : array_like (n, d) + The initial sample. + initial_disc : float + Centered discrepancy of the `sample`. + + Returns + ------- + discrepancy : float + Centered discrepancy of the sample composed of `x_new` and `sample`. + + Examples + -------- + We can also compute iteratively the discrepancy by using + ``iterative=True``. + + >>> import numpy as np + >>> from scipy.stats import qmc + >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]]) + >>> l_bounds = [0.5, 0.5] + >>> u_bounds = [6.5, 6.5] + >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True) + >>> disc_init = qmc.discrepancy(space[:-1], iterative=True) + >>> disc_init + 0.04769081147119336 + >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init) + 0.008142039609053513 + + """ + sample = np.asarray(sample, dtype=np.float64, order="C") + x_new = np.asarray(x_new, dtype=np.float64, order="C") + + # Checking that sample is within the hypercube and 2D + if not sample.ndim == 2: + raise ValueError('Sample is not a 2D array') + + if (sample.max() > 1.) or (sample.min() < 0.): + raise ValueError('Sample is not in unit hypercube') + + # Checking that x_new is within the hypercube and 1D + if not x_new.ndim == 1: + raise ValueError('x_new is not a 1D array') + + if not (np.all(x_new >= 0) and np.all(x_new <= 1)): + raise ValueError('x_new is not in unit hypercube') + + if x_new.shape[0] != sample.shape[1]: + raise ValueError("x_new and sample must be broadcastable") + + return _cy_wrapper_update_discrepancy(x_new, sample, initial_disc) + + +def _perturb_discrepancy(sample: np.ndarray, i1: int, i2: int, k: int, + disc: float): + """Centered discrepancy after an elementary perturbation of a LHS. + + An elementary perturbation consists of an exchange of coordinates between + two points: ``sample[i1, k] <-> sample[i2, k]``. By construction, + this operation conserves the LHS properties. 
+ + Parameters + ---------- + sample : array_like (n, d) + The sample (before permutation) to compute the discrepancy from. + i1 : int + The first line of the elementary permutation. + i2 : int + The second line of the elementary permutation. + k : int + The column of the elementary permutation. + disc : float + Centered discrepancy of the design before permutation. + + Returns + ------- + discrepancy : float + Centered discrepancy of the design after permutation. + + References + ---------- + .. [1] Jin et al. "An efficient algorithm for constructing optimal design + of computer experiments", Journal of Statistical Planning and + Inference, 2005. + + """ + n = sample.shape[0] + + z_ij = sample - 0.5 + + # Eq (19) + c_i1j = (1. / n ** 2. + * np.prod(0.5 * (2. + abs(z_ij[i1, :]) + + abs(z_ij) - abs(z_ij[i1, :] - z_ij)), axis=1)) + c_i2j = (1. / n ** 2. + * np.prod(0.5 * (2. + abs(z_ij[i2, :]) + + abs(z_ij) - abs(z_ij[i2, :] - z_ij)), axis=1)) + + # Eq (20) + c_i1i1 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i1, :])) + - 2. / n * np.prod(1. + 0.5 * abs(z_ij[i1, :]) + - 0.5 * z_ij[i1, :] ** 2)) + c_i2i2 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i2, :])) + - 2. / n * np.prod(1. + 0.5 * abs(z_ij[i2, :]) + - 0.5 * z_ij[i2, :] ** 2)) + + # Eq (22), typo in the article in the denominator i2 -> i1 + num = (2 + abs(z_ij[i2, k]) + abs(z_ij[:, k]) + - abs(z_ij[i2, k] - z_ij[:, k])) + denum = (2 + abs(z_ij[i1, k]) + abs(z_ij[:, k]) + - abs(z_ij[i1, k] - z_ij[:, k])) + gamma = num / denum + + # Eq (23) + c_p_i1j = gamma * c_i1j + # Eq (24) + c_p_i2j = c_i2j / gamma + + alpha = (1 + abs(z_ij[i2, k])) / (1 + abs(z_ij[i1, k])) + beta = (2 - abs(z_ij[i2, k])) / (2 - abs(z_ij[i1, k])) + + g_i1 = np.prod(1. + abs(z_ij[i1, :])) + g_i2 = np.prod(1. + abs(z_ij[i2, :])) + h_i1 = np.prod(1. + 0.5 * abs(z_ij[i1, :]) - 0.5 * (z_ij[i1, :] ** 2)) + h_i2 = np.prod(1. + 0.5 * abs(z_ij[i2, :]) - 0.5 * (z_ij[i2, :] ** 2)) + + # Eq (25), typo in the article g is missing + c_p_i1i1 = ((g_i1 * alpha) / (n ** 2) - 2. * alpha * beta * h_i1 / n) + # Eq (26), typo in the article n ** 2 + c_p_i2i2 = ((g_i2 / ((n ** 2) * alpha)) - (2. * h_i2 / (n * alpha * beta))) + + # Eq (26) + sum_ = c_p_i1j - c_i1j + c_p_i2j - c_i2j + + mask = np.ones(n, dtype=bool) + mask[[i1, i2]] = False + sum_ = sum(sum_[mask]) + + disc_ep = (disc + c_p_i1i1 - c_i1i1 + c_p_i2i2 - c_i2i2 + 2 * sum_) + + return disc_ep + + +def primes_from_2_to(n: int) -> np.ndarray: + """Prime numbers from 2 to *n*. + + Parameters + ---------- + n : int + Sup bound with ``n >= 6``. + + Returns + ------- + primes : list(int) + Primes in ``2 <= p < n``. + + Notes + ----- + Taken from [1]_ by P.T. Roy, written consent given on 23.04.2021 + by the original author, Bruno Astrolino, for free use in SciPy under + the 3-clause BSD. + + References + ---------- + .. [1] `StackOverflow `_. + + """ + sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool) + for i in range(1, int(n ** 0.5) // 3 + 1): + k = 3 * i + 1 | 1 + sieve[k * k // 3::2 * k] = False + sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False + return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)] + + +def n_primes(n: IntNumber) -> list[int]: + """List of the n-first prime numbers. + + Parameters + ---------- + n : int + Number of prime numbers wanted. + + Returns + ------- + primes : list(int) + List of primes. 
+ + """ + primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, + 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, + 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, + 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, + 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, + 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, + 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, + 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, + 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, + 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, + 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, + 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, + 953, 967, 971, 977, 983, 991, 997][:n] # type: ignore[misc] + + if len(primes) < n: + big_number = 2000 + while 'Not enough primes': + primes = primes_from_2_to(big_number)[:n] # type: ignore + if len(primes) == n: + break + big_number += 1000 + + return primes + + +def _van_der_corput_permutations( + base: IntNumber, *, random_state: SeedType = None +) -> np.ndarray: + """Permutations for scrambling a Van der Corput sequence. + + Parameters + ---------- + base : int + Base of the sequence. + random_state : {None, int, `numpy.random.Generator`}, optional + If `seed` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(seed)``. + If `seed` is already a ``Generator`` instance, then the provided + instance is used. + + Returns + ------- + permutations : array_like + Permutation indices. + + Notes + ----- + In Algorithm 1 of Owen 2017, a permutation of `np.arange(base)` is + created for each positive integer `k` such that `1 - base**-k < 1` + using floating-point arithmetic. For double precision floats, the + condition `1 - base**-k < 1` can also be written as `base**-k > + 2**-54`, which makes it more apparent how many permutations we need + to create. + """ + rng = check_random_state(random_state) + count = math.ceil(54 / math.log2(base)) - 1 + permutations = np.repeat(np.arange(base)[None], count, axis=0) + for perm in permutations: + rng.shuffle(perm) + + return permutations + + +def van_der_corput( + n: IntNumber, + base: IntNumber = 2, + *, + start_index: IntNumber = 0, + scramble: bool = False, + permutations: npt.ArrayLike | None = None, + seed: SeedType = None, + workers: IntNumber = 1) -> np.ndarray: + """Van der Corput sequence. + + Pseudo-random number generator based on a b-adic expansion. + + Scrambling uses permutations of the remainders (see [1]_). Multiple + permutations are applied to construct a point. The sequence of + permutations has to be the same for all points of the sequence. + + Parameters + ---------- + n : int + Number of element of the sequence. + base : int, optional + Base of the sequence. Default is 2. + start_index : int, optional + Index to start the sequence from. Default is 0. + scramble : bool, optional + If True, use Owen scrambling. Otherwise no scrambling is done. + Default is True. + permutations : array_like, optional + Permutations used for scrambling. + seed : {None, int, `numpy.random.Generator`}, optional + If `seed` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(seed)``. + If `seed` is already a ``Generator`` instance, then the provided + instance is used. + workers : int, optional + Number of workers to use for parallel processing. 
If -1 is + given all CPU threads are used. Default is 1. + + Returns + ------- + sequence : list (n,) + Sequence of Van der Corput. + + References + ---------- + .. [1] A. B. Owen. "A randomized Halton algorithm in R", + :arxiv:`1706.02808`, 2017. + + """ + if base < 2: + raise ValueError("'base' must be at least 2") + + if scramble: + if permutations is None: + permutations = _van_der_corput_permutations( + base=base, random_state=seed + ) + else: + permutations = np.asarray(permutations) + + permutations = permutations.astype(np.int64) + return _cy_van_der_corput_scrambled(n, base, start_index, + permutations, workers) + + else: + return _cy_van_der_corput(n, base, start_index, workers) + + +class QMCEngine(ABC): + """A generic Quasi-Monte Carlo sampler class meant for subclassing. + + QMCEngine is a base class to construct a specific Quasi-Monte Carlo + sampler. It cannot be used directly as a sampler. + + Parameters + ---------- + d : int + Dimension of the parameter space. + optimization : {None, "random-cd", "lloyd"}, optional + Whether to use an optimization scheme to improve the quality after + sampling. Note that this is a post-processing step that does not + guarantee that all properties of the sample will be conserved. + Default is None. + + * ``random-cd``: random permutations of coordinates to lower the + centered discrepancy. The best sample based on the centered + discrepancy is constantly updated. Centered discrepancy-based + sampling shows better space-filling robustness toward 2D and 3D + subprojections compared to using other discrepancy measures. + * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm. + The process converges to equally spaced samples. + + .. versionadded:: 1.10.0 + seed : {None, int, `numpy.random.Generator`}, optional + If `seed` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(seed)``. + If `seed` is already a ``Generator`` instance, then the provided + instance is used. + + Notes + ----- + By convention samples are distributed over the half-open interval + ``[0, 1)``. Instances of the class can access the attributes: ``d`` for + the dimension; and ``rng`` for the random number generator (used for the + ``seed``). + + **Subclassing** + + When subclassing `QMCEngine` to create a new sampler, ``__init__`` and + ``random`` must be redefined. + + * ``__init__(d, seed=None)``: at least fix the dimension. If the sampler + does not take advantage of a ``seed`` (deterministic methods like + Halton), this parameter can be omitted. + * ``_random(n, *, workers=1)``: draw ``n`` from the engine. ``workers`` + is used for parallelism. See `Halton` for example. + + Optionally, two other methods can be overwritten by subclasses: + + * ``reset``: Reset the engine to its original state. + * ``fast_forward``: If the sequence is deterministic (like Halton + sequence), then ``fast_forward(n)`` is skipping the ``n`` first draw. + + Examples + -------- + To create a random sampler based on ``np.random.random``, we would do the + following: + + >>> from scipy.stats import qmc + >>> class RandomEngine(qmc.QMCEngine): + ... def __init__(self, d, seed=None): + ... super().__init__(d=d, seed=seed) + ... + ... + ... def _random(self, n=1, *, workers=1): + ... return self.rng.random((n, self.d)) + ... + ... + ... def reset(self): + ... super().__init__(d=self.d, seed=self.rng_seed) + ... return self + ... + ... + ... def fast_forward(self, n): + ... self.random(n) + ... 
return self + + After subclassing `QMCEngine` to define the sampling strategy we want to + use, we can create an instance to sample from. + + >>> engine = RandomEngine(2) + >>> engine.random(5) + array([[0.22733602, 0.31675834], # random + [0.79736546, 0.67625467], + [0.39110955, 0.33281393], + [0.59830875, 0.18673419], + [0.67275604, 0.94180287]]) + + We can also reset the state of the generator and resample again. + + >>> _ = engine.reset() + >>> engine.random(5) + array([[0.22733602, 0.31675834], # random + [0.79736546, 0.67625467], + [0.39110955, 0.33281393], + [0.59830875, 0.18673419], + [0.67275604, 0.94180287]]) + + """ + + @abstractmethod + def __init__( + self, + d: IntNumber, + *, + optimization: Literal["random-cd", "lloyd"] | None = None, + seed: SeedType = None + ) -> None: + if not np.issubdtype(type(d), np.integer) or d < 0: + raise ValueError('d must be a non-negative integer value') + + self.d = d + + if isinstance(seed, np.random.Generator): + # Spawn a Generator that we can own and reset. + self.rng = _rng_spawn(seed, 1)[0] + else: + # Create our instance of Generator, does not need spawning + # Also catch RandomState which cannot be spawned + self.rng = check_random_state(seed) + self.rng_seed = copy.deepcopy(self.rng) + + self.num_generated = 0 + + config = { + # random-cd + "n_nochange": 100, + "n_iters": 10_000, + "rng": self.rng, + + # lloyd + "tol": 1e-5, + "maxiter": 10, + "qhull_options": None, + } + self.optimization_method = _select_optimizer(optimization, config) + + @abstractmethod + def _random( + self, n: IntNumber = 1, *, workers: IntNumber = 1 + ) -> np.ndarray: + ... + + def random( + self, n: IntNumber = 1, *, workers: IntNumber = 1 + ) -> np.ndarray: + """Draw `n` in the half-open interval ``[0, 1)``. + + Parameters + ---------- + n : int, optional + Number of samples to generate in the parameter space. + Default is 1. + workers : int, optional + Only supported with `Halton`. + Number of workers to use for parallel processing. If -1 is + given all CPU threads are used. Default is 1. It becomes faster + than one worker for `n` greater than :math:`10^3`. + + Returns + ------- + sample : array_like (n, d) + QMC sample. + + """ + sample = self._random(n, workers=workers) + if self.optimization_method is not None: + sample = self.optimization_method(sample) + + self.num_generated += n + return sample + + def integers( + self, + l_bounds: npt.ArrayLike, + *, + u_bounds: npt.ArrayLike | None = None, + n: IntNumber = 1, + endpoint: bool = False, + workers: IntNumber = 1 + ) -> np.ndarray: + r""" + Draw `n` integers from `l_bounds` (inclusive) to `u_bounds` + (exclusive), or if endpoint=True, `l_bounds` (inclusive) to + `u_bounds` (inclusive). + + Parameters + ---------- + l_bounds : int or array-like of ints + Lowest (signed) integers to be drawn (unless ``u_bounds=None``, + in which case this parameter is 0 and this value is used for + `u_bounds`). + u_bounds : int or array-like of ints, optional + If provided, one above the largest (signed) integer to be drawn + (see above for behavior if ``u_bounds=None``). + If array-like, must contain integer values. + n : int, optional + Number of samples to generate in the parameter space. + Default is 1. + endpoint : bool, optional + If true, sample from the interval ``[l_bounds, u_bounds]`` instead + of the default ``[l_bounds, u_bounds)``. Defaults is False. + workers : int, optional + Number of workers to use for parallel processing. If -1 is + given all CPU threads are used. 
Only supported when using `Halton` + Default is 1. + + Returns + ------- + sample : array_like (n, d) + QMC sample. + + Notes + ----- + It is safe to just use the same ``[0, 1)`` to integer mapping + with QMC that you would use with MC. You still get unbiasedness, + a strong law of large numbers, an asymptotically infinite variance + reduction and a finite sample variance bound. + + To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`, + with :math:`a` the lower bounds and :math:`b` the upper bounds, + the following transformation is used: + + .. math:: + + \text{floor}((b - a) \cdot \text{sample} + a) + + """ + if u_bounds is None: + u_bounds = l_bounds + l_bounds = 0 + + u_bounds = np.atleast_1d(u_bounds) + l_bounds = np.atleast_1d(l_bounds) + + if endpoint: + u_bounds = u_bounds + 1 + + if (not np.issubdtype(l_bounds.dtype, np.integer) or + not np.issubdtype(u_bounds.dtype, np.integer)): + message = ("'u_bounds' and 'l_bounds' must be integers or" + " array-like of integers") + raise ValueError(message) + + if isinstance(self, Halton): + sample = self.random(n=n, workers=workers) + else: + sample = self.random(n=n) + + sample = scale(sample, l_bounds=l_bounds, u_bounds=u_bounds) + sample = np.floor(sample).astype(np.int64) + + return sample + + def reset(self) -> QMCEngine: + """Reset the engine to base state. + + Returns + ------- + engine : QMCEngine + Engine reset to its base state. + + """ + seed = copy.deepcopy(self.rng_seed) + self.rng = check_random_state(seed) + self.num_generated = 0 + return self + + def fast_forward(self, n: IntNumber) -> QMCEngine: + """Fast-forward the sequence by `n` positions. + + Parameters + ---------- + n : int + Number of points to skip in the sequence. + + Returns + ------- + engine : QMCEngine + Engine reset to its base state. + + """ + self.random(n=n) + return self + + +class Halton(QMCEngine): + """Halton sequence. + + Pseudo-random number generator that generalize the Van der Corput sequence + for multiple dimensions. The Halton sequence uses the base-two Van der + Corput sequence for the first dimension, base-three for its second and + base-:math:`n` for its n-dimension. + + Parameters + ---------- + d : int + Dimension of the parameter space. + scramble : bool, optional + If True, use Owen scrambling. Otherwise no scrambling is done. + Default is True. + optimization : {None, "random-cd", "lloyd"}, optional + Whether to use an optimization scheme to improve the quality after + sampling. Note that this is a post-processing step that does not + guarantee that all properties of the sample will be conserved. + Default is None. + + * ``random-cd``: random permutations of coordinates to lower the + centered discrepancy. The best sample based on the centered + discrepancy is constantly updated. Centered discrepancy-based + sampling shows better space-filling robustness toward 2D and 3D + subprojections compared to using other discrepancy measures. + * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm. + The process converges to equally spaced samples. + + .. versionadded:: 1.10.0 + seed : {None, int, `numpy.random.Generator`}, optional + If `seed` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(seed)``. + If `seed` is already a ``Generator`` instance, then the provided + instance is used. + + Notes + ----- + The Halton sequence has severe striping artifacts for even modestly + large dimensions. These can be ameliorated by scrambling. 
Scrambling + also supports replication-based error estimates and extends + applicability to unbounded integrands. + + References + ---------- + .. [1] Halton, "On the efficiency of certain quasi-random sequences of + points in evaluating multi-dimensional integrals", Numerische + Mathematik, 1960. + .. [2] A. B. Owen. "A randomized Halton algorithm in R", + :arxiv:`1706.02808`, 2017. + + Examples + -------- + Generate samples from a low discrepancy sequence of Halton. + + >>> from scipy.stats import qmc + >>> sampler = qmc.Halton(d=2, scramble=False) + >>> sample = sampler.random(n=5) + >>> sample + array([[0. , 0. ], + [0.5 , 0.33333333], + [0.25 , 0.66666667], + [0.75 , 0.11111111], + [0.125 , 0.44444444]]) + + Compute the quality of the sample using the discrepancy criterion. + + >>> qmc.discrepancy(sample) + 0.088893711419753 + + If one wants to continue an existing design, extra points can be obtained + by calling `random` again. Alternatively, you can skip some points like: + + >>> _ = sampler.fast_forward(5) + >>> sample_continued = sampler.random(n=5) + >>> sample_continued + array([[0.3125 , 0.37037037], + [0.8125 , 0.7037037 ], + [0.1875 , 0.14814815], + [0.6875 , 0.48148148], + [0.4375 , 0.81481481]]) + + Finally, samples can be scaled to bounds. + + >>> l_bounds = [0, 2] + >>> u_bounds = [10, 5] + >>> qmc.scale(sample_continued, l_bounds, u_bounds) + array([[3.125 , 3.11111111], + [8.125 , 4.11111111], + [1.875 , 2.44444444], + [6.875 , 3.44444444], + [4.375 , 4.44444444]]) + + """ + + def __init__( + self, d: IntNumber, *, scramble: bool = True, + optimization: Literal["random-cd", "lloyd"] | None = None, + seed: SeedType = None + ) -> None: + # Used in `scipy.integrate.qmc_quad` + self._init_quad = {'d': d, 'scramble': True, + 'optimization': optimization} + super().__init__(d=d, optimization=optimization, seed=seed) + self.seed = seed + + # important to have ``type(bdim) == int`` for performance reasons + self.base = [int(bdim) for bdim in n_primes(d)] + self.scramble = scramble + + self._initialize_permutations() + + def _initialize_permutations(self) -> None: + """Initialize permutations for all Van der Corput sequences. + + Permutations are only needed for scrambling. + """ + self._permutations: list = [None] * len(self.base) + if self.scramble: + for i, bdim in enumerate(self.base): + permutations = _van_der_corput_permutations( + base=bdim, random_state=self.rng + ) + + self._permutations[i] = permutations + + def _random( + self, n: IntNumber = 1, *, workers: IntNumber = 1 + ) -> np.ndarray: + """Draw `n` in the half-open interval ``[0, 1)``. + + Parameters + ---------- + n : int, optional + Number of samples to generate in the parameter space. Default is 1. + workers : int, optional + Number of workers to use for parallel processing. If -1 is + given all CPU threads are used. Default is 1. It becomes faster + than one worker for `n` greater than :math:`10^3`. + + Returns + ------- + sample : array_like (n, d) + QMC sample. + + """ + workers = _validate_workers(workers) + # Generate a sample using a Van der Corput sequence per dimension. + sample = [van_der_corput(n, bdim, start_index=self.num_generated, + scramble=self.scramble, + permutations=self._permutations[i], + workers=workers) + for i, bdim in enumerate(self.base)] + + return np.array(sample).T.reshape(n, self.d) + + +class LatinHypercube(QMCEngine): + r"""Latin hypercube sampling (LHS). + + A Latin hypercube sample [1]_ generates :math:`n` points in + :math:`[0,1)^{d}`.
Each univariate marginal distribution is stratified, + placing exactly one point in :math:`[j/n, (j+1)/n)` for + :math:`j=0,1,...,n-1`. They are still applicable when :math:`n << d`. + + Parameters + ---------- + d : int + Dimension of the parameter space. + scramble : bool, optional + When False, center samples within cells of a multi-dimensional grid. + Otherwise, samples are randomly placed within cells of the grid. + + .. note:: + Setting ``scramble=False`` does not ensure deterministic output. + For that, use the `seed` parameter. + + Default is True. + + .. versionadded:: 1.10.0 + + optimization : {None, "random-cd", "lloyd"}, optional + Whether to use an optimization scheme to improve the quality after + sampling. Note that this is a post-processing step that does not + guarantee that all properties of the sample will be conserved. + Default is None. + + * ``random-cd``: random permutations of coordinates to lower the + centered discrepancy. The best sample based on the centered + discrepancy is constantly updated. Centered discrepancy-based + sampling shows better space-filling robustness toward 2D and 3D + subprojections compared to using other discrepancy measures. + * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm. + The process converges to equally spaced samples. + + .. versionadded:: 1.8.0 + .. versionchanged:: 1.10.0 + Add ``lloyd``. + + strength : {1, 2}, optional + Strength of the LHS. ``strength=1`` produces a plain LHS while + ``strength=2`` produces an orthogonal array based LHS of strength 2 + [7]_, [8]_. In that case, only ``n=p**2`` points can be sampled, + with ``p`` a prime number. It also constrains ``d <= p + 1``. + Default is 1. + + .. versionadded:: 1.8.0 + + seed : {None, int, `numpy.random.Generator`}, optional + If `seed` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(seed)``. + If `seed` is already a ``Generator`` instance, then the provided + instance is used. + + Notes + ----- + + When LHS is used for integrating a function :math:`f` over :math:`n`, + LHS is extremely effective on integrands that are nearly additive [2]_. + With a LHS of :math:`n` points, the variance of the integral is always + lower than plain MC on :math:`n-1` points [3]_. There is a central limit + theorem for LHS on the mean and variance of the integral [4]_, but not + necessarily for optimized LHS due to the randomization. + + :math:`A` is called an orthogonal array of strength :math:`t` if in each + n-row-by-t-column submatrix of :math:`A`: all :math:`p^t` possible + distinct rows occur the same number of times. The elements of :math:`A` + are in the set :math:`\{0, 1, ..., p-1\}`, also called symbols. + The constraint that :math:`p` must be a prime number is to allow modular + arithmetic. Increasing strength adds some symmetry to the sub-projections + of a sample. With strength 2, samples are symmetric along the diagonals of + 2D sub-projections. This may be undesirable, but on the other hand, the + sample dispersion is improved. + + Strength 1 (plain LHS) brings an advantage over strength 0 (MC) and + strength 2 is a useful increment over strength 1. Going to strength 3 is + a smaller increment and scrambled QMC like Sobol', Halton are more + performant [7]_. + + To create a LHS of strength 2, the orthogonal array :math:`A` is + randomized by applying a random, bijective map of the set of symbols onto + itself. For example, in column 0, all 0s might become 2; in column 1, + all 0s might become 1, etc. 
+ Then, for each column :math:`i` and symbol :math:`j`, we add a plain, + one-dimensional LHS of size :math:`p` to the subarray where + :math:`A^i = j`. The resulting matrix is finally divided by :math:`p`. + + References + ---------- + .. [1] Mckay et al., "A Comparison of Three Methods for Selecting Values + of Input Variables in the Analysis of Output from a Computer Code." + Technometrics, 1979. + .. [2] M. Stein, "Large sample properties of simulations using Latin + hypercube sampling." Technometrics 29, no. 2: 143-151, 1987. + .. [3] A. B. Owen, "Monte Carlo variance of scrambled net quadrature." + SIAM Journal on Numerical Analysis 34, no. 5: 1884-1910, 1997 + .. [4] Loh, W.-L. "On Latin hypercube sampling." The annals of statistics + 24, no. 5: 2058-2080, 1996. + .. [5] Fang et al. "Design and modeling for computer experiments". + Computer Science and Data Analysis Series, 2006. + .. [6] Damblin et al., "Numerical studies of space filling designs: + optimization of Latin Hypercube Samples and subprojection properties." + Journal of Simulation, 2013. + .. [7] A. B. Owen , "Orthogonal arrays for computer experiments, + integration and visualization." Statistica Sinica, 1992. + .. [8] B. Tang, "Orthogonal Array-Based Latin Hypercubes." + Journal of the American Statistical Association, 1993. + .. [9] Susan K. Seaholm et al. "Latin hypercube sampling and the + sensitivity analysis of a Monte Carlo epidemic model". + Int J Biomed Comput, 23(1-2), 97-112, + :doi:`10.1016/0020-7101(88)90067-0`, 1988. + + Examples + -------- + In [9]_, a Latin Hypercube sampling strategy was used to sample a + parameter space to study the importance of each parameter of an epidemic + model. Such analysis is also called a sensitivity analysis. + + Since the dimensionality of the problem is high (6), it is computationally + expensive to cover the space. When numerical experiments are costly, + QMC enables analysis that may not be possible if using a grid. + + The six parameters of the model represented the probability of illness, + the probability of withdrawal, and four contact probabilities, + The authors assumed uniform distributions for all parameters and generated + 50 samples. + + Using `scipy.stats.qmc.LatinHypercube` to replicate the protocol, the + first step is to create a sample in the unit hypercube: + + >>> from scipy.stats import qmc + >>> sampler = qmc.LatinHypercube(d=6) + >>> sample = sampler.random(n=50) + + Then the sample can be scaled to the appropriate bounds: + + >>> l_bounds = [0.000125, 0.01, 0.0025, 0.05, 0.47, 0.7] + >>> u_bounds = [0.000375, 0.03, 0.0075, 0.15, 0.87, 0.9] + >>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds) + + Such a sample was used to run the model 50 times, and a polynomial + response surface was constructed. This allowed the authors to study the + relative importance of each parameter across the range of + possibilities of every other parameter. + In this computer experiment, they showed a 14-fold reduction in the number + of samples required to maintain an error below 2% on their response surface + when compared to a grid sampling. + + Below are other examples showing alternative ways to construct LHS + with even better coverage of the space. + + Using a base LHS as a baseline. + + >>> sampler = qmc.LatinHypercube(d=2) + >>> sample = sampler.random(n=5) + >>> qmc.discrepancy(sample) + 0.0196... # random + + Use the `optimization` keyword argument to produce a LHS with + lower discrepancy at higher computational cost. 
+ + >>> sampler = qmc.LatinHypercube(d=2, optimization="random-cd") + >>> sample = sampler.random(n=5) + >>> qmc.discrepancy(sample) + 0.0176... # random + + Use the `strength` keyword argument to produce an orthogonal array based + LHS of strength 2. In this case, the number of sample points must be the + square of a prime number. + + >>> sampler = qmc.LatinHypercube(d=2, strength=2) + >>> sample = sampler.random(n=9) + >>> qmc.discrepancy(sample) + 0.00526... # random + + Options could be combined to produce an optimized centered + orthogonal array based LHS. After optimization, the result would not + be guaranteed to be of strength 2. + + """ + + def __init__( + self, d: IntNumber, *, + scramble: bool = True, + strength: int = 1, + optimization: Literal["random-cd", "lloyd"] | None = None, + seed: SeedType = None + ) -> None: + # Used in `scipy.integrate.qmc_quad` + self._init_quad = {'d': d, 'scramble': True, 'strength': strength, + 'optimization': optimization} + super().__init__(d=d, seed=seed, optimization=optimization) + self.scramble = scramble + + lhs_method_strength = { + 1: self._random_lhs, + 2: self._random_oa_lhs + } + + try: + self.lhs_method: Callable = lhs_method_strength[strength] + except KeyError as exc: + message = (f"{strength!r} is not a valid strength. It must be one" + f" of {set(lhs_method_strength)!r}") + raise ValueError(message) from exc + + def _random( + self, n: IntNumber = 1, *, workers: IntNumber = 1 + ) -> np.ndarray: + lhs = self.lhs_method(n) + return lhs + + def _random_lhs(self, n: IntNumber = 1) -> np.ndarray: + """Base LHS algorithm.""" + if not self.scramble: + samples: np.ndarray | float = 0.5 + else: + samples = self.rng.uniform(size=(n, self.d)) + + perms = np.tile(np.arange(1, n + 1), + (self.d, 1)) # type: ignore[arg-type] + for i in range(self.d): + self.rng.shuffle(perms[i, :]) + perms = perms.T + + samples = (perms - samples) / n + return samples + + def _random_oa_lhs(self, n: IntNumber = 4) -> np.ndarray: + """Orthogonal array based LHS of strength 2.""" + p = np.sqrt(n).astype(int) + n_row = p**2 + n_col = p + 1 + + primes = primes_from_2_to(p + 1) + if p not in primes or n != n_row: + raise ValueError( + "n is not the square of a prime number. Close" + f" values are {primes[-2:]**2}" + ) + if self.d > p + 1: + raise ValueError("n is too small for d. Must be n > (d-1)**2") + + oa_sample = np.zeros(shape=(n_row, n_col), dtype=int) + + # OA of strength 2 + arrays = np.tile(np.arange(p), (2, 1)) + oa_sample[:, :2] = np.stack(np.meshgrid(*arrays), + axis=-1).reshape(-1, 2) + for p_ in range(1, p): + oa_sample[:, 2+p_-1] = np.mod(oa_sample[:, 0] + + p_*oa_sample[:, 1], p) + + # scramble the OA + oa_sample_ = np.empty(shape=(n_row, n_col), dtype=int) + for j in range(n_col): + perms = self.rng.permutation(p) + oa_sample_[:, j] = perms[oa_sample[:, j]] + + # following is making a scrambled OA into an OA-LHS + oa_lhs_sample = np.zeros(shape=(n_row, n_col)) + lhs_engine = LatinHypercube(d=1, scramble=self.scramble, strength=1, + seed=self.rng) # type: QMCEngine + for j in range(n_col): + for k in range(p): + idx = oa_sample[:, j] == k + lhs = lhs_engine.random(p).flatten() + oa_lhs_sample[:, j][idx] = lhs + oa_sample[:, j][idx] + + lhs_engine = lhs_engine.reset() + + oa_lhs_sample /= p + + return oa_lhs_sample[:, :self.d] # type: ignore + + +class Sobol(QMCEngine): + """Engine for generating (scrambled) Sobol' sequences. + + Sobol' sequences are low-discrepancy, quasi-random numbers. 
Points + can be drawn using two methods: + + * `random_base2`: safely draw :math:`n=2^m` points. This method + guarantees the balance properties of the sequence. + * `random`: draw an arbitrary number of points from the + sequence. See warning below. + + Parameters + ---------- + d : int + Dimensionality of the sequence. Max dimensionality is 21201. + scramble : bool, optional + If True, use LMS+shift scrambling. Otherwise, no scrambling is done. + Default is True. + bits : int, optional + Number of bits of the generator. Control the maximum number of points + that can be generated, which is ``2**bits``. Maximal value is 64. + It does not correspond to the return type, which is always + ``np.float64`` to prevent points from repeating themselves. + Default is None, which for backward compatibility, corresponds to 30. + + .. versionadded:: 1.9.0 + optimization : {None, "random-cd", "lloyd"}, optional + Whether to use an optimization scheme to improve the quality after + sampling. Note that this is a post-processing step that does not + guarantee that all properties of the sample will be conserved. + Default is None. + + * ``random-cd``: random permutations of coordinates to lower the + centered discrepancy. The best sample based on the centered + discrepancy is constantly updated. Centered discrepancy-based + sampling shows better space-filling robustness toward 2D and 3D + subprojections compared to using other discrepancy measures. + * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm. + The process converges to equally spaced samples. + + .. versionadded:: 1.10.0 + seed : {None, int, `numpy.random.Generator`}, optional + If `seed` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(seed)``. + If `seed` is already a ``Generator`` instance, then the provided + instance is used. + + Notes + ----- + Sobol' sequences [1]_ provide :math:`n=2^m` low discrepancy points in + :math:`[0,1)^{d}`. Scrambling them [3]_ makes them suitable for singular + integrands, provides a means of error estimation, and can improve their + rate of convergence. The scrambling strategy which is implemented is a + (left) linear matrix scramble (LMS) followed by a digital random shift + (LMS+shift) [2]_. + + There are many versions of Sobol' sequences depending on their + 'direction numbers'. This code uses direction numbers from [4]_. Hence, + the maximum number of dimension is 21201. The direction numbers have been + precomputed with search criterion 6 and can be retrieved at + https://web.maths.unsw.edu.au/~fkuo/sobol/. + + .. warning:: + + Sobol' sequences are a quadrature rule and they lose their balance + properties if one uses a sample size that is not a power of 2, or skips + the first point, or thins the sequence [5]_. + + If :math:`n=2^m` points are not enough then one should take :math:`2^M` + points for :math:`M>m`. When scrambling, the number R of independent + replicates does not have to be a power of 2. + + Sobol' sequences are generated to some number :math:`B` of bits. + After :math:`2^B` points have been generated, the sequence would + repeat. Hence, an error is raised. + The number of bits can be controlled with the parameter `bits`. + + References + ---------- + .. [1] I. M. Sobol', "The distribution of points in a cube and the accurate + evaluation of integrals." Zh. Vychisl. Mat. i Mat. Phys., 7:784-802, + 1967. + .. [2] J. Matousek, "On the L2-discrepancy for anchored boxes." + J. of Complexity 14, 527-556, 1998. + .. [3] Art B. 
Owen, "Scrambling Sobol and Niederreiter-Xing points." + Journal of Complexity, 14(4):466-489, December 1998. + .. [4] S. Joe and F. Y. Kuo, "Constructing sobol sequences with better + two-dimensional projections." SIAM Journal on Scientific Computing, + 30(5):2635-2654, 2008. + .. [5] Art B. Owen, "On dropping the first Sobol' point." + :arxiv:`2008.08051`, 2020. + + Examples + -------- + Generate samples from a low discrepancy sequence of Sobol'. + + >>> from scipy.stats import qmc + >>> sampler = qmc.Sobol(d=2, scramble=False) + >>> sample = sampler.random_base2(m=3) + >>> sample + array([[0. , 0. ], + [0.5 , 0.5 ], + [0.75 , 0.25 ], + [0.25 , 0.75 ], + [0.375, 0.375], + [0.875, 0.875], + [0.625, 0.125], + [0.125, 0.625]]) + + Compute the quality of the sample using the discrepancy criterion. + + >>> qmc.discrepancy(sample) + 0.013882107204860938 + + To continue an existing design, extra points can be obtained + by calling again `random_base2`. Alternatively, you can skip some + points like: + + >>> _ = sampler.reset() + >>> _ = sampler.fast_forward(4) + >>> sample_continued = sampler.random_base2(m=2) + >>> sample_continued + array([[0.375, 0.375], + [0.875, 0.875], + [0.625, 0.125], + [0.125, 0.625]]) + + Finally, samples can be scaled to bounds. + + >>> l_bounds = [0, 2] + >>> u_bounds = [10, 5] + >>> qmc.scale(sample_continued, l_bounds, u_bounds) + array([[3.75 , 3.125], + [8.75 , 4.625], + [6.25 , 2.375], + [1.25 , 3.875]]) + + """ + + MAXDIM: ClassVar[int] = _MAXDIM + + def __init__( + self, d: IntNumber, *, scramble: bool = True, + bits: IntNumber | None = None, seed: SeedType = None, + optimization: Literal["random-cd", "lloyd"] | None = None + ) -> None: + # Used in `scipy.integrate.qmc_quad` + self._init_quad = {'d': d, 'scramble': True, 'bits': bits, + 'optimization': optimization} + + super().__init__(d=d, optimization=optimization, seed=seed) + if d > self.MAXDIM: + raise ValueError( + f"Maximum supported dimensionality is {self.MAXDIM}." 
+ ) + + self.bits = bits + self.dtype_i: type + + if self.bits is None: + self.bits = 30 + + if self.bits <= 32: + self.dtype_i = np.uint32 + elif 32 < self.bits <= 64: + self.dtype_i = np.uint64 + else: + raise ValueError("Maximum supported 'bits' is 64") + + self.maxn = 2**self.bits + + # v is d x maxbit matrix + self._sv: np.ndarray = np.zeros((d, self.bits), dtype=self.dtype_i) + _initialize_v(self._sv, dim=d, bits=self.bits) + + if not scramble: + self._shift: np.ndarray = np.zeros(d, dtype=self.dtype_i) + else: + # scramble self._shift and self._sv + self._scramble() + + self._quasi = self._shift.copy() + + # normalization constant with the largest possible number + # calculate in Python to not overflow int with 2**64 + self._scale = 1.0 / 2 ** self.bits + + self._first_point = (self._quasi * self._scale).reshape(1, -1) + # explicit casting to float64 + self._first_point = self._first_point.astype(np.float64) + + def _scramble(self) -> None: + """Scramble the sequence using LMS+shift.""" + # Generate shift vector + self._shift = np.dot( + rng_integers(self.rng, 2, size=(self.d, self.bits), + dtype=self.dtype_i), + 2 ** np.arange(self.bits, dtype=self.dtype_i), + ) + # Generate lower triangular matrices (stacked across dimensions) + ltm = np.tril(rng_integers(self.rng, 2, + size=(self.d, self.bits, self.bits), + dtype=self.dtype_i)) + _cscramble( + dim=self.d, bits=self.bits, # type: ignore[arg-type] + ltm=ltm, sv=self._sv + ) + + def _random( + self, n: IntNumber = 1, *, workers: IntNumber = 1 + ) -> np.ndarray: + """Draw next point(s) in the Sobol' sequence. + + Parameters + ---------- + n : int, optional + Number of samples to generate in the parameter space. Default is 1. + + Returns + ------- + sample : array_like (n, d) + Sobol' sample. + + """ + sample: np.ndarray = np.empty((n, self.d), dtype=np.float64) + + if n == 0: + return sample + + total_n = self.num_generated + n + if total_n > self.maxn: + msg = ( + f"At most 2**{self.bits}={self.maxn} distinct points can be " + f"generated. {self.num_generated} points have been previously " + f"generated, then: n={self.num_generated}+{n}={total_n}. " + ) + if self.bits != 64: + msg += "Consider increasing `bits`." + raise ValueError(msg) + + if self.num_generated == 0: + # verify n is 2**n + if not (n & (n - 1) == 0): + warnings.warn("The balance properties of Sobol' points require" + " n to be a power of 2.", stacklevel=2) + + if n == 1: + sample = self._first_point + else: + _draw( + n=n - 1, num_gen=self.num_generated, dim=self.d, + scale=self._scale, sv=self._sv, quasi=self._quasi, + sample=sample + ) + sample = np.concatenate( + [self._first_point, sample] + )[:n] # type: ignore[misc] + else: + _draw( + n=n, num_gen=self.num_generated - 1, dim=self.d, + scale=self._scale, sv=self._sv, quasi=self._quasi, + sample=sample + ) + + return sample + + def random_base2(self, m: IntNumber) -> np.ndarray: + """Draw point(s) from the Sobol' sequence. + + This function draws :math:`n=2^m` points in the parameter space + ensuring the balance properties of the sequence. + + Parameters + ---------- + m : int + Logarithm in base 2 of the number of samples; i.e., n = 2^m. + + Returns + ------- + sample : array_like (n, d) + Sobol' sample. + + """ + n = 2 ** m + + total_n = self.num_generated + n + if not (total_n & (total_n - 1) == 0): + raise ValueError("The balance properties of Sobol' points require " + "n to be a power of 2. {0} points have been " + "previously generated, then: n={0}+2**{1}={2}. 
" + "If you still want to do this, the function " + "'Sobol.random()' can be used." + .format(self.num_generated, m, total_n)) + + return self.random(n) + + def reset(self) -> Sobol: + """Reset the engine to base state. + + Returns + ------- + engine : Sobol + Engine reset to its base state. + + """ + super().reset() + self._quasi = self._shift.copy() + return self + + def fast_forward(self, n: IntNumber) -> Sobol: + """Fast-forward the sequence by `n` positions. + + Parameters + ---------- + n : int + Number of points to skip in the sequence. + + Returns + ------- + engine : Sobol + The fast-forwarded engine. + + """ + if self.num_generated == 0: + _fast_forward( + n=n - 1, num_gen=self.num_generated, dim=self.d, + sv=self._sv, quasi=self._quasi + ) + else: + _fast_forward( + n=n, num_gen=self.num_generated - 1, dim=self.d, + sv=self._sv, quasi=self._quasi + ) + self.num_generated += n + return self + + +class PoissonDisk(QMCEngine): + """Poisson disk sampling. + + Parameters + ---------- + d : int + Dimension of the parameter space. + radius : float + Minimal distance to keep between points when sampling new candidates. + hypersphere : {"volume", "surface"}, optional + Sampling strategy to generate potential candidates to be added in the + final sample. Default is "volume". + + * ``volume``: original Bridson algorithm as described in [1]_. + New candidates are sampled *within* the hypersphere. + * ``surface``: only sample the surface of the hypersphere. + ncandidates : int + Number of candidates to sample per iteration. More candidates result + in a denser sampling as more candidates can be accepted per iteration. + optimization : {None, "random-cd", "lloyd"}, optional + Whether to use an optimization scheme to improve the quality after + sampling. Note that this is a post-processing step that does not + guarantee that all properties of the sample will be conserved. + Default is None. + + * ``random-cd``: random permutations of coordinates to lower the + centered discrepancy. The best sample based on the centered + discrepancy is constantly updated. Centered discrepancy-based + sampling shows better space-filling robustness toward 2D and 3D + subprojections compared to using other discrepancy measures. + * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm. + The process converges to equally spaced samples. + + .. versionadded:: 1.10.0 + seed : {None, int, `numpy.random.Generator`}, optional + If `seed` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(seed)``. + If `seed` is already a ``Generator`` instance, then the provided + instance is used. + + Notes + ----- + Poisson disk sampling is an iterative sampling strategy. Starting from + a seed sample, `ncandidates` are sampled in the hypersphere + surrounding the seed. Candidates below a certain `radius` or outside the + domain are rejected. New samples are added in a pool of sample seed. The + process stops when the pool is empty or when the number of required + samples is reached. + + The maximum number of point that a sample can contain is directly linked + to the `radius`. As the dimension of the space increases, a higher radius + spreads the points further and help overcome the curse of dimensionality. + See the :ref:`quasi monte carlo tutorial ` for more + details. + + .. warning:: + + The algorithm is more suitable for low dimensions and sampling size + due to its iterative nature and memory requirements. 
+ Selecting a small radius with a high dimension would + mean that the space could contain more samples than using lower + dimension or a bigger radius. + + Some code taken from [2]_, written consent given on 31.03.2021 + by the original author, Shamis, for free use in SciPy under + the 3-clause BSD. + + References + ---------- + .. [1] Robert Bridson, "Fast Poisson Disk Sampling in Arbitrary + Dimensions." SIGGRAPH, 2007. + .. [2] `StackOverflow `__. + + Examples + -------- + Generate a 2D sample using a `radius` of 0.2. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from matplotlib.collections import PatchCollection + >>> from scipy.stats import qmc + >>> + >>> rng = np.random.default_rng() + >>> radius = 0.2 + >>> engine = qmc.PoissonDisk(d=2, radius=radius, seed=rng) + >>> sample = engine.random(20) + + Visualizing the 2D sample and showing that no points are closer than + `radius`. ``radius/2`` is used to visualize non-intersecting circles. + If two samples are exactly at `radius` from each other, then their circle + of radius ``radius/2`` will touch. + + >>> fig, ax = plt.subplots() + >>> _ = ax.scatter(sample[:, 0], sample[:, 1]) + >>> circles = [plt.Circle((xi, yi), radius=radius/2, fill=False) + ... for xi, yi in sample] + >>> collection = PatchCollection(circles, match_original=True) + >>> ax.add_collection(collection) + >>> _ = ax.set(aspect='equal', xlabel=r'$x_1$', ylabel=r'$x_2$', + ... xlim=[0, 1], ylim=[0, 1]) + >>> plt.show() + + Such visualization can be seen as circle packing: how many circle can + we put in the space. It is a np-hard problem. The method `fill_space` + can be used to add samples until no more samples can be added. This is + a hard problem and parameters may need to be adjusted manually. Beware of + the dimension: as the dimensionality increases, the number of samples + required to fill the space increases exponentially + (curse-of-dimensionality). + + """ + + def __init__( + self, + d: IntNumber, + *, + radius: DecimalNumber = 0.05, + hypersphere: Literal["volume", "surface"] = "volume", + ncandidates: IntNumber = 30, + optimization: Literal["random-cd", "lloyd"] | None = None, + seed: SeedType = None + ) -> None: + # Used in `scipy.integrate.qmc_quad` + self._init_quad = {'d': d, 'radius': radius, + 'hypersphere': hypersphere, + 'ncandidates': ncandidates, + 'optimization': optimization} + super().__init__(d=d, optimization=optimization, seed=seed) + + hypersphere_sample = { + "volume": self._hypersphere_volume_sample, + "surface": self._hypersphere_surface_sample + } + + try: + self.hypersphere_method = hypersphere_sample[hypersphere] + except KeyError as exc: + message = ( + f"{hypersphere!r} is not a valid hypersphere sampling" + f" method. 
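As a complement to the plotting example in the docstring above, the `fill_space` workflow it mentions can be sketched as follows. This is only a sketch: it assumes the `PoissonDisk` constructor and `fill_space` method as defined in this file, and the number of accepted points depends on the radius and the RNG state.

import numpy as np
from scipy.stats import qmc
from scipy.spatial import distance

rng = np.random.default_rng(5)
engine = qmc.PoissonDisk(d=2, radius=0.2, ncandidates=30, seed=rng)

# Keep accepting candidates until the pool is exhausted.
sample = engine.fill_space()
print(sample.shape)                  # (n, 2); n depends on radius and the RNG

# No two accepted points should be closer than `radius`.
print(distance.pdist(sample).min())  # >= 0.2, up to float32 rounding in the grid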
It must be one of {set(hypersphere_sample)!r}") + raise ValueError(message) from exc + + # size of the sphere from which the samples are drawn relative to the + # size of a disk (radius) + # for the surface sampler, all new points are almost exactly 1 radius + # away from at least one existing sample +eps to avoid rejection + self.radius_factor = 2 if hypersphere == "volume" else 1.001 + self.radius = radius + self.radius_squared = self.radius**2 + + # sample to generate per iteration in the hypersphere around center + self.ncandidates = ncandidates + + with np.errstate(divide='ignore'): + self.cell_size = self.radius / np.sqrt(self.d) + self.grid_size = ( + np.ceil(np.ones(self.d) / self.cell_size) + ).astype(int) + + self._initialize_grid_pool() + + def _initialize_grid_pool(self): + """Sampling pool and sample grid.""" + self.sample_pool = [] + # Positions of cells + # n-dim value for each grid cell + self.sample_grid = np.empty( + np.append(self.grid_size, self.d), + dtype=np.float32 + ) + # Initialise empty cells with NaNs + self.sample_grid.fill(np.nan) + + def _random( + self, n: IntNumber = 1, *, workers: IntNumber = 1 + ) -> np.ndarray: + """Draw `n` in the interval ``[0, 1]``. + + Note that it can return fewer samples if the space is full. + See the note section of the class. + + Parameters + ---------- + n : int, optional + Number of samples to generate in the parameter space. Default is 1. + + Returns + ------- + sample : array_like (n, d) + QMC sample. + + """ + if n == 0 or self.d == 0: + return np.empty((n, self.d)) + + def in_limits(sample: np.ndarray) -> bool: + return (sample.max() <= 1.) and (sample.min() >= 0.) + + def in_neighborhood(candidate: np.ndarray, n: int = 2) -> bool: + """ + Check if there are samples closer than ``radius_squared`` to the + `candidate` sample. + """ + indices = (candidate / self.cell_size).astype(int) + ind_min = np.maximum(indices - n, np.zeros(self.d, dtype=int)) + ind_max = np.minimum(indices + n + 1, self.grid_size) + + # Check if the center cell is empty + if not np.isnan(self.sample_grid[tuple(indices)][0]): + return True + + a = [slice(ind_min[i], ind_max[i]) for i in range(self.d)] + + # guards against: invalid value encountered in less as we are + # comparing with nan and returns False. Which is wanted. 
+ with np.errstate(invalid='ignore'): + if np.any( + np.sum( + np.square(candidate - self.sample_grid[tuple(a)]), + axis=self.d + ) < self.radius_squared + ): + return True + + return False + + def add_sample(candidate: np.ndarray) -> None: + self.sample_pool.append(candidate) + indices = (candidate / self.cell_size).astype(int) + self.sample_grid[tuple(indices)] = candidate + curr_sample.append(candidate) + + curr_sample: list[np.ndarray] = [] + + if len(self.sample_pool) == 0: + # the pool is being initialized with a single random sample + add_sample(self.rng.random(self.d)) + num_drawn = 1 + else: + num_drawn = 0 + + # exhaust sample pool to have up to n sample + while len(self.sample_pool) and num_drawn < n: + # select a sample from the available pool + idx_center = rng_integers(self.rng, len(self.sample_pool)) + center = self.sample_pool[idx_center] + del self.sample_pool[idx_center] + + # generate candidates around the center sample + candidates = self.hypersphere_method( + center, self.radius * self.radius_factor, self.ncandidates + ) + + # keep candidates that satisfy some conditions + for candidate in candidates: + if in_limits(candidate) and not in_neighborhood(candidate): + add_sample(candidate) + + num_drawn += 1 + if num_drawn >= n: + break + + self.num_generated += num_drawn + return np.array(curr_sample) + + def fill_space(self) -> np.ndarray: + """Draw ``n`` samples in the interval ``[0, 1]``. + + Unlike `random`, this method will try to add points until + the space is full. Depending on ``candidates`` (and to a lesser extent + other parameters), some empty areas can still be present in the sample. + + .. warning:: + + This can be extremely slow in high dimensions or if the + ``radius`` is very small-with respect to the dimensionality. + + Returns + ------- + sample : array_like (n, d) + QMC sample. + + """ + return self.random(np.inf) # type: ignore[arg-type] + + def reset(self) -> PoissonDisk: + """Reset the engine to base state. + + Returns + ------- + engine : PoissonDisk + Engine reset to its base state. + + """ + super().reset() + self._initialize_grid_pool() + return self + + def _hypersphere_volume_sample( + self, center: np.ndarray, radius: DecimalNumber, + candidates: IntNumber = 1 + ) -> np.ndarray: + """Uniform sampling within hypersphere.""" + # should remove samples within r/2 + x = self.rng.standard_normal(size=(candidates, self.d)) + ssq = np.sum(x**2, axis=1) + fr = radius * gammainc(self.d/2, ssq/2)**(1/self.d) / np.sqrt(ssq) + fr_tiled = np.tile( + fr.reshape(-1, 1), (1, self.d) # type: ignore[arg-type] + ) + p = center + np.multiply(x, fr_tiled) + return p + + def _hypersphere_surface_sample( + self, center: np.ndarray, radius: DecimalNumber, + candidates: IntNumber = 1 + ) -> np.ndarray: + """Uniform sampling on the hypersphere's surface.""" + vec = self.rng.standard_normal(size=(candidates, self.d)) + vec /= np.linalg.norm(vec, axis=1)[:, None] + p = center + np.multiply(vec, radius) + return p + + +class MultivariateNormalQMC: + r"""QMC sampling from a multivariate Normal :math:`N(\mu, \Sigma)`. + + Parameters + ---------- + mean : array_like (d,) + The mean vector. Where ``d`` is the dimension. + cov : array_like (d, d), optional + The covariance matrix. If omitted, use `cov_root` instead. + If both `cov` and `cov_root` are omitted, use the identity matrix. + cov_root : array_like (d, d'), optional + A root decomposition of the covariance matrix, where ``d'`` may be less + than ``d`` if the covariance is not full rank. If omitted, use `cov`. 
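The two candidate generators above can be re-derived in a standalone sketch: uniform points inside a d-ball come from a Gaussian direction combined with a radius drawn through the regularized incomplete gamma function, and surface points from normalized Gaussian vectors. This is a plain NumPy/SciPy illustration of the same math, not a call into the class.

import numpy as np
from scipy.special import gammainc

rng = np.random.default_rng(0)
d, radius, n = 3, 0.25, 1000
center = np.full(d, 0.5)

# Inside the ball: direction from a standard normal, radial fraction from
# the radial CDF of the uniform distribution in the ball.
x = rng.standard_normal((n, d))
ssq = np.sum(x**2, axis=1)
fr = radius * gammainc(d / 2, ssq / 2) ** (1 / d) / np.sqrt(ssq)
inside = center + x * fr[:, None]
print(np.all(np.linalg.norm(inside - center, axis=1) <= radius))          # True

# On the sphere: normalize a Gaussian vector and scale by the radius.
v = rng.standard_normal((n, d))
v /= np.linalg.norm(v, axis=1)[:, None]
on_surface = center + radius * v
print(np.allclose(np.linalg.norm(on_surface - center, axis=1), radius))   # True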
+ inv_transform : bool, optional + If True, use inverse transform instead of Box-Muller. Default is True. + engine : QMCEngine, optional + Quasi-Monte Carlo engine sampler. If None, `Sobol` is used. + seed : {None, int, `numpy.random.Generator`}, optional + Used only if `engine` is None. + If `seed` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(seed)``. + If `seed` is already a ``Generator`` instance, then the provided + instance is used. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import qmc + >>> dist = qmc.MultivariateNormalQMC(mean=[0, 5], cov=[[1, 0], [0, 1]]) + >>> sample = dist.random(512) + >>> _ = plt.scatter(sample[:, 0], sample[:, 1]) + >>> plt.show() + + """ + + def __init__( + self, mean: npt.ArrayLike, cov: npt.ArrayLike | None = None, *, + cov_root: npt.ArrayLike | None = None, + inv_transform: bool = True, + engine: QMCEngine | None = None, + seed: SeedType = None + ) -> None: + mean = np.asarray(np.atleast_1d(mean)) + d = mean.shape[0] + if cov is not None: + # covariance matrix provided + cov = np.asarray(np.atleast_2d(cov)) + # check for square/symmetric cov matrix and mean vector has the + # same d + if not mean.shape[0] == cov.shape[0]: + raise ValueError("Dimension mismatch between mean and " + "covariance.") + if not np.allclose(cov, cov.transpose()): + raise ValueError("Covariance matrix is not symmetric.") + # compute Cholesky decomp; if it fails, do the eigen decomposition + try: + cov_root = np.linalg.cholesky(cov).transpose() + except np.linalg.LinAlgError: + eigval, eigvec = np.linalg.eigh(cov) + if not np.all(eigval >= -1.0e-8): + raise ValueError("Covariance matrix not PSD.") + eigval = np.clip(eigval, 0.0, None) + cov_root = (eigvec * np.sqrt(eigval)).transpose() + elif cov_root is not None: + # root decomposition provided + cov_root = np.atleast_2d(cov_root) + if not mean.shape[0] == cov_root.shape[0]: + raise ValueError("Dimension mismatch between mean and " + "covariance.") + else: + # corresponds to identity covariance matrix + cov_root = None + + self._inv_transform = inv_transform + + if not inv_transform: + # to apply Box-Muller, we need an even number of dimensions + engine_dim = 2 * math.ceil(d / 2) + else: + engine_dim = d + if engine is None: + self.engine = Sobol( + d=engine_dim, scramble=True, bits=30, seed=seed + ) # type: QMCEngine + elif isinstance(engine, QMCEngine): + if engine.d != engine_dim: + raise ValueError("Dimension of `engine` must be consistent" + " with dimensions of mean and covariance." + " If `inv_transform` is False, it must be" + " an even number.") + self.engine = engine + else: + raise ValueError("`engine` must be an instance of " + "`scipy.stats.qmc.QMCEngine` or `None`.") + + self._mean = mean + self._corr_matrix = cov_root + + self._d = d + + def random(self, n: IntNumber = 1) -> np.ndarray: + """Draw `n` QMC samples from the multivariate Normal. + + Parameters + ---------- + n : int, optional + Number of samples to generate in the parameter space. Default is 1. + + Returns + ------- + sample : array_like (n, d) + Sample. 
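The correlation step used by this class amounts to `z @ cov_root + mean`, where `cov_root` is the transposed Cholesky factor of the covariance. A plain-NumPy sketch of that mapping, with pseudo-random normals standing in for the QMC draws:

import numpy as np

mean = np.array([0.0, 5.0])
cov = np.array([[1.0, 0.7],
                [0.7, 2.0]])
cov_root = np.linalg.cholesky(cov).T       # cov == cov_root.T @ cov_root

rng = np.random.default_rng(1)
z = rng.standard_normal((100_000, 2))      # stand-in for QMC N(0, I) draws
x = z @ cov_root + mean                    # correlated N(mean, cov) sample

print(np.allclose(x.mean(axis=0), mean, atol=0.05))   # True (statistically)
print(np.allclose(np.cov(x.T), cov, atol=0.05))       # True (statistically)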
+ + """ + base_samples = self._standard_normal_samples(n) + return self._correlate(base_samples) + + def _correlate(self, base_samples: np.ndarray) -> np.ndarray: + if self._corr_matrix is not None: + return base_samples @ self._corr_matrix + self._mean + else: + # avoid multiplying with identity here + return base_samples + self._mean + + def _standard_normal_samples(self, n: IntNumber = 1) -> np.ndarray: + """Draw `n` QMC samples from the standard Normal :math:`N(0, I_d)`. + + Parameters + ---------- + n : int, optional + Number of samples to generate in the parameter space. Default is 1. + + Returns + ------- + sample : array_like (n, d) + Sample. + + """ + # get base samples + samples = self.engine.random(n) + if self._inv_transform: + # apply inverse transform + # (values to close to 0/1 result in inf values) + return stats.norm.ppf(0.5 + (1 - 1e-10) * (samples - 0.5)) # type: ignore[attr-defined] # noqa: E501 + else: + # apply Box-Muller transform (note: indexes starting from 1) + even = np.arange(0, samples.shape[-1], 2) + Rs = np.sqrt(-2 * np.log(samples[:, even])) + thetas = 2 * math.pi * samples[:, 1 + even] + cos = np.cos(thetas) + sin = np.sin(thetas) + transf_samples = np.stack([Rs * cos, Rs * sin], + -1).reshape(n, -1) + # make sure we only return the number of dimension requested + return transf_samples[:, : self._d] + + +class MultinomialQMC: + r"""QMC sampling from a multinomial distribution. + + Parameters + ---------- + pvals : array_like (k,) + Vector of probabilities of size ``k``, where ``k`` is the number + of categories. Elements must be non-negative and sum to 1. + n_trials : int + Number of trials. + engine : QMCEngine, optional + Quasi-Monte Carlo engine sampler. If None, `Sobol` is used. + seed : {None, int, `numpy.random.Generator`}, optional + Used only if `engine` is None. + If `seed` is an int or None, a new `numpy.random.Generator` is + created using ``np.random.default_rng(seed)``. + If `seed` is already a ``Generator`` instance, then the provided + instance is used. + + Examples + -------- + Let's define 3 categories and for a given sample, the sum of the trials + of each category is 8. The number of trials per category is determined + by the `pvals` associated to each category. + Then, we sample this distribution 64 times. + + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import qmc + >>> dist = qmc.MultinomialQMC( + ... pvals=[0.2, 0.4, 0.4], n_trials=10, engine=qmc.Halton(d=1) + ... ) + >>> sample = dist.random(64) + + We can plot the sample and verify that the median of number of trials + for each category is following the `pvals`. That would be + ``pvals * n_trials = [2, 4, 4]``. 
+ + >>> fig, ax = plt.subplots() + >>> ax.yaxis.get_major_locator().set_params(integer=True) + >>> _ = ax.boxplot(sample) + >>> ax.set(xlabel="Categories", ylabel="Trials") + >>> plt.show() + + """ + + def __init__( + self, pvals: npt.ArrayLike, n_trials: IntNumber, + *, engine: QMCEngine | None = None, + seed: SeedType = None + ) -> None: + self.pvals = np.atleast_1d(np.asarray(pvals)) + if np.min(pvals) < 0: + raise ValueError('Elements of pvals must be non-negative.') + if not np.isclose(np.sum(pvals), 1): + raise ValueError('Elements of pvals must sum to 1.') + self.n_trials = n_trials + if engine is None: + self.engine = Sobol( + d=1, scramble=True, bits=30, seed=seed + ) # type: QMCEngine + elif isinstance(engine, QMCEngine): + if engine.d != 1: + raise ValueError("Dimension of `engine` must be 1.") + self.engine = engine + else: + raise ValueError("`engine` must be an instance of " + "`scipy.stats.qmc.QMCEngine` or `None`.") + + def random(self, n: IntNumber = 1) -> np.ndarray: + """Draw `n` QMC samples from the multinomial distribution. + + Parameters + ---------- + n : int, optional + Number of samples to generate in the parameter space. Default is 1. + + Returns + ------- + samples : array_like (n, pvals) + Sample. + + """ + sample = np.empty((n, len(self.pvals))) + for i in range(n): + base_draws = self.engine.random(self.n_trials).ravel() + p_cumulative = np.empty_like(self.pvals, dtype=float) + _fill_p_cumulative(np.array(self.pvals, dtype=float), p_cumulative) + sample_ = np.zeros_like(self.pvals, dtype=np.intp) + _categorize(base_draws, p_cumulative, sample_) + sample[i] = sample_ + return sample + + +def _select_optimizer( + optimization: Literal["random-cd", "lloyd"] | None, config: dict +) -> Callable | None: + """A factory for optimization methods.""" + optimization_method: dict[str, Callable] = { + "random-cd": _random_cd, + "lloyd": _lloyd_centroidal_voronoi_tessellation + } + + optimizer: partial | None + if optimization is not None: + try: + optimization = optimization.lower() # type: ignore[assignment] + optimizer_ = optimization_method[optimization] + except KeyError as exc: + message = (f"{optimization!r} is not a valid optimization" + f" method. It must be one of" + f" {set(optimization_method)!r}") + raise ValueError(message) from exc + + # config + optimizer = partial(optimizer_, **config) + else: + optimizer = None + + return optimizer + + +def _random_cd( + best_sample: np.ndarray, n_iters: int, n_nochange: int, rng: GeneratorType, + **kwargs: dict +) -> np.ndarray: + """Optimal LHS on CD. + + Create a base LHS and do random permutations of coordinates to + lower the centered discrepancy. + Because it starts with a normal LHS, it also works with the + `scramble` keyword argument. + + Two stopping criterion are used to stop the algorithm: at most, + `n_iters` iterations are performed; or if there is no improvement + for `n_nochange` consecutive iterations. 
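A minimal usage sketch for the multinomial engine defined above. It assumes this SciPy version, where `pvals` must sum to one and any custom `engine` must be one-dimensional.

import numpy as np
from scipy.stats import qmc

dist = qmc.MultinomialQMC(pvals=[0.2, 0.4, 0.4], n_trials=10, seed=123)
sample = dist.random(64)

print(sample.shape)                          # (64, 3)
print(np.all(sample.sum(axis=1) == 10))      # each draw distributes 10 trials
print(sample.mean(axis=0))                   # close to pvals * n_trials = [2, 4, 4]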
+ """ + del kwargs # only use keywords which are defined, needed by factory + + n, d = best_sample.shape + + if d == 0 or n == 0: + return np.empty((n, d)) + + if d == 1 or n == 1: + # discrepancy measures are invariant under permuting factors and runs + return best_sample + + best_disc = discrepancy(best_sample) + + bounds = ([0, d - 1], + [0, n - 1], + [0, n - 1]) + + n_nochange_ = 0 + n_iters_ = 0 + while n_nochange_ < n_nochange and n_iters_ < n_iters: + n_iters_ += 1 + + col = rng_integers(rng, *bounds[0], endpoint=True) # type: ignore[misc] + row_1 = rng_integers(rng, *bounds[1], endpoint=True) # type: ignore[misc] + row_2 = rng_integers(rng, *bounds[2], endpoint=True) # type: ignore[misc] + disc = _perturb_discrepancy(best_sample, + row_1, row_2, col, + best_disc) + if disc < best_disc: + best_sample[row_1, col], best_sample[row_2, col] = ( + best_sample[row_2, col], best_sample[row_1, col]) + + best_disc = disc + n_nochange_ = 0 + else: + n_nochange_ += 1 + + return best_sample + + +def _l1_norm(sample: np.ndarray) -> float: + return distance.pdist(sample, 'cityblock').min() + + +def _lloyd_iteration( + sample: np.ndarray, + decay: float, + qhull_options: str +) -> np.ndarray: + """Lloyd-Max algorithm iteration. + + Based on the implementation of Stéfan van der Walt: + + https://github.com/stefanv/lloyd + + which is: + + Copyright (c) 2021-04-21 Stéfan van der Walt + https://github.com/stefanv/lloyd + MIT License + + Parameters + ---------- + sample : array_like (n, d) + The sample to iterate on. + decay : float + Relaxation decay. A positive value would move the samples toward + their centroid, and negative value would move them away. + 1 would move the samples to their centroid. + qhull_options : str + Additional options to pass to Qhull. See Qhull manual + for details. (Default: "Qbb Qc Qz Qj Qx" for ndim > 4 and + "Qbb Qc Qz Qj" otherwise.) + + Returns + ------- + sample : array_like (n, d) + The sample after an iteration of Lloyd's algorithm. + + """ + new_sample = np.empty_like(sample) + + voronoi = Voronoi(sample, qhull_options=qhull_options) + + for ii, idx in enumerate(voronoi.point_region): + # the region is a series of indices into self.voronoi.vertices + # remove samples at infinity, designated by index -1 + region = [i for i in voronoi.regions[idx] if i != -1] + + # get the vertices for this region + verts = voronoi.vertices[region] + + # clipping would be wrong, we need to intersect + # verts = np.clip(verts, 0, 1) + + # move samples towards centroids: + # Centroid in n-D is the mean for uniformly distributed nodes + # of a geometry. + centroid = np.mean(verts, axis=0) + new_sample[ii] = sample[ii] + (centroid - sample[ii]) * decay + + # only update sample to centroid within the region + is_valid = np.all(np.logical_and(new_sample >= 0, new_sample <= 1), axis=1) + sample[is_valid] = new_sample[is_valid] + + return sample + + +def _lloyd_centroidal_voronoi_tessellation( + sample: npt.ArrayLike, + *, + tol: DecimalNumber = 1e-5, + maxiter: IntNumber = 10, + qhull_options: str | None = None, + **kwargs: dict +) -> np.ndarray: + """Approximate Centroidal Voronoi Tessellation. + + Perturb samples in N-dimensions using Lloyd-Max algorithm. + + Parameters + ---------- + sample : array_like (n, d) + The sample to iterate on. With ``n`` the number of samples and ``d`` + the dimension. Samples must be in :math:`[0, 1]^d`, with ``d>=2``. + tol : float, optional + Tolerance for termination. 
If the min of the L1-norm over the samples + changes less than `tol`, it stops the algorithm. Default is 1e-5. + maxiter : int, optional + Maximum number of iterations. It will stop the algorithm even if + `tol` is above the threshold. + Too many iterations tend to cluster the samples as a hypersphere. + Default is 10. + qhull_options : str, optional + Additional options to pass to Qhull. See Qhull manual + for details. (Default: "Qbb Qc Qz Qj Qx" for ndim > 4 and + "Qbb Qc Qz Qj" otherwise.) + + Returns + ------- + sample : array_like (n, d) + The sample after being processed by Lloyd-Max algorithm. + + Notes + ----- + Lloyd-Max algorithm is an iterative process with the purpose of improving + the dispersion of samples. For given sample: (i) compute a Voronoi + Tessellation; (ii) find the centroid of each Voronoi cell; (iii) move the + samples toward the centroid of their respective cell. See [1]_, [2]_. + + A relaxation factor is used to control how fast samples can move at each + iteration. This factor is starting at 2 and ending at 1 after `maxiter` + following an exponential decay. + + The process converges to equally spaced samples. It implies that measures + like the discrepancy could suffer from too many iterations. On the other + hand, L1 and L2 distances should improve. This is especially true with + QMC methods which tend to favor the discrepancy over other criteria. + + .. note:: + + The current implementation does not intersect the Voronoi Tessellation + with the boundaries. This implies that for a low number of samples, + empirically below 20, no Voronoi cell is touching the boundaries. + Hence, samples cannot be moved close to the boundaries. + + Further improvements could consider the samples at infinity so that + all boundaries are segments of some Voronoi cells. This would fix + the computation of the centroid position. + + .. warning:: + + The Voronoi Tessellation step is expensive and quickly becomes + intractable with dimensions as low as 10 even for a sample + of size as low as 1000. + + .. versionadded:: 1.9.0 + + References + ---------- + .. [1] Lloyd. "Least Squares Quantization in PCM". + IEEE Transactions on Information Theory, 1982. + .. [2] Max J. "Quantizing for minimum distortion". + IEEE Transactions on Information Theory, 1960. + + Examples + -------- + >>> import numpy as np + >>> from scipy.spatial import distance + >>> from scipy.stats._qmc import _lloyd_centroidal_voronoi_tessellation + >>> rng = np.random.default_rng() + >>> sample = rng.random((128, 2)) + + .. note:: + + The samples need to be in :math:`[0, 1]^d`. `scipy.stats.qmc.scale` + can be used to scale the samples from their + original bounds to :math:`[0, 1]^d`. And back to their original bounds. + + Compute the quality of the sample using the L1 criterion. + + >>> def l1_norm(sample): + ... return distance.pdist(sample, 'cityblock').min() + + >>> l1_norm(sample) + 0.00161... # random + + Now process the sample using Lloyd's algorithm and check the improvement + on the L1. The value should increase. + + >>> sample = _lloyd_centroidal_voronoi_tessellation(sample) + >>> l1_norm(sample) + 0.0278... # random + + """ + del kwargs # only use keywords which are defined, needed by factory + + sample = np.asarray(sample).copy() + + if not sample.ndim == 2: + raise ValueError('`sample` is not a 2D array') + + if not sample.shape[1] >= 2: + raise ValueError('`sample` dimension is not >= 2') + + # Checking that sample is within the hypercube + if (sample.max() > 1.) 
or (sample.min() < 0.): + raise ValueError('`sample` is not in unit hypercube') + + if qhull_options is None: + qhull_options = 'Qbb Qc Qz QJ' + + if sample.shape[1] >= 5: + qhull_options += ' Qx' + + # Fit an exponential to be 2 at 0 and 1 at `maxiter`. + # The decay is used for relaxation. + # analytical solution for y=exp(-maxiter/x) - 0.1 + root = -maxiter / np.log(0.1) + decay = [np.exp(-x / root)+0.9 for x in range(maxiter)] + + l1_old = _l1_norm(sample=sample) + for i in range(maxiter): + sample = _lloyd_iteration( + sample=sample, decay=decay[i], + qhull_options=qhull_options, + ) + + l1_new = _l1_norm(sample=sample) + + if abs(l1_new - l1_old) < tol: + break + else: + l1_old = l1_new + + return sample + + +def _validate_workers(workers: IntNumber = 1) -> IntNumber: + """Validate `workers` based on platform and value. + + Parameters + ---------- + workers : int, optional + Number of workers to use for parallel processing. If -1 is + given all CPU threads are used. Default is 1. + + Returns + ------- + Workers : int + Number of CPU used by the algorithm + + """ + workers = int(workers) + if workers == -1: + workers = os.cpu_count() # type: ignore[assignment] + if workers is None: + raise NotImplementedError( + "Cannot determine the number of cpus using os.cpu_count(), " + "cannot use -1 for the number of workers" + ) + elif workers <= 0: + raise ValueError(f"Invalid number of workers: {workers}, must be -1 " + "or > 0") + + return workers + + +def _validate_bounds( + l_bounds: npt.ArrayLike, u_bounds: npt.ArrayLike, d: int +) -> tuple[np.ndarray, ...]: + """Bounds input validation. + + Parameters + ---------- + l_bounds, u_bounds : array_like (d,) + Lower and upper bounds. + d : int + Dimension to use for broadcasting. + + Returns + ------- + l_bounds, u_bounds : array_like (d,) + Lower and upper bounds. + + """ + try: + lower = np.broadcast_to(l_bounds, d) + upper = np.broadcast_to(u_bounds, d) + except ValueError as exc: + msg = ("'l_bounds' and 'u_bounds' must be broadcastable and respect" + " the sample dimension") + raise ValueError(msg) from exc + + if not np.all(lower < upper): + raise ValueError("Bounds are not consistent 'l_bounds' < 'u_bounds'") + + return lower, upper diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..685573c90d9e2f2d74936996dd276348e7fcd48c Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_qmvnt.py b/venv/lib/python3.10/site-packages/scipy/stats/_qmvnt.py new file mode 100644 index 0000000000000000000000000000000000000000..a0e9c5ebb3cba91e0bfa5e600a1a04d2459280ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_qmvnt.py @@ -0,0 +1,533 @@ +# Integration of multivariate normal and t distributions. + +# Adapted from the MATLAB original implementations by Dr. Alan Genz. + +# http://www.math.wsu.edu/faculty/genz/software/software.html + +# Copyright (C) 2013, Alan Genz, All rights reserved. +# Python implementation is copyright (C) 2022, Robert Kern, All rights +# reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided the following conditions are met: +# 1. 
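The bounds validation above backs public helpers such as `qmc.scale`. A sketch of the round trip between the unit hypercube and arbitrary bounds, assuming `qmc.scale` with a `reverse` keyword as in this SciPy version:

import numpy as np
from scipy.stats import qmc

sample = qmc.Sobol(d=2, scramble=True, seed=3).random_base2(m=4)
l_bounds, u_bounds = [0.0, -5.0], [10.0, 5.0]

scaled = qmc.scale(sample, l_bounds, u_bounds)            # map onto the box
back = qmc.scale(scaled, l_bounds, u_bounds, reverse=True)

print(scaled.min(axis=0), scaled.max(axis=0))             # inside the bounds
print(np.allclose(back, sample))                          # True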
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# 3. The contributor name(s) may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +import numpy as np + +from scipy.fft import fft, ifft +from scipy.special import gammaincinv, ndtr, ndtri +from scipy.stats._qmc import primes_from_2_to + + +phi = ndtr +phinv = ndtri + + +def _factorize_int(n): + """Return a sorted list of the unique prime factors of a positive integer. + """ + # NOTE: There are lots faster ways to do this, but this isn't terrible. + factors = set() + for p in primes_from_2_to(int(np.sqrt(n)) + 1): + while not (n % p): + factors.add(p) + n //= p + if n == 1: + break + if n != 1: + factors.add(n) + return sorted(factors) + + +def _primitive_root(p): + """Compute a primitive root of the prime number `p`. + + Used in the CBC lattice construction. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Primitive_root_modulo_n + """ + # p is prime + pm = p - 1 + factors = _factorize_int(pm) + n = len(factors) + r = 2 + k = 0 + while k < n: + d = pm // factors[k] + # pow() doesn't like numpy scalar types. + rd = pow(int(r), int(d), int(p)) + if rd == 1: + r += 1 + k = 0 + else: + k += 1 + return r + + +def _cbc_lattice(n_dim, n_qmc_samples): + """Compute a QMC lattice generator using a Fast CBC construction. + + Parameters + ---------- + n_dim : int > 0 + The number of dimensions for the lattice. + n_qmc_samples : int > 0 + The desired number of QMC samples. This will be rounded down to the + nearest prime to enable the CBC construction. + + Returns + ------- + q : float array : shape=(n_dim,) + The lattice generator vector. All values are in the open interval + `(0, 1)`. + actual_n_qmc_samples : int + The prime number of QMC samples that must be used with this lattice, + no more, no less. + + References + ---------- + .. [1] Nuyens, D. and Cools, R. "Fast Component-by-Component Construction, + a Reprise for Different Kernels", In H. Niederreiter and D. Talay, + editors, Monte-Carlo and Quasi-Monte Carlo Methods 2004, + Springer-Verlag, 2006, 371-385. + """ + # Round down to the nearest prime number. 
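`_primitive_root` relies on the standard criterion that g is a primitive root modulo a prime p exactly when g**((p-1)//f) != 1 (mod p) for every prime factor f of p - 1. A tiny self-contained check of that property, without calling the private helpers:

def is_primitive_root(g, p, prime_factors):
    # g generates (Z/pZ)* iff no proper divisor (p-1)/f gives g**k == 1 mod p.
    return all(pow(g, (p - 1) // f, p) != 1 for f in prime_factors)

p = 31                      # prime modulus
factors = [2, 3, 5]         # prime factors of p - 1 = 30
print(is_primitive_root(3, p, factors))   # True: 3 generates (Z/31Z)*
print(is_primitive_root(5, p, factors))   # False: 5 has order 3 mod 31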
+ primes = primes_from_2_to(n_qmc_samples + 1) + n_qmc_samples = primes[-1] + + bt = np.ones(n_dim) + gm = np.hstack([1.0, 0.8 ** np.arange(n_dim - 1)]) + q = 1 + w = 0 + z = np.arange(1, n_dim + 1) + m = (n_qmc_samples - 1) // 2 + g = _primitive_root(n_qmc_samples) + # Slightly faster way to compute perm[j] = pow(g, j, n_qmc_samples) + # Shame that we don't have modulo pow() implemented as a ufunc. + perm = np.ones(m, dtype=int) + for j in range(m - 1): + perm[j + 1] = (g * perm[j]) % n_qmc_samples + perm = np.minimum(n_qmc_samples - perm, perm) + pn = perm / n_qmc_samples + c = pn * pn - pn + 1.0 / 6 + fc = fft(c) + for s in range(1, n_dim): + reordered = np.hstack([ + c[:w+1][::-1], + c[w+1:m][::-1], + ]) + q = q * (bt[s-1] + gm[s-1] * reordered) + w = ifft(fc * fft(q)).real.argmin() + z[s] = perm[w] + q = z / n_qmc_samples + return q, n_qmc_samples + + +# Note: this function is not currently used or tested by any SciPy code. It is +# included in this file to facilitate the development of a parameter for users +# to set the desired CDF accuracy, but must be reviewed and tested before use. +def _qauto(func, covar, low, high, rng, error=1e-3, limit=10_000, **kwds): + """Automatically rerun the integration to get the required error bound. + + Parameters + ---------- + func : callable + Either :func:`_qmvn` or :func:`_qmvt`. + covar, low, high : array + As specified in :func:`_qmvn` and :func:`_qmvt`. + rng : Generator, optional + default_rng(), yada, yada + error : float > 0 + The desired error bound. + limit : int > 0: + The rough limit of the number of integration points to consider. The + integration will stop looping once this limit has been *exceeded*. + **kwds : + Other keyword arguments to pass to `func`. When using :func:`_qmvt`, be + sure to include ``nu=`` as one of these. + + Returns + ------- + prob : float + The estimated probability mass within the bounds. + est_error : float + 3 times the standard error of the batch estimates. + n_samples : int + The number of integration points actually used. + """ + n = len(covar) + n_samples = 0 + if n == 1: + prob = phi(high) - phi(low) + # More or less + est_error = 1e-15 + else: + mi = min(limit, n * 1000) + prob = 0.0 + est_error = 1.0 + ei = 0.0 + while est_error > error and n_samples < limit: + mi = round(np.sqrt(2) * mi) + pi, ei, ni = func(mi, covar, low, high, rng=rng, **kwds) + n_samples += ni + wt = 1.0 / (1 + (ei / est_error)**2) + prob += wt * (pi - prob) + est_error = np.sqrt(wt) * ei + return prob, est_error, n_samples + + +# Note: this function is not currently used or tested by any SciPy code. It is +# included in this file to facilitate the resolution of gh-8367, gh-16142, and +# possibly gh-14286, but must be reviewed and tested before use. +def _qmvn(m, covar, low, high, rng, lattice='cbc', n_batches=10): + """Multivariate normal integration over box bounds. + + Parameters + ---------- + m : int > n_batches + The number of points to sample. This number will be divided into + `n_batches` batches that apply random offsets of the sampling lattice + for each batch in order to estimate the error. + covar : (n, n) float array + Possibly singular, positive semidefinite symmetric covariance matrix. + low, high : (n,) float array + The low and high integration bounds. + rng : Generator, optional + default_rng(), yada, yada + lattice : 'cbc' or callable + The type of lattice rule to use to construct the integration points. + n_batches : int > 0, optional + The number of QMC batches to apply. 
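Downstream, the generator vector produced by `_cbc_lattice` is used as a randomly shifted rank-1 lattice: point j is frac(j*q + shift). The sketch below builds such a lattice from a small hand-picked Fibonacci generator (not the CBC construction) and applies the tent periodization transform used by the integrators below.

import numpy as np

n = 55                                  # number of lattice points
q = np.array([1, 34]) / n               # hand-picked generator vector in (0, 1)^2
rng = np.random.default_rng(2024)
shift = rng.random(2)                   # one random shift per batch

j = np.arange(1, n + 1)[:, None]
points = (j * q + shift) % 1.0          # all points lie in [0, 1)^2
print(points.shape)                     # (55, 2)

# Tent periodization maps z -> |2z - 1| while keeping points uniform on [0, 1].
tent = np.abs(2 * points - 1)
print(tent.shape)                       # (55, 2)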
+ + Returns + ------- + prob : float + The estimated probability mass within the bounds. + est_error : float + 3 times the standard error of the batch estimates. + """ + cho, lo, hi = _permuted_cholesky(covar, low, high) + n = cho.shape[0] + ct = cho[0, 0] + c = phi(lo[0] / ct) + d = phi(hi[0] / ct) + ci = c + dci = d - ci + prob = 0.0 + error_var = 0.0 + q, n_qmc_samples = _cbc_lattice(n - 1, max(m // n_batches, 1)) + y = np.zeros((n - 1, n_qmc_samples)) + i_samples = np.arange(n_qmc_samples) + 1 + for j in range(n_batches): + c = np.full(n_qmc_samples, ci) + dc = np.full(n_qmc_samples, dci) + pv = dc.copy() + for i in range(1, n): + # Pseudorandomly-shifted lattice coordinate. + z = q[i - 1] * i_samples + rng.random() + # Fast remainder(z, 1.0) + z -= z.astype(int) + # Tent periodization transform. + x = abs(2 * z - 1) + y[i - 1, :] = phinv(c + x * dc) + s = cho[i, :i] @ y[:i, :] + ct = cho[i, i] + c = phi((lo[i] - s) / ct) + d = phi((hi[i] - s) / ct) + dc = d - c + pv = pv * dc + # Accumulate the mean and error variances with online formulations. + d = (pv.mean() - prob) / (j + 1) + prob += d + error_var = (j - 1) * error_var / (j + 1) + d * d + # Error bounds are 3 times the standard error of the estimates. + est_error = 3 * np.sqrt(error_var) + n_samples = n_qmc_samples * n_batches + return prob, est_error, n_samples + + +# Note: this function is not currently used or tested by any SciPy code. It is +# included in this file to facilitate the resolution of gh-8367, gh-16142, and +# possibly gh-14286, but must be reviewed and tested before use. +def _mvn_qmc_integrand(covar, low, high, use_tent=False): + """Transform the multivariate normal integration into a QMC integrand over + a unit hypercube. + + The dimensionality of the resulting hypercube integration domain is one + less than the dimensionality of the original integrand. Note that this + transformation subsumes the integration bounds in order to account for + infinite bounds. The QMC integration one does with the returned integrand + should be on the unit hypercube. + + Parameters + ---------- + covar : (n, n) float array + Possibly singular, positive semidefinite symmetric covariance matrix. + low, high : (n,) float array + The low and high integration bounds. + use_tent : bool, optional + If True, then use tent periodization. Only helpful for lattice rules. + + Returns + ------- + integrand : Callable[[NDArray], NDArray] + The QMC-integrable integrand. It takes an + ``(n_qmc_samples, ndim_integrand)`` array of QMC samples in the unit + hypercube and returns the ``(n_qmc_samples,)`` evaluations of at these + QMC points. + ndim_integrand : int + The dimensionality of the integrand. Equal to ``n-1``. + """ + cho, lo, hi = _permuted_cholesky(covar, low, high) + n = cho.shape[0] + ndim_integrand = n - 1 + ct = cho[0, 0] + c = phi(lo[0] / ct) + d = phi(hi[0] / ct) + ci = c + dci = d - ci + + def integrand(*zs): + ndim_qmc = len(zs) + n_qmc_samples = len(np.atleast_1d(zs[0])) + assert ndim_qmc == ndim_integrand + y = np.zeros((ndim_qmc, n_qmc_samples)) + c = np.full(n_qmc_samples, ci) + dc = np.full(n_qmc_samples, dci) + pv = dc.copy() + for i in range(1, n): + if use_tent: + # Tent periodization transform. 
+ x = abs(2 * zs[i-1] - 1) + else: + x = zs[i-1] + y[i - 1, :] = phinv(c + x * dc) + s = cho[i, :i] @ y[:i, :] + ct = cho[i, i] + c = phi((lo[i] - s) / ct) + d = phi((hi[i] - s) / ct) + dc = d - c + pv = pv * dc + return pv + + return integrand, ndim_integrand + + +def _qmvt(m, nu, covar, low, high, rng, lattice='cbc', n_batches=10): + """Multivariate t integration over box bounds. + + Parameters + ---------- + m : int > n_batches + The number of points to sample. This number will be divided into + `n_batches` batches that apply random offsets of the sampling lattice + for each batch in order to estimate the error. + nu : float >= 0 + The shape parameter of the multivariate t distribution. + covar : (n, n) float array + Possibly singular, positive semidefinite symmetric covariance matrix. + low, high : (n,) float array + The low and high integration bounds. + rng : Generator, optional + default_rng(), yada, yada + lattice : 'cbc' or callable + The type of lattice rule to use to construct the integration points. + n_batches : int > 0, optional + The number of QMC batches to apply. + + Returns + ------- + prob : float + The estimated probability mass within the bounds. + est_error : float + 3 times the standard error of the batch estimates. + n_samples : int + The number of samples actually used. + """ + sn = max(1.0, np.sqrt(nu)) + low = np.asarray(low, dtype=np.float64) + high = np.asarray(high, dtype=np.float64) + cho, lo, hi = _permuted_cholesky(covar, low / sn, high / sn) + n = cho.shape[0] + prob = 0.0 + error_var = 0.0 + q, n_qmc_samples = _cbc_lattice(n, max(m // n_batches, 1)) + i_samples = np.arange(n_qmc_samples) + 1 + for j in range(n_batches): + pv = np.ones(n_qmc_samples) + s = np.zeros((n, n_qmc_samples)) + for i in range(n): + # Pseudorandomly-shifted lattice coordinate. + z = q[i] * i_samples + rng.random() + # Fast remainder(z, 1.0) + z -= z.astype(int) + # Tent periodization transform. + x = abs(2 * z - 1) + # FIXME: Lift the i==0 case out of the loop to make the logic + # easier to follow. + if i == 0: + # We'll use one of the QR variates to pull out the + # t-distribution scaling. + if nu > 0: + r = np.sqrt(2 * gammaincinv(nu / 2, x)) + else: + r = np.ones_like(x) + else: + y = phinv(c + x * dc) # noqa: F821 + with np.errstate(invalid='ignore'): + s[i:, :] += cho[i:, i - 1][:, np.newaxis] * y + si = s[i, :] + + c = np.ones(n_qmc_samples) + d = np.ones(n_qmc_samples) + with np.errstate(invalid='ignore'): + lois = lo[i] * r - si + hiis = hi[i] * r - si + c[lois < -9] = 0.0 + d[hiis < -9] = 0.0 + lo_mask = abs(lois) < 9 + hi_mask = abs(hiis) < 9 + c[lo_mask] = phi(lois[lo_mask]) + d[hi_mask] = phi(hiis[hi_mask]) + + dc = d - c + pv *= dc + + # Accumulate the mean and error variances with online formulations. + d = (pv.mean() - prob) / (j + 1) + prob += d + error_var = (j - 1) * error_var / (j + 1) + d * d + # Error bounds are 3 times the standard error of the estimates. + est_error = 3 * np.sqrt(error_var) + n_samples = n_qmc_samples * n_batches + return prob, est_error, n_samples + + +def _permuted_cholesky(covar, low, high, tol=1e-10): + """Compute a scaled, permuted Cholesky factor, with integration bounds. + + The scaling and permuting of the dimensions accomplishes part of the + transformation of the original integration problem into a more numerically + tractable form. The lower-triangular Cholesky factor will then be used in + the subsequent integration. The integration bounds will be scaled and + permuted as well. 
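The core of `_qmvn`/`_qmvt` is the sequential-conditioning (Genz) reformulation: after a Cholesky factorization, each coordinate is drawn conditionally and the product of the conditional probabilities estimates the box probability. A 2-D sketch with plain Monte Carlo standing in for the shifted lattice, checked against `scipy.stats.multivariate_normal.cdf`:

import numpy as np
from scipy.stats import multivariate_normal, norm

cov = np.array([[1.0, 0.5],
                [0.5, 1.0]])
high = np.array([0.5, 1.0])               # estimate P(X1 < 0.5, X2 < 1.0)
C = np.linalg.cholesky(cov)               # lower-triangular factor

rng = np.random.default_rng(0)
w = rng.random(100_000)                   # plain MC; _qmvn uses a shifted lattice

e1 = norm.cdf(high[0] / C[0, 0])          # P(X1 < h1)
y1 = norm.ppf(w * e1)                     # draw Z1 conditional on X1 < h1
e2 = norm.cdf((high[1] - C[1, 0] * y1) / C[1, 1])

print((e1 * e2).mean())                                       # estimate
print(multivariate_normal(mean=[0, 0], cov=cov).cdf(high))    # reference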
+ + Parameters + ---------- + covar : (n, n) float array + Possibly singular, positive semidefinite symmetric covariance matrix. + low, high : (n,) float array + The low and high integration bounds. + tol : float, optional + The singularity tolerance. + + Returns + ------- + cho : (n, n) float array + Lower Cholesky factor, scaled and permuted. + new_low, new_high : (n,) float array + The scaled and permuted low and high integration bounds. + """ + # Make copies for outputting. + cho = np.array(covar, dtype=np.float64) + new_lo = np.array(low, dtype=np.float64) + new_hi = np.array(high, dtype=np.float64) + n = cho.shape[0] + if cho.shape != (n, n): + raise ValueError("expected a square symmetric array") + if new_lo.shape != (n,) or new_hi.shape != (n,): + raise ValueError( + "expected integration boundaries the same dimensions " + "as the covariance matrix" + ) + # Scale by the sqrt of the diagonal. + dc = np.sqrt(np.maximum(np.diag(cho), 0.0)) + # But don't divide by 0. + dc[dc == 0.0] = 1.0 + new_lo /= dc + new_hi /= dc + cho /= dc + cho /= dc[:, np.newaxis] + + y = np.zeros(n) + sqtp = np.sqrt(2 * np.pi) + for k in range(n): + epk = (k + 1) * tol + im = k + ck = 0.0 + dem = 1.0 + s = 0.0 + lo_m = 0.0 + hi_m = 0.0 + for i in range(k, n): + if cho[i, i] > tol: + ci = np.sqrt(cho[i, i]) + if i > 0: + s = cho[i, :k] @ y[:k] + lo_i = (new_lo[i] - s) / ci + hi_i = (new_hi[i] - s) / ci + de = phi(hi_i) - phi(lo_i) + if de <= dem: + ck = ci + dem = de + lo_m = lo_i + hi_m = hi_i + im = i + if im > k: + # Swap im and k + cho[im, im] = cho[k, k] + _swap_slices(cho, np.s_[im, :k], np.s_[k, :k]) + _swap_slices(cho, np.s_[im + 1:, im], np.s_[im + 1:, k]) + _swap_slices(cho, np.s_[k + 1:im, k], np.s_[im, k + 1:im]) + _swap_slices(new_lo, k, im) + _swap_slices(new_hi, k, im) + if ck > epk: + cho[k, k] = ck + cho[k, k + 1:] = 0.0 + for i in range(k + 1, n): + cho[i, k] /= ck + cho[i, k + 1:i + 1] -= cho[i, k] * cho[k + 1:i + 1, k] + if abs(dem) > tol: + y[k] = ((np.exp(-lo_m * lo_m / 2) - np.exp(-hi_m * hi_m / 2)) / + (sqtp * dem)) + else: + y[k] = (lo_m + hi_m) / 2 + if lo_m < -10: + y[k] = hi_m + elif hi_m > 10: + y[k] = lo_m + cho[k, :k + 1] /= ck + new_lo[k] /= ck + new_hi[k] /= ck + else: + cho[k:, k] = 0.0 + y[k] = (new_lo[k] + new_hi[k]) / 2 + return cho, new_lo, new_hi + + +def _swap_slices(x, slc1, slc2): + t = x[slc1].copy() + x[slc1] = x[slc2].copy() + x[slc2] = t diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_relative_risk.py b/venv/lib/python3.10/site-packages/scipy/stats/_relative_risk.py new file mode 100644 index 0000000000000000000000000000000000000000..51525fd28adb37c72b12106450e4178c786091b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_relative_risk.py @@ -0,0 +1,263 @@ +import operator +from dataclasses import dataclass +import numpy as np +from scipy.special import ndtri +from ._common import ConfidenceInterval + + +def _validate_int(n, bound, name): + msg = f'{name} must be an integer not less than {bound}, but got {n!r}' + try: + n = operator.index(n) + except TypeError: + raise TypeError(msg) from None + if n < bound: + raise ValueError(msg) + return n + + +@dataclass +class RelativeRiskResult: + """ + Result of `scipy.stats.contingency.relative_risk`. + + Attributes + ---------- + relative_risk : float + This is:: + + (exposed_cases/exposed_total) / (control_cases/control_total) + + exposed_cases : int + The number of "cases" (i.e. occurrence of disease or other event + of interest) among the sample of "exposed" individuals. 
+ exposed_total : int + The total number of "exposed" individuals in the sample. + control_cases : int + The number of "cases" among the sample of "control" or non-exposed + individuals. + control_total : int + The total number of "control" individuals in the sample. + + Methods + ------- + confidence_interval : + Compute the confidence interval for the relative risk estimate. + """ + + relative_risk: float + exposed_cases: int + exposed_total: int + control_cases: int + control_total: int + + def confidence_interval(self, confidence_level=0.95): + """ + Compute the confidence interval for the relative risk. + + The confidence interval is computed using the Katz method + (i.e. "Method C" of [1]_; see also [2]_, section 3.1.2). + + Parameters + ---------- + confidence_level : float, optional + The confidence level to use for the confidence interval. + Default is 0.95. + + Returns + ------- + ci : ConfidenceInterval instance + The return value is an object with attributes ``low`` and + ``high`` that hold the confidence interval. + + References + ---------- + .. [1] D. Katz, J. Baptista, S. P. Azen and M. C. Pike, "Obtaining + confidence intervals for the risk ratio in cohort studies", + Biometrics, 34, 469-474 (1978). + .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology, + CRC Press LLC, Boca Raton, FL, USA (1996). + + + Examples + -------- + >>> from scipy.stats.contingency import relative_risk + >>> result = relative_risk(exposed_cases=10, exposed_total=75, + ... control_cases=12, control_total=225) + >>> result.relative_risk + 2.5 + >>> result.confidence_interval() + ConfidenceInterval(low=1.1261564003469628, high=5.549850800541033) + """ + if not 0 <= confidence_level <= 1: + raise ValueError('confidence_level must be in the interval ' + '[0, 1].') + + # Handle edge cases where either exposed_cases or control_cases + # is zero. We follow the convention of the R function riskratio + # from the epitools library. + if self.exposed_cases == 0 and self.control_cases == 0: + # relative risk is nan. + return ConfidenceInterval(low=np.nan, high=np.nan) + elif self.exposed_cases == 0: + # relative risk is 0. + return ConfidenceInterval(low=0.0, high=np.nan) + elif self.control_cases == 0: + # relative risk is inf + return ConfidenceInterval(low=np.nan, high=np.inf) + + alpha = 1 - confidence_level + z = ndtri(1 - alpha/2) + rr = self.relative_risk + + # Estimate of the variance of log(rr) is + # var(log(rr)) = 1/exposed_cases - 1/exposed_total + + # 1/control_cases - 1/control_total + # and the standard error is the square root of that. + se = np.sqrt(1/self.exposed_cases - 1/self.exposed_total + + 1/self.control_cases - 1/self.control_total) + delta = z*se + katz_lo = rr*np.exp(-delta) + katz_hi = rr*np.exp(delta) + return ConfidenceInterval(low=katz_lo, high=katz_hi) + + +def relative_risk(exposed_cases, exposed_total, control_cases, control_total): + """ + Compute the relative risk (also known as the risk ratio). + + This function computes the relative risk associated with a 2x2 + contingency table ([1]_, section 2.2.3; [2]_, section 3.1.2). Instead + of accepting a table as an argument, the individual numbers that are + used to compute the relative risk are given as separate parameters. + This is to avoid the ambiguity of which row or column of the contingency + table corresponds to the "exposed" cases and which corresponds to the + "control" cases. Unlike, say, the odds ratio, the relative risk is not + invariant under an interchange of the rows or columns. 
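The Katz interval computed by `confidence_interval` above can be reproduced by hand from the log-relative-risk standard error formula used in that method; the numbers below follow its documented example (10 cases among 75 exposed vs. 12 cases among 225 controls).

import numpy as np
from scipy.special import ndtri

exposed_cases, exposed_total = 10, 75
control_cases, control_total = 12, 225

rr = (exposed_cases / exposed_total) / (control_cases / control_total)
se = np.sqrt(1 / exposed_cases - 1 / exposed_total
             + 1 / control_cases - 1 / control_total)
z = ndtri(1 - 0.05 / 2)                      # 95% confidence level
low, high = rr * np.exp(-z * se), rr * np.exp(z * se)

print(rr)           # 2.5
print(low, high)    # ~1.126 and ~5.550, matching the docstring values above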
+ + Parameters + ---------- + exposed_cases : nonnegative int + The number of "cases" (i.e. occurrence of disease or other event + of interest) among the sample of "exposed" individuals. + exposed_total : positive int + The total number of "exposed" individuals in the sample. + control_cases : nonnegative int + The number of "cases" among the sample of "control" or non-exposed + individuals. + control_total : positive int + The total number of "control" individuals in the sample. + + Returns + ------- + result : instance of `~scipy.stats._result_classes.RelativeRiskResult` + The object has the float attribute ``relative_risk``, which is:: + + rr = (exposed_cases/exposed_total) / (control_cases/control_total) + + The object also has the method ``confidence_interval`` to compute + the confidence interval of the relative risk for a given confidence + level. + + See Also + -------- + odds_ratio + + Notes + ----- + The R package epitools has the function `riskratio`, which accepts + a table with the following layout:: + + disease=0 disease=1 + exposed=0 (ref) n00 n01 + exposed=1 n10 n11 + + With a 2x2 table in the above format, the estimate of the CI is + computed by `riskratio` when the argument method="wald" is given, + or with the function `riskratio.wald`. + + For example, in a test of the incidence of lung cancer among a + sample of smokers and nonsmokers, the "exposed" category would + correspond to "is a smoker" and the "disease" category would + correspond to "has or had lung cancer". + + To pass the same data to ``relative_risk``, use:: + + relative_risk(n11, n10 + n11, n01, n00 + n01) + + .. versionadded:: 1.7.0 + + References + ---------- + .. [1] Alan Agresti, An Introduction to Categorical Data Analysis + (second edition), Wiley, Hoboken, NJ, USA (2007). + .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology, + CRC Press LLC, Boca Raton, FL, USA (1996). + + Examples + -------- + >>> from scipy.stats.contingency import relative_risk + + This example is from Example 3.1 of [2]_. The results of a heart + disease study are summarized in the following table:: + + High CAT Low CAT Total + -------- ------- ----- + CHD 27 44 71 + No CHD 95 443 538 + + Total 122 487 609 + + CHD is coronary heart disease, and CAT refers to the level of + circulating catecholamine. CAT is the "exposure" variable, and + high CAT is the "exposed" category. So the data from the table + to be passed to ``relative_risk`` is:: + + exposed_cases = 27 + exposed_total = 122 + control_cases = 44 + control_total = 487 + + >>> result = relative_risk(27, 122, 44, 487) + >>> result.relative_risk + 2.4495156482861398 + + Find the confidence interval for the relative risk. + + >>> result.confidence_interval(confidence_level=0.95) + ConfidenceInterval(low=1.5836990926700116, high=3.7886786315466354) + + The interval does not contain 1, so the data supports the statement + that high CAT is associated with greater risk of CHD. + """ + # Relative risk is a trivial calculation. The nontrivial part is in the + # `confidence_interval` method of the RelativeRiskResult class. 
+ + exposed_cases = _validate_int(exposed_cases, 0, "exposed_cases") + exposed_total = _validate_int(exposed_total, 1, "exposed_total") + control_cases = _validate_int(control_cases, 0, "control_cases") + control_total = _validate_int(control_total, 1, "control_total") + + if exposed_cases > exposed_total: + raise ValueError('exposed_cases must not exceed exposed_total.') + if control_cases > control_total: + raise ValueError('control_cases must not exceed control_total.') + + if exposed_cases == 0 and control_cases == 0: + # relative risk is 0/0. + rr = np.nan + elif exposed_cases == 0: + # relative risk is 0/nonzero + rr = 0.0 + elif control_cases == 0: + # relative risk is nonzero/0. + rr = np.inf + else: + p1 = exposed_cases / exposed_total + p2 = control_cases / control_total + rr = p1 / p2 + return RelativeRiskResult(relative_risk=rr, + exposed_cases=exposed_cases, + exposed_total=exposed_total, + control_cases=control_cases, + control_total=control_total) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_resampling.py b/venv/lib/python3.10/site-packages/scipy/stats/_resampling.py new file mode 100644 index 0000000000000000000000000000000000000000..2e8184ea1686abbcf79f39cc1314b060e2bb5ed9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_resampling.py @@ -0,0 +1,1870 @@ +from __future__ import annotations + +import warnings +import numpy as np +from itertools import combinations, permutations, product +from collections.abc import Sequence +import inspect + +from scipy._lib._util import check_random_state, _rename_parameter +from scipy.special import ndtr, ndtri, comb, factorial +from scipy._lib._util import rng_integers +from dataclasses import dataclass +from ._common import ConfidenceInterval +from ._axis_nan_policy import _broadcast_concatenate, _broadcast_arrays +from ._warnings_errors import DegenerateDataWarning + +__all__ = ['bootstrap', 'monte_carlo_test', 'permutation_test'] + + +def _vectorize_statistic(statistic): + """Vectorize an n-sample statistic""" + # This is a little cleaner than np.nditer at the expense of some data + # copying: concatenate samples together, then use np.apply_along_axis + def stat_nd(*data, axis=0): + lengths = [sample.shape[axis] for sample in data] + split_indices = np.cumsum(lengths)[:-1] + z = _broadcast_concatenate(data, axis) + + # move working axis to position 0 so that new dimensions in the output + # of `statistic` are _prepended_. ("This axis is removed, and replaced + # with new dimensions...") + z = np.moveaxis(z, axis, 0) + + def stat_1d(z): + data = np.split(z, split_indices) + return statistic(*data) + + return np.apply_along_axis(stat_1d, 0, z)[()] + return stat_nd + + +def _jackknife_resample(sample, batch=None): + """Jackknife resample the sample. 
Only one-sample stats for now.""" + n = sample.shape[-1] + batch_nominal = batch or n + + for k in range(0, n, batch_nominal): + # col_start:col_end are the observations to remove + batch_actual = min(batch_nominal, n-k) + + # jackknife - each row leaves out one observation + j = np.ones((batch_actual, n), dtype=bool) + np.fill_diagonal(j[:, k:k+batch_actual], False) + i = np.arange(n) + i = np.broadcast_to(i, (batch_actual, n)) + i = i[j].reshape((batch_actual, n-1)) + + resamples = sample[..., i] + yield resamples + + +def _bootstrap_resample(sample, n_resamples=None, random_state=None): + """Bootstrap resample the sample.""" + n = sample.shape[-1] + + # bootstrap - each row is a random resample of original observations + i = rng_integers(random_state, 0, n, (n_resamples, n)) + + resamples = sample[..., i] + return resamples + + +def _percentile_of_score(a, score, axis): + """Vectorized, simplified `scipy.stats.percentileofscore`. + Uses logic of the 'mean' value of percentileofscore's kind parameter. + + Unlike `stats.percentileofscore`, the percentile returned is a fraction + in [0, 1]. + """ + B = a.shape[axis] + return ((a < score).sum(axis=axis) + (a <= score).sum(axis=axis)) / (2 * B) + + +def _percentile_along_axis(theta_hat_b, alpha): + """`np.percentile` with different percentile for each slice.""" + # the difference between _percentile_along_axis and np.percentile is that + # np.percentile gets _all_ the qs for each axis slice, whereas + # _percentile_along_axis gets the q corresponding with each axis slice + shape = theta_hat_b.shape[:-1] + alpha = np.broadcast_to(alpha, shape) + percentiles = np.zeros_like(alpha, dtype=np.float64) + for indices, alpha_i in np.ndenumerate(alpha): + if np.isnan(alpha_i): + # e.g. when bootstrap distribution has only one unique element + msg = ( + "The BCa confidence interval cannot be calculated." + " This problem is known to occur when the distribution" + " is degenerate or the statistic is np.min." + ) + warnings.warn(DegenerateDataWarning(msg), stacklevel=3) + percentiles[indices] = np.nan + else: + theta_hat_b_i = theta_hat_b[indices] + percentiles[indices] = np.percentile(theta_hat_b_i, alpha_i) + return percentiles[()] # return scalar instead of 0d array + + +def _bca_interval(data, statistic, axis, alpha, theta_hat_b, batch): + """Bias-corrected and accelerated interval.""" + # closely follows [1] 14.3 and 15.4 (Eq. 15.36) + + # calculate z0_hat + theta_hat = np.asarray(statistic(*data, axis=axis))[..., None] + percentile = _percentile_of_score(theta_hat_b, theta_hat, axis=-1) + z0_hat = ndtri(percentile) + + # calculate a_hat + theta_hat_ji = [] # j is for sample of data, i is for jackknife resample + for j, sample in enumerate(data): + # _jackknife_resample will add an axis prior to the last axis that + # corresponds with the different jackknife resamples. Do the same for + # each sample of the data to ensure broadcastability. We need to + # create a copy of the list containing the samples anyway, so do this + # in the loop to simplify the code. This is not the bottleneck... 
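Both resampling helpers above reduce to integer fancy indexing along the last axis; a plain-NumPy sketch of the same idea for a single 1-D sample:

import numpy as np

rng = np.random.default_rng(0)
sample = rng.normal(size=10)
n = sample.shape[-1]

# Bootstrap: an (n_resamples, n) index matrix, drawn with replacement.
idx = rng.integers(0, n, size=(1000, n))
boot = sample[idx]                           # shape (1000, 10)
theta_b = boot.mean(axis=-1)                 # bootstrap distribution of the mean
print(theta_b.std(ddof=1))                   # bootstrap standard error

# Jackknife: row k keeps every observation except the k-th.
keep = ~np.eye(n, dtype=bool)
cols = np.broadcast_to(np.arange(n), (n, n))[keep].reshape(n, n - 1)
jack = sample[cols]                          # shape (10, 9)
print(np.allclose(jack[0], sample[1:]))      # True: first row drops sample[0]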
+ samples = [np.expand_dims(sample, -2) for sample in data] + theta_hat_i = [] + for jackknife_sample in _jackknife_resample(sample, batch): + samples[j] = jackknife_sample + broadcasted = _broadcast_arrays(samples, axis=-1) + theta_hat_i.append(statistic(*broadcasted, axis=-1)) + theta_hat_ji.append(theta_hat_i) + + theta_hat_ji = [np.concatenate(theta_hat_i, axis=-1) + for theta_hat_i in theta_hat_ji] + + n_j = [theta_hat_i.shape[-1] for theta_hat_i in theta_hat_ji] + + theta_hat_j_dot = [theta_hat_i.mean(axis=-1, keepdims=True) + for theta_hat_i in theta_hat_ji] + + U_ji = [(n - 1) * (theta_hat_dot - theta_hat_i) + for theta_hat_dot, theta_hat_i, n + in zip(theta_hat_j_dot, theta_hat_ji, n_j)] + + nums = [(U_i**3).sum(axis=-1)/n**3 for U_i, n in zip(U_ji, n_j)] + dens = [(U_i**2).sum(axis=-1)/n**2 for U_i, n in zip(U_ji, n_j)] + a_hat = 1/6 * sum(nums) / sum(dens)**(3/2) + + # calculate alpha_1, alpha_2 + z_alpha = ndtri(alpha) + z_1alpha = -z_alpha + num1 = z0_hat + z_alpha + alpha_1 = ndtr(z0_hat + num1/(1 - a_hat*num1)) + num2 = z0_hat + z_1alpha + alpha_2 = ndtr(z0_hat + num2/(1 - a_hat*num2)) + return alpha_1, alpha_2, a_hat # return a_hat for testing + + +def _bootstrap_iv(data, statistic, vectorized, paired, axis, confidence_level, + alternative, n_resamples, batch, method, bootstrap_result, + random_state): + """Input validation and standardization for `bootstrap`.""" + + if vectorized not in {True, False, None}: + raise ValueError("`vectorized` must be `True`, `False`, or `None`.") + + if vectorized is None: + vectorized = 'axis' in inspect.signature(statistic).parameters + + if not vectorized: + statistic = _vectorize_statistic(statistic) + + axis_int = int(axis) + if axis != axis_int: + raise ValueError("`axis` must be an integer.") + + n_samples = 0 + try: + n_samples = len(data) + except TypeError: + raise ValueError("`data` must be a sequence of samples.") + + if n_samples == 0: + raise ValueError("`data` must contain at least one sample.") + + data_iv = [] + for sample in data: + sample = np.atleast_1d(sample) + if sample.shape[axis_int] <= 1: + raise ValueError("each sample in `data` must contain two or more " + "observations along `axis`.") + sample = np.moveaxis(sample, axis_int, -1) + data_iv.append(sample) + + if paired not in {True, False}: + raise ValueError("`paired` must be `True` or `False`.") + + if paired: + n = data_iv[0].shape[-1] + for sample in data_iv[1:]: + if sample.shape[-1] != n: + message = ("When `paired is True`, all samples must have the " + "same length along `axis`") + raise ValueError(message) + + # to generate the bootstrap distribution for paired-sample statistics, + # resample the indices of the observations + def statistic(i, axis=-1, data=data_iv, unpaired_statistic=statistic): + data = [sample[..., i] for sample in data] + return unpaired_statistic(*data, axis=axis) + + data_iv = [np.arange(n)] + + confidence_level_float = float(confidence_level) + + alternative = alternative.lower() + alternatives = {'two-sided', 'less', 'greater'} + if alternative not in alternatives: + raise ValueError(f"`alternative` must be one of {alternatives}") + + n_resamples_int = int(n_resamples) + if n_resamples != n_resamples_int or n_resamples_int < 0: + raise ValueError("`n_resamples` must be a non-negative integer.") + + if batch is None: + batch_iv = batch + else: + batch_iv = int(batch) + if batch != batch_iv or batch_iv <= 0: + raise ValueError("`batch` must be a positive integer or None.") + + methods = {'percentile', 'basic', 'bca'} + method = 
method.lower() + if method not in methods: + raise ValueError(f"`method` must be in {methods}") + + message = "`bootstrap_result` must have attribute `bootstrap_distribution'" + if (bootstrap_result is not None + and not hasattr(bootstrap_result, "bootstrap_distribution")): + raise ValueError(message) + + message = ("Either `bootstrap_result.bootstrap_distribution.size` or " + "`n_resamples` must be positive.") + if ((not bootstrap_result or + not bootstrap_result.bootstrap_distribution.size) + and n_resamples_int == 0): + raise ValueError(message) + + random_state = check_random_state(random_state) + + return (data_iv, statistic, vectorized, paired, axis_int, + confidence_level_float, alternative, n_resamples_int, batch_iv, + method, bootstrap_result, random_state) + + +@dataclass +class BootstrapResult: + """Result object returned by `scipy.stats.bootstrap`. + + Attributes + ---------- + confidence_interval : ConfidenceInterval + The bootstrap confidence interval as an instance of + `collections.namedtuple` with attributes `low` and `high`. + bootstrap_distribution : ndarray + The bootstrap distribution, that is, the value of `statistic` for + each resample. The last dimension corresponds with the resamples + (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``). + standard_error : float or ndarray + The bootstrap standard error, that is, the sample standard + deviation of the bootstrap distribution. + + """ + confidence_interval: ConfidenceInterval + bootstrap_distribution: np.ndarray + standard_error: float | np.ndarray + + +def bootstrap(data, statistic, *, n_resamples=9999, batch=None, + vectorized=None, paired=False, axis=0, confidence_level=0.95, + alternative='two-sided', method='BCa', bootstrap_result=None, + random_state=None): + r""" + Compute a two-sided bootstrap confidence interval of a statistic. + + When `method` is ``'percentile'`` and `alternative` is ``'two-sided'``, + a bootstrap confidence interval is computed according to the following + procedure. + + 1. Resample the data: for each sample in `data` and for each of + `n_resamples`, take a random sample of the original sample + (with replacement) of the same size as the original sample. + + 2. Compute the bootstrap distribution of the statistic: for each set of + resamples, compute the test statistic. + + 3. Determine the confidence interval: find the interval of the bootstrap + distribution that is + + - symmetric about the median and + - contains `confidence_level` of the resampled statistic values. + + While the ``'percentile'`` method is the most intuitive, it is rarely + used in practice. Two more common methods are available, ``'basic'`` + ('reverse percentile') and ``'BCa'`` ('bias-corrected and accelerated'); + they differ in how step 3 is performed. + + If the samples in `data` are taken at random from their respective + distributions :math:`n` times, the confidence interval returned by + `bootstrap` will contain the true value of the statistic for those + distributions approximately `confidence_level`:math:`\, \times \, n` times. + + Parameters + ---------- + data : sequence of array-like + Each element of data is a sample from an underlying distribution. + statistic : callable + Statistic for which the confidence interval is to be calculated. + `statistic` must be a callable that accepts ``len(data)`` samples + as separate arguments and returns the resulting statistic. 
+ If `vectorized` is set ``True``, + `statistic` must also accept a keyword argument `axis` and be + vectorized to compute the statistic along the provided `axis`. + n_resamples : int, default: ``9999`` + The number of resamples performed to form the bootstrap distribution + of the statistic. + batch : int, optional + The number of resamples to process in each vectorized call to + `statistic`. Memory usage is O( `batch` * ``n`` ), where ``n`` is the + sample size. Default is ``None``, in which case ``batch = n_resamples`` + (or ``batch = max(n_resamples, n)`` for ``method='BCa'``). + vectorized : bool, optional + If `vectorized` is set ``False``, `statistic` will not be passed + keyword argument `axis` and is expected to calculate the statistic + only for 1D samples. If ``True``, `statistic` will be passed keyword + argument `axis` and is expected to calculate the statistic along `axis` + when passed an ND sample array. If ``None`` (default), `vectorized` + will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of + a vectorized statistic typically reduces computation time. + paired : bool, default: ``False`` + Whether the statistic treats corresponding elements of the samples + in `data` as paired. + axis : int, default: ``0`` + The axis of the samples in `data` along which the `statistic` is + calculated. + confidence_level : float, default: ``0.95`` + The confidence level of the confidence interval. + alternative : {'two-sided', 'less', 'greater'}, default: ``'two-sided'`` + Choose ``'two-sided'`` (default) for a two-sided confidence interval, + ``'less'`` for a one-sided confidence interval with the lower bound + at ``-np.inf``, and ``'greater'`` for a one-sided confidence interval + with the upper bound at ``np.inf``. The other bound of the one-sided + confidence intervals is the same as that of a two-sided confidence + interval with `confidence_level` twice as far from 1.0; e.g. the upper + bound of a 95% ``'less'`` confidence interval is the same as the upper + bound of a 90% ``'two-sided'`` confidence interval. + method : {'percentile', 'basic', 'bca'}, default: ``'BCa'`` + Whether to return the 'percentile' bootstrap confidence interval + (``'percentile'``), the 'basic' (AKA 'reverse') bootstrap confidence + interval (``'basic'``), or the bias-corrected and accelerated bootstrap + confidence interval (``'BCa'``). + bootstrap_result : BootstrapResult, optional + Provide the result object returned by a previous call to `bootstrap` + to include the previous bootstrap distribution in the new bootstrap + distribution. This can be used, for example, to change + `confidence_level`, change `method`, or see the effect of performing + additional resampling without repeating computations. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is ``None`` (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Returns + ------- + res : BootstrapResult + An object with attributes: + + confidence_interval : ConfidenceInterval + The bootstrap confidence interval as an instance of + `collections.namedtuple` with attributes `low` and `high`. 
+ bootstrap_distribution : ndarray + The bootstrap distribution, that is, the value of `statistic` for + each resample. The last dimension corresponds with the resamples + (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``). + standard_error : float or ndarray + The bootstrap standard error, that is, the sample standard + deviation of the bootstrap distribution. + + Warns + ----- + `~scipy.stats.DegenerateDataWarning` + Generated when ``method='BCa'`` and the bootstrap distribution is + degenerate (e.g. all elements are identical). + + Notes + ----- + Elements of the confidence interval may be NaN for ``method='BCa'`` if + the bootstrap distribution is degenerate (e.g. all elements are identical). + In this case, consider using another `method` or inspecting `data` for + indications that other analysis may be more appropriate (e.g. all + observations are identical). + + References + ---------- + .. [1] B. Efron and R. J. Tibshirani, An Introduction to the Bootstrap, + Chapman & Hall/CRC, Boca Raton, FL, USA (1993) + .. [2] Nathaniel E. Helwig, "Bootstrap Confidence Intervals", + http://users.stat.umn.edu/~helwig/notes/bootci-Notes.pdf + .. [3] Bootstrapping (statistics), Wikipedia, + https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 + + Examples + -------- + Suppose we have sampled data from an unknown distribution. + + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> from scipy.stats import norm + >>> dist = norm(loc=2, scale=4) # our "unknown" distribution + >>> data = dist.rvs(size=100, random_state=rng) + + We are interested in the standard deviation of the distribution. + + >>> std_true = dist.std() # the true value of the statistic + >>> print(std_true) + 4.0 + >>> std_sample = np.std(data) # the sample statistic + >>> print(std_sample) + 3.9460644295563863 + + The bootstrap is used to approximate the variability we would expect if we + were to repeatedly sample from the unknown distribution and calculate the + statistic of the sample each time. It does this by repeatedly resampling + values *from the original sample* with replacement and calculating the + statistic of each resample. This results in a "bootstrap distribution" of + the statistic. + + >>> import matplotlib.pyplot as plt + >>> from scipy.stats import bootstrap + >>> data = (data,) # samples must be in a sequence + >>> res = bootstrap(data, np.std, confidence_level=0.9, + ... random_state=rng) + >>> fig, ax = plt.subplots() + >>> ax.hist(res.bootstrap_distribution, bins=25) + >>> ax.set_title('Bootstrap Distribution') + >>> ax.set_xlabel('statistic value') + >>> ax.set_ylabel('frequency') + >>> plt.show() + + The standard error quantifies this variability. It is calculated as the + standard deviation of the bootstrap distribution. + + >>> res.standard_error + 0.24427002125829136 + >>> res.standard_error == np.std(res.bootstrap_distribution, ddof=1) + True + + The bootstrap distribution of the statistic is often approximately normal + with scale equal to the standard error. + + >>> x = np.linspace(3, 5) + >>> pdf = norm.pdf(x, loc=std_sample, scale=res.standard_error) + >>> fig, ax = plt.subplots() + >>> ax.hist(res.bootstrap_distribution, bins=25, density=True) + >>> ax.plot(x, pdf) + >>> ax.set_title('Normal Approximation of the Bootstrap Distribution') + >>> ax.set_xlabel('statistic value') + >>> ax.set_ylabel('pdf') + >>> plt.show() + + This suggests that we could construct a 90% confidence interval on the + statistic based on quantiles of this normal distribution. 
+ + >>> norm.interval(0.9, loc=std_sample, scale=res.standard_error) + (3.5442759991341726, 4.3478528599786) + + Due to the central limit theorem, this normal approximation is accurate for a + variety of statistics and distributions underlying the samples; however, + the approximation is not reliable in all cases. Because `bootstrap` is + designed to work with arbitrary underlying distributions and statistics, + it uses more advanced techniques to generate an accurate confidence + interval. + + >>> print(res.confidence_interval) + ConfidenceInterval(low=3.57655333533867, high=4.382043696342881) + + If we sample from the original distribution 1000 times and form a bootstrap + confidence interval for each sample, the confidence interval + contains the true value of the statistic approximately 90% of the time. + + >>> n_trials = 1000 + >>> ci_contains_true_std = 0 + >>> for i in range(n_trials): + ... data = (dist.rvs(size=100, random_state=rng),) + ... ci = bootstrap(data, np.std, confidence_level=0.9, n_resamples=1000, + ... random_state=rng).confidence_interval + ... if ci[0] < std_true < ci[1]: + ... ci_contains_true_std += 1 + >>> print(ci_contains_true_std) + 875 + + Rather than writing a loop, we can also determine the confidence intervals + for all 1000 samples at once. + + >>> data = (dist.rvs(size=(n_trials, 100), random_state=rng),) + >>> res = bootstrap(data, np.std, axis=-1, confidence_level=0.9, + ... n_resamples=1000, random_state=rng) + >>> ci_l, ci_u = res.confidence_interval + + Here, `ci_l` and `ci_u` contain the confidence interval for each of the + ``n_trials = 1000`` samples. + + >>> print(ci_l[995:]) + [3.77729695 3.75090233 3.45829131 3.34078217 3.48072829] + >>> print(ci_u[995:]) + [4.88316666 4.86924034 4.32032996 4.2822427 4.59360598] + + And again, approximately 90% contain the true value, ``std_true = 4``. + + >>> print(np.sum((ci_l < std_true) & (std_true < ci_u))) + 900 + + `bootstrap` can also be used to estimate confidence intervals of + multi-sample statistics, including those calculated by hypothesis + tests. `scipy.stats.mood` performs Mood's test for equal scale parameters, + and it returns two outputs: a statistic and a p-value. To get a + confidence interval for the test statistic, we first wrap + `scipy.stats.mood` in a function that accepts two sample arguments, + accepts an `axis` keyword argument, and returns only the statistic. + + >>> from scipy.stats import mood + >>> def my_statistic(sample1, sample2, axis): + ... statistic, _ = mood(sample1, sample2, axis=-1) + ... return statistic + + Here, we use the 'basic' method with the default 95% confidence level. + + >>> sample1 = norm.rvs(scale=1, size=100, random_state=rng) + >>> sample2 = norm.rvs(scale=2, size=100, random_state=rng) + >>> data = (sample1, sample2) + >>> res = bootstrap(data, my_statistic, method='basic', random_state=rng) + >>> print(mood(sample1, sample2)[0]) # element 0 is the statistic + -5.521109549096542 + >>> print(res.confidence_interval) + ConfidenceInterval(low=-7.255994487314675, high=-4.016202624747605) + + The bootstrap estimate of the standard error is also available. + + >>> print(res.standard_error) + 0.8344963846318795 + + Paired-sample statistics work, too. For example, consider the Pearson + correlation coefficient.
+ + >>> from scipy.stats import pearsonr + >>> n = 100 + >>> x = np.linspace(0, 10, n) + >>> y = x + rng.uniform(size=n) + >>> print(pearsonr(x, y)[0]) # element 0 is the statistic + 0.9962357936065914 + + We wrap `pearsonr` so that it returns only the statistic. + + >>> def my_statistic(x, y): + ... return pearsonr(x, y)[0] + + We call `bootstrap` using ``paired=True``. + Also, since ``my_statistic`` isn't vectorized to calculate the statistic + along a given axis, we pass in ``vectorized=False``. + + >>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True, + ... random_state=rng) + >>> print(res.confidence_interval) + ConfidenceInterval(low=0.9950085825848624, high=0.9971212407917498) + + The result object can be passed back into `bootstrap` to perform additional + resampling: + + >>> len(res.bootstrap_distribution) + 9999 + >>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True, + ... n_resamples=1001, random_state=rng, + ... bootstrap_result=res) + >>> len(res.bootstrap_distribution) + 11000 + + or to change the confidence interval options: + + >>> res2 = bootstrap((x, y), my_statistic, vectorized=False, paired=True, + ... n_resamples=0, random_state=rng, bootstrap_result=res, + ... method='percentile', confidence_level=0.9) + >>> np.testing.assert_equal(res2.bootstrap_distribution, + ... res.bootstrap_distribution) + >>> res.confidence_interval + ConfidenceInterval(low=0.9950035351407804, high=0.9971170323404578) + + without repeating computation of the original bootstrap distribution. + + """ + # Input validation + args = _bootstrap_iv(data, statistic, vectorized, paired, axis, + confidence_level, alternative, n_resamples, batch, + method, bootstrap_result, random_state) + (data, statistic, vectorized, paired, axis, confidence_level, + alternative, n_resamples, batch, method, bootstrap_result, + random_state) = args + + theta_hat_b = ([] if bootstrap_result is None + else [bootstrap_result.bootstrap_distribution]) + + batch_nominal = batch or n_resamples or 1 + + for k in range(0, n_resamples, batch_nominal): + batch_actual = min(batch_nominal, n_resamples-k) + # Generate resamples + resampled_data = [] + for sample in data: + resample = _bootstrap_resample(sample, n_resamples=batch_actual, + random_state=random_state) + resampled_data.append(resample) + + # Compute bootstrap distribution of statistic + theta_hat_b.append(statistic(*resampled_data, axis=-1)) + theta_hat_b = np.concatenate(theta_hat_b, axis=-1) + + # Calculate percentile interval + alpha = ((1 - confidence_level)/2 if alternative == 'two-sided' + else (1 - confidence_level)) + if method == 'bca': + interval = _bca_interval(data, statistic, axis=-1, alpha=alpha, + theta_hat_b=theta_hat_b, batch=batch)[:2] + percentile_fun = _percentile_along_axis + else: + interval = alpha, 1-alpha + + def percentile_fun(a, q): + return np.percentile(a=a, q=q, axis=-1) + + # Calculate confidence interval of statistic + ci_l = percentile_fun(theta_hat_b, interval[0]*100) + ci_u = percentile_fun(theta_hat_b, interval[1]*100) + if method == 'basic': # see [3] + theta_hat = statistic(*data, axis=-1) + ci_l, ci_u = 2*theta_hat - ci_u, 2*theta_hat - ci_l + + if alternative == 'less': + ci_l = np.full_like(ci_l, -np.inf) + elif alternative == 'greater': + ci_u = np.full_like(ci_u, np.inf) + + return BootstrapResult(confidence_interval=ConfidenceInterval(ci_l, ci_u), + bootstrap_distribution=theta_hat_b, + standard_error=np.std(theta_hat_b, ddof=1, axis=-1)) + + +def _monte_carlo_test_iv(data, rvs, statistic, 
vectorized, n_resamples, + batch, alternative, axis): + """Input validation for `monte_carlo_test`.""" + + axis_int = int(axis) + if axis != axis_int: + raise ValueError("`axis` must be an integer.") + + if vectorized not in {True, False, None}: + raise ValueError("`vectorized` must be `True`, `False`, or `None`.") + + if not isinstance(rvs, Sequence): + rvs = (rvs,) + data = (data,) + for rvs_i in rvs: + if not callable(rvs_i): + raise TypeError("`rvs` must be callable or sequence of callables.") + + if not len(rvs) == len(data): + message = "If `rvs` is a sequence, `len(rvs)` must equal `len(data)`." + raise ValueError(message) + + if not callable(statistic): + raise TypeError("`statistic` must be callable.") + + if vectorized is None: + vectorized = 'axis' in inspect.signature(statistic).parameters + + if not vectorized: + statistic_vectorized = _vectorize_statistic(statistic) + else: + statistic_vectorized = statistic + + data = _broadcast_arrays(data, axis) + data_iv = [] + for sample in data: + sample = np.atleast_1d(sample) + sample = np.moveaxis(sample, axis_int, -1) + data_iv.append(sample) + + n_resamples_int = int(n_resamples) + if n_resamples != n_resamples_int or n_resamples_int <= 0: + raise ValueError("`n_resamples` must be a positive integer.") + + if batch is None: + batch_iv = batch + else: + batch_iv = int(batch) + if batch != batch_iv or batch_iv <= 0: + raise ValueError("`batch` must be a positive integer or None.") + + alternatives = {'two-sided', 'greater', 'less'} + alternative = alternative.lower() + if alternative not in alternatives: + raise ValueError(f"`alternative` must be in {alternatives}") + + return (data_iv, rvs, statistic_vectorized, vectorized, n_resamples_int, + batch_iv, alternative, axis_int) + + +@dataclass +class MonteCarloTestResult: + """Result object returned by `scipy.stats.monte_carlo_test`. + + Attributes + ---------- + statistic : float or ndarray + The observed test statistic of the sample. + pvalue : float or ndarray + The p-value for the given alternative. + null_distribution : ndarray + The values of the test statistic generated under the null + hypothesis. + """ + statistic: float | np.ndarray + pvalue: float | np.ndarray + null_distribution: np.ndarray + + +@_rename_parameter('sample', 'data') +def monte_carlo_test(data, rvs, statistic, *, vectorized=None, + n_resamples=9999, batch=None, alternative="two-sided", + axis=0): + r"""Perform a Monte Carlo hypothesis test. + + `data` contains a sample or a sequence of one or more samples. `rvs` + specifies the distribution(s) of the sample(s) in `data` under the null + hypothesis. The value of `statistic` for the given `data` is compared + against a Monte Carlo null distribution: the value of the statistic for + each of `n_resamples` sets of samples generated using `rvs`. This gives + the p-value, the probability of observing such an extreme value of the + test statistic under the null hypothesis. + + Parameters + ---------- + data : array-like or sequence of array-like + An array or sequence of arrays of observations. + rvs : callable or tuple of callables + A callable or sequence of callables that generates random variates + under the null hypothesis. Each element of `rvs` must be a callable + that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and + returns an N-d array sample of that shape. If `rvs` is a sequence, the + number of callables in `rvs` must match the number of samples in + `data`, i.e. ``len(rvs) == len(data)``. 
If `rvs` is a single callable, + `data` is treated as a single sample. + statistic : callable + Statistic for which the p-value of the hypothesis test is to be + calculated. `statistic` must be a callable that accepts a sample + (e.g. ``statistic(sample)``) or ``len(rvs)`` separate samples (e.g. + ``statistic(samples1, sample2)`` if `rvs` contains two callables and + `data` contains two samples) and returns the resulting statistic. + If `vectorized` is set ``True``, `statistic` must also accept a keyword + argument `axis` and be vectorized to compute the statistic along the + provided `axis` of the samples in `data`. + vectorized : bool, optional + If `vectorized` is set ``False``, `statistic` will not be passed + keyword argument `axis` and is expected to calculate the statistic + only for 1D samples. If ``True``, `statistic` will be passed keyword + argument `axis` and is expected to calculate the statistic along `axis` + when passed ND sample arrays. If ``None`` (default), `vectorized` + will be set ``True`` if ``axis`` is a parameter of `statistic`. Use of + a vectorized statistic typically reduces computation time. + n_resamples : int, default: 9999 + Number of samples drawn from each of the callables of `rvs`. + Equivalently, the number statistic values under the null hypothesis + used as the Monte Carlo null distribution. + batch : int, optional + The number of Monte Carlo samples to process in each call to + `statistic`. Memory usage is O( `batch` * ``sample.size[axis]`` ). Default + is ``None``, in which case `batch` equals `n_resamples`. + alternative : {'two-sided', 'less', 'greater'} + The alternative hypothesis for which the p-value is calculated. + For each alternative, the p-value is defined as follows. + + - ``'greater'`` : the percentage of the null distribution that is + greater than or equal to the observed value of the test statistic. + - ``'less'`` : the percentage of the null distribution that is + less than or equal to the observed value of the test statistic. + - ``'two-sided'`` : twice the smaller of the p-values above. + + axis : int, default: 0 + The axis of `data` (or each sample within `data`) over which to + calculate the statistic. + + Returns + ------- + res : MonteCarloTestResult + An object with attributes: + + statistic : float or ndarray + The test statistic of the observed `data`. + pvalue : float or ndarray + The p-value for the given alternative. + null_distribution : ndarray + The values of the test statistic generated under the null + hypothesis. + + .. warning:: + The p-value is calculated by counting the elements of the null + distribution that are as extreme or more extreme than the observed + value of the statistic. Due to the use of finite precision arithmetic, + some statistic functions return numerically distinct values when the + theoretical values would be exactly equal. In some cases, this could + lead to a large error in the calculated p-value. `monte_carlo_test` + guards against this by considering elements in the null distribution + that are "close" (within a relative tolerance of 100 times the + floating point epsilon of inexact dtypes) to the observed + value of the test statistic as equal to the observed value of the + test statistic. However, the user is advised to inspect the null + distribution to assess whether this method of comparison is + appropriate, and if not, calculate the p-value manually. + + References + ---------- + + .. [1] B. Phipson and G. K. Smyth. 
"Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly Drawn." + Statistical Applications in Genetics and Molecular Biology 9.1 (2010). + + Examples + -------- + + Suppose we wish to test whether a small sample has been drawn from a normal + distribution. We decide that we will use the skew of the sample as a + test statistic, and we will consider a p-value of 0.05 to be statistically + significant. + + >>> import numpy as np + >>> from scipy import stats + >>> def statistic(x, axis): + ... return stats.skew(x, axis) + + After collecting our data, we calculate the observed value of the test + statistic. + + >>> rng = np.random.default_rng() + >>> x = stats.skewnorm.rvs(a=1, size=50, random_state=rng) + >>> statistic(x, axis=0) + 0.12457412450240658 + + To determine the probability of observing such an extreme value of the + skewness by chance if the sample were drawn from the normal distribution, + we can perform a Monte Carlo hypothesis test. The test will draw many + samples at random from their normal distribution, calculate the skewness + of each sample, and compare our original skewness against this + distribution to determine an approximate p-value. + + >>> from scipy.stats import monte_carlo_test + >>> # because our statistic is vectorized, we pass `vectorized=True` + >>> rvs = lambda size: stats.norm.rvs(size=size, random_state=rng) + >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True) + >>> print(res.statistic) + 0.12457412450240658 + >>> print(res.pvalue) + 0.7012 + + The probability of obtaining a test statistic less than or equal to the + observed value under the null hypothesis is ~70%. This is greater than + our chosen threshold of 5%, so we cannot consider this to be significant + evidence against the null hypothesis. + + Note that this p-value essentially matches that of + `scipy.stats.skewtest`, which relies on an asymptotic distribution of a + test statistic based on the sample skewness. + + >>> stats.skewtest(x).pvalue + 0.6892046027110614 + + This asymptotic approximation is not valid for small sample sizes, but + `monte_carlo_test` can be used with samples of any size. + + >>> x = stats.skewnorm.rvs(a=1, size=7, random_state=rng) + >>> # stats.skewtest(x) would produce an error due to small sample + >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True) + + The Monte Carlo distribution of the test statistic is provided for + further investigation. 
+ + >>> import matplotlib.pyplot as plt + >>> fig, ax = plt.subplots() + >>> ax.hist(res.null_distribution, bins=50) + >>> ax.set_title("Monte Carlo distribution of test statistic") + >>> ax.set_xlabel("Value of Statistic") + >>> ax.set_ylabel("Frequency") + >>> plt.show() + + """ + args = _monte_carlo_test_iv(data, rvs, statistic, vectorized, + n_resamples, batch, alternative, axis) + (data, rvs, statistic, vectorized, + n_resamples, batch, alternative, axis) = args + + # Some statistics return plain floats; ensure they're at least a NumPy float + observed = np.asarray(statistic(*data, axis=-1))[()] + + n_observations = [sample.shape[-1] for sample in data] + batch_nominal = batch or n_resamples + null_distribution = [] + for k in range(0, n_resamples, batch_nominal): + batch_actual = min(batch_nominal, n_resamples - k) + resamples = [rvs_i(size=(batch_actual, n_observations_i)) + for rvs_i, n_observations_i in zip(rvs, n_observations)] + null_distribution.append(statistic(*resamples, axis=-1)) + null_distribution = np.concatenate(null_distribution) + null_distribution = null_distribution.reshape([-1] + [1]*observed.ndim) + + # relative tolerance for detecting numerically distinct but + # theoretically equal values in the null distribution + eps = (0 if not np.issubdtype(observed.dtype, np.inexact) + else np.finfo(observed.dtype).eps*100) + gamma = np.abs(eps * observed) + + def less(null_distribution, observed): + cmps = null_distribution <= observed + gamma + pvalues = (cmps.sum(axis=0) + 1) / (n_resamples + 1) # see [1] + return pvalues + + def greater(null_distribution, observed): + cmps = null_distribution >= observed - gamma + pvalues = (cmps.sum(axis=0) + 1) / (n_resamples + 1) # see [1] + return pvalues + + def two_sided(null_distribution, observed): + pvalues_less = less(null_distribution, observed) + pvalues_greater = greater(null_distribution, observed) + pvalues = np.minimum(pvalues_less, pvalues_greater) * 2 + return pvalues + + compare = {"less": less, + "greater": greater, + "two-sided": two_sided} + + pvalues = compare[alternative](null_distribution, observed) + pvalues = np.clip(pvalues, 0, 1) + + return MonteCarloTestResult(observed, pvalues, null_distribution) + + +@dataclass +class PermutationTestResult: + """Result object returned by `scipy.stats.permutation_test`. + + Attributes + ---------- + statistic : float or ndarray + The observed test statistic of the data. + pvalue : float or ndarray + The p-value for the given alternative. + null_distribution : ndarray + The values of the test statistic generated under the null + hypothesis. + """ + statistic: float | np.ndarray + pvalue: float | np.ndarray + null_distribution: np.ndarray + + +def _all_partitions_concatenated(ns): + """ + Generate all partitions of indices of groups of given sizes, concatenated + + `ns` is an iterable of ints. 
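+ + For instance, with group sizes ``ns = (2, 1)`` there are ``comb(3, 2) == 3`` + distinct partitions, so the generator should yield three concatenated index + arrays, each containing the indices 0, 1 and 2 exactly once: + + >>> len(list(_all_partitions_concatenated((2, 1)))) + 3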
+ """ + def all_partitions(z, n): + for c in combinations(z, n): + x0 = set(c) + x1 = z - x0 + yield [x0, x1] + + def all_partitions_n(z, ns): + if len(ns) == 0: + yield [z] + return + for c in all_partitions(z, ns[0]): + for d in all_partitions_n(c[1], ns[1:]): + yield c[0:1] + d + + z = set(range(np.sum(ns))) + for partitioning in all_partitions_n(z, ns[:]): + x = np.concatenate([list(partition) + for partition in partitioning]).astype(int) + yield x + + +def _batch_generator(iterable, batch): + """A generator that yields batches of elements from an iterable""" + iterator = iter(iterable) + if batch <= 0: + raise ValueError("`batch` must be positive.") + z = [item for i, item in zip(range(batch), iterator)] + while z: # we don't want StopIteration without yielding an empty list + yield z + z = [item for i, item in zip(range(batch), iterator)] + + +def _pairings_permutations_gen(n_permutations, n_samples, n_obs_sample, batch, + random_state): + # Returns a generator that yields arrays of size + # `(batch, n_samples, n_obs_sample)`. + # Each row is an independent permutation of indices 0 to `n_obs_sample`. + batch = min(batch, n_permutations) + + if hasattr(random_state, 'permuted'): + def batched_perm_generator(): + indices = np.arange(n_obs_sample) + indices = np.tile(indices, (batch, n_samples, 1)) + for k in range(0, n_permutations, batch): + batch_actual = min(batch, n_permutations-k) + # Don't permute in place, otherwise results depend on `batch` + permuted_indices = random_state.permuted(indices, axis=-1) + yield permuted_indices[:batch_actual] + else: # RandomState and early Generators don't have `permuted` + def batched_perm_generator(): + for k in range(0, n_permutations, batch): + batch_actual = min(batch, n_permutations-k) + size = (batch_actual, n_samples, n_obs_sample) + x = random_state.random(size=size) + yield np.argsort(x, axis=-1)[:batch_actual] + + return batched_perm_generator() + + +def _calculate_null_both(data, statistic, n_permutations, batch, + random_state=None): + """ + Calculate null distribution for independent sample tests. + """ + n_samples = len(data) + + # compute number of permutations + # (distinct partitions of data into samples of these sizes) + n_obs_i = [sample.shape[-1] for sample in data] # observations per sample + n_obs_ic = np.cumsum(n_obs_i) + n_obs = n_obs_ic[-1] # total number of observations + n_max = np.prod([comb(n_obs_ic[i], n_obs_ic[i-1]) + for i in range(n_samples-1, 0, -1)]) + + # perm_generator is an iterator that produces permutations of indices + # from 0 to n_obs. We'll concatenate the samples, use these indices to + # permute the data, then split the samples apart again. + if n_permutations >= n_max: + exact_test = True + n_permutations = n_max + perm_generator = _all_partitions_concatenated(n_obs_i) + else: + exact_test = False + # Neither RandomState.permutation nor Generator.permutation + # can permute axis-slices independently. If this feature is + # added in the future, batches of the desired size should be + # generated in a single call. + perm_generator = (random_state.permutation(n_obs) + for i in range(n_permutations)) + + batch = batch or int(n_permutations) + null_distribution = [] + + # First, concatenate all the samples. In batches, permute samples with + # indices produced by the `perm_generator`, split them into new samples of + # the original sizes, compute the statistic for each batch, and add these + # statistic values to the null distribution. 
+ data = np.concatenate(data, axis=-1) + for indices in _batch_generator(perm_generator, batch=batch): + indices = np.array(indices) + + # `indices` is 2D: each row is a permutation of the indices. + # We use it to index `data` along its last axis, which corresponds + # with observations. + # After indexing, the second to last axis of `data_batch` corresponds + # with permutations, and the last axis corresponds with observations. + data_batch = data[..., indices] + + # Move the permutation axis to the front: we'll concatenate a list + # of batched statistic values along this zeroth axis to form the + # null distribution. + data_batch = np.moveaxis(data_batch, -2, 0) + data_batch = np.split(data_batch, n_obs_ic[:-1], axis=-1) + null_distribution.append(statistic(*data_batch, axis=-1)) + null_distribution = np.concatenate(null_distribution, axis=0) + + return null_distribution, n_permutations, exact_test + + +def _calculate_null_pairings(data, statistic, n_permutations, batch, + random_state=None): + """ + Calculate null distribution for association tests. + """ + n_samples = len(data) + + # compute number of permutations (factorial(n) permutations of each sample) + n_obs_sample = data[0].shape[-1] # observations per sample; same for each + n_max = factorial(n_obs_sample)**n_samples + + # `perm_generator` is an iterator that produces a list of permutations of + # indices from 0 to n_obs_sample, one for each sample. + if n_permutations >= n_max: + exact_test = True + n_permutations = n_max + batch = batch or int(n_permutations) + # cartesian product of the sets of all permutations of indices + perm_generator = product(*(permutations(range(n_obs_sample)) + for i in range(n_samples))) + batched_perm_generator = _batch_generator(perm_generator, batch=batch) + else: + exact_test = False + batch = batch or int(n_permutations) + # Separate random permutations of indices for each sample. + # Again, it would be nice if RandomState/Generator.permutation + # could permute each axis-slice separately. + args = n_permutations, n_samples, n_obs_sample, batch, random_state + batched_perm_generator = _pairings_permutations_gen(*args) + + null_distribution = [] + + for indices in batched_perm_generator: + indices = np.array(indices) + + # `indices` is 3D: the zeroth axis is for permutations, the next is + # for samples, and the last is for observations. Swap the first two + # to make the zeroth axis correspond with samples, as it does for + # `data`. + indices = np.swapaxes(indices, 0, 1) + + # When we're done, `data_batch` will be a list of length `n_samples`. + # Each element will be a batch of random permutations of one sample. + # The zeroth axis of each batch will correspond with permutations, + # and the last will correspond with observations. (This makes it + # easy to pass into `statistic`.) + data_batch = [None]*n_samples + for i in range(n_samples): + data_batch[i] = data[i][..., indices[i]] + data_batch[i] = np.moveaxis(data_batch[i], -2, 0) + + null_distribution.append(statistic(*data_batch, axis=-1)) + null_distribution = np.concatenate(null_distribution, axis=0) + + return null_distribution, n_permutations, exact_test + + +def _calculate_null_samples(data, statistic, n_permutations, batch, + random_state=None): + """ + Calculate null distribution for paired-sample tests. + """ + n_samples = len(data) + + # By convention, the meaning of the "samples" permutations type for + # data with only one sample is to flip the sign of the observations. 
+ # Achieve this by adding a second sample - the negative of the original. + if n_samples == 1: + data = [data[0], -data[0]] + + # The "samples" permutation strategy is the same as the "pairings" + # strategy except the roles of samples and observations are flipped. + # So swap these axes, then we'll use the function for the "pairings" + # strategy to do all the work! + data = np.swapaxes(data, 0, -1) + + # (Of course, the user's statistic doesn't know what we've done here, + # so we need to pass it what it's expecting.) + def statistic_wrapped(*data, axis): + data = np.swapaxes(data, 0, -1) + if n_samples == 1: + data = data[0:1] + return statistic(*data, axis=axis) + + return _calculate_null_pairings(data, statistic_wrapped, n_permutations, + batch, random_state) + + +def _permutation_test_iv(data, statistic, permutation_type, vectorized, + n_resamples, batch, alternative, axis, random_state): + """Input validation for `permutation_test`.""" + + axis_int = int(axis) + if axis != axis_int: + raise ValueError("`axis` must be an integer.") + + permutation_types = {'samples', 'pairings', 'independent'} + permutation_type = permutation_type.lower() + if permutation_type not in permutation_types: + raise ValueError(f"`permutation_type` must be in {permutation_types}.") + + if vectorized not in {True, False, None}: + raise ValueError("`vectorized` must be `True`, `False`, or `None`.") + + if vectorized is None: + vectorized = 'axis' in inspect.signature(statistic).parameters + + if not vectorized: + statistic = _vectorize_statistic(statistic) + + message = "`data` must be a tuple containing at least two samples" + try: + if len(data) < 2 and permutation_type == 'independent': + raise ValueError(message) + except TypeError: + raise TypeError(message) + + data = _broadcast_arrays(data, axis) + data_iv = [] + for sample in data: + sample = np.atleast_1d(sample) + if sample.shape[axis] <= 1: + raise ValueError("each sample in `data` must contain two or more " + "observations along `axis`.") + sample = np.moveaxis(sample, axis_int, -1) + data_iv.append(sample) + + n_resamples_int = (int(n_resamples) if not np.isinf(n_resamples) + else np.inf) + if n_resamples != n_resamples_int or n_resamples_int <= 0: + raise ValueError("`n_resamples` must be a positive integer.") + + if batch is None: + batch_iv = batch + else: + batch_iv = int(batch) + if batch != batch_iv or batch_iv <= 0: + raise ValueError("`batch` must be a positive integer or None.") + + alternatives = {'two-sided', 'greater', 'less'} + alternative = alternative.lower() + if alternative not in alternatives: + raise ValueError(f"`alternative` must be in {alternatives}") + + random_state = check_random_state(random_state) + + return (data_iv, statistic, permutation_type, vectorized, n_resamples_int, + batch_iv, alternative, axis_int, random_state) + + +def permutation_test(data, statistic, *, permutation_type='independent', + vectorized=None, n_resamples=9999, batch=None, + alternative="two-sided", axis=0, random_state=None): + r""" + Performs a permutation test of a given statistic on provided data. + + For independent sample statistics, the null hypothesis is that the data are + randomly sampled from the same distribution. + For paired sample statistics, two null hypothesis can be tested: + that the data are paired at random or that the data are assigned to samples + at random. + + Parameters + ---------- + data : iterable of array-like + Contains the samples, each of which is an array of observations. 
+ Dimensions of sample arrays must be compatible for broadcasting except + along `axis`. + statistic : callable + Statistic for which the p-value of the hypothesis test is to be + calculated. `statistic` must be a callable that accepts samples + as separate arguments (e.g. ``statistic(*data)``) and returns the + resulting statistic. + If `vectorized` is set ``True``, `statistic` must also accept a keyword + argument `axis` and be vectorized to compute the statistic along the + provided `axis` of the sample arrays. + permutation_type : {'independent', 'samples', 'pairings'}, optional + The type of permutations to be performed, in accordance with the + null hypothesis. The first two permutation types are for paired sample + statistics, in which all samples contain the same number of + observations and observations with corresponding indices along `axis` + are considered to be paired; the third is for independent sample + statistics. + + - ``'samples'`` : observations are assigned to different samples + but remain paired with the same observations from other samples. + This permutation type is appropriate for paired sample hypothesis + tests such as the Wilcoxon signed-rank test and the paired t-test. + - ``'pairings'`` : observations are paired with different observations, + but they remain within the same sample. This permutation type is + appropriate for association/correlation tests with statistics such + as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's + :math:`r`. + - ``'independent'`` (default) : observations are assigned to different + samples. Samples may contain different numbers of observations. This + permutation type is appropriate for independent sample hypothesis + tests such as the Mann-Whitney :math:`U` test and the independent + sample t-test. + + Please see the Notes section below for more detailed descriptions + of the permutation types. + + vectorized : bool, optional + If `vectorized` is set ``False``, `statistic` will not be passed + keyword argument `axis` and is expected to calculate the statistic + only for 1D samples. If ``True``, `statistic` will be passed keyword + argument `axis` and is expected to calculate the statistic along `axis` + when passed an ND sample array. If ``None`` (default), `vectorized` + will be set ``True`` if ``axis`` is a parameter of `statistic`. Use + of a vectorized statistic typically reduces computation time. + n_resamples : int or np.inf, default: 9999 + Number of random permutations (resamples) used to approximate the null + distribution. If greater than or equal to the number of distinct + permutations, the exact null distribution will be computed. + Note that the number of distinct permutations grows very rapidly with + the sizes of samples, so exact tests are feasible only for very small + data sets. + batch : int, optional + The number of permutations to process in each call to `statistic`. + Memory usage is O( `batch` * ``n`` ), where ``n`` is the total size + of all samples, regardless of the value of `vectorized`. Default is + ``None``, in which case ``batch`` is the number of permutations. + alternative : {'two-sided', 'less', 'greater'}, optional + The alternative hypothesis for which the p-value is calculated. + For each alternative, the p-value is defined for exact tests as + follows. + + - ``'greater'`` : the percentage of the null distribution that is + greater than or equal to the observed value of the test statistic. 
+ - ``'less'`` : the percentage of the null distribution that is + less than or equal to the observed value of the test statistic. + - ``'two-sided'`` (default) : twice the smaller of the p-values above. + + Note that p-values for randomized tests are calculated according to the + conservative (over-estimated) approximation suggested in [2]_ and [3]_ + rather than the unbiased estimator suggested in [4]_. That is, when + calculating the proportion of the randomized null distribution that is + as extreme as the observed value of the test statistic, the values in + the numerator and denominator are both increased by one. An + interpretation of this adjustment is that the observed value of the + test statistic is always included as an element of the randomized + null distribution. + The convention used for two-sided p-values is not universal; + the observed test statistic and null distribution are returned in + case a different definition is preferred. + + axis : int, default: 0 + The axis of the (broadcasted) samples over which to calculate the + statistic. If samples have a different number of dimensions, + singleton dimensions are prepended to samples with fewer dimensions + before `axis` is considered. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate permutations. + + If `random_state` is ``None`` (default), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Returns + ------- + res : PermutationTestResult + An object with attributes: + + statistic : float or ndarray + The observed test statistic of the data. + pvalue : float or ndarray + The p-value for the given alternative. + null_distribution : ndarray + The values of the test statistic generated under the null + hypothesis. + + Notes + ----- + + The three types of permutation tests supported by this function are + described below. + + **Unpaired statistics** (``permutation_type='independent'``): + + The null hypothesis associated with this permutation type is that all + observations are sampled from the same underlying distribution and that + they have been assigned to one of the samples at random. + + Suppose ``data`` contains two samples; e.g. ``a, b = data``. + When ``1 < n_resamples < binom(n, k)``, where + + * ``k`` is the number of observations in ``a``, + * ``n`` is the total number of observations in ``a`` and ``b``, and + * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``), + + the data are pooled (concatenated), randomly assigned to either the first + or second sample, and the statistic is calculated. This process is + performed repeatedly, `permutation` times, generating a distribution of the + statistic under the null hypothesis. The statistic of the original + data is compared to this distribution to determine the p-value. + + When ``n_resamples >= binom(n, k)``, an exact test is performed: the data + are *partitioned* between the samples in each distinct way exactly once, + and the exact null distribution is formed. + Note that for a given partitioning of the data between the samples, + only one ordering/permutation of the data *within* each sample is + considered. 
For statistics that do not depend on the order of the data + within samples, this dramatically reduces computational cost without + affecting the shape of the null distribution (because the frequency/count + of each value is affected by the same factor). + + For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this + permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``. + Because only one ordering/permutation of the data *within* each sample + is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]`` + and ``y = [a4, a3, b1]`` would *not* be considered distinct from the + example above. + + ``permutation_type='independent'`` does not support one-sample statistics, + but it can be applied to statistics with more than two samples. In this + case, if ``n`` is an array of the number of observations within each + sample, the number of distinct partitions is:: + + np.prod([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)]) + + **Paired statistics, permute pairings** (``permutation_type='pairings'``): + + The null hypothesis associated with this permutation type is that + observations within each sample are drawn from the same underlying + distribution and that pairings with elements of other samples are + assigned at random. + + Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we + wish to consider all possible pairings of elements of ``a`` with elements + of a second sample, ``b``. Let ``n`` be the number of observations in + ``a``, which must also equal the number of observations in ``b``. + + When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are + randomly permuted. The user-supplied statistic accepts one data argument, + say ``a_perm``, and calculates the statistic considering ``a_perm`` and + ``b``. This process is performed repeatedly, `permutation` times, + generating a distribution of the statistic under the null hypothesis. + The statistic of the original data is compared to this distribution to + determine the p-value. + + When ``n_resamples >= factorial(n)``, an exact test is performed: + ``a`` is permuted in each distinct way exactly once. Therefore, the + `statistic` is computed for each unique pairing of samples between ``a`` + and ``b`` exactly once. + + For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this + permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left + in its original order. + + ``permutation_type='pairings'`` supports ``data`` containing any number + of samples, each of which must contain the same number of observations. + All samples provided in ``data`` are permuted *independently*. Therefore, + if ``m`` is the number of samples and ``n`` is the number of observations + within each sample, then the number of permutations in an exact test is:: + + factorial(n)**m + + Note that if a two-sample statistic, for example, does not inherently + depend on the order in which observations are provided - only on the + *pairings* of observations - then only one of the two samples should be + provided in ``data``. This dramatically reduces computational cost without + affecting the shape of the null distribution (because the frequency/count + of each value is affected by the same factor). 
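+ + For instance, with ``m = 2`` samples of ``n = 3`` paired observations each, an + exact ``'pairings'`` test would evaluate the statistic for ``factorial(3)**2`` + distinct pairings: + + >>> from math import factorial + >>> factorial(3)**2 + 36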
+ + **Paired statistics, permute samples** (``permutation_type='samples'``): + + The null hypothesis associated with this permutation type is that + observations within each pair are drawn from the same underlying + distribution and that the sample to which they are assigned is random. + + Suppose ``data`` contains two samples; e.g. ``a, b = data``. + Let ``n`` be the number of observations in ``a``, which must also equal + the number of observations in ``b``. + + When ``1 < n_resamples < 2**n``, the elements of ``a`` and ``b`` are + randomly swapped between samples (maintaining their pairings) and the + statistic is calculated. This process is performed repeatedly, + `permutation` times, generating a distribution of the statistic under the + null hypothesis. The statistic of the original data is compared to this + distribution to determine the p-value. + + When ``n_resamples >= 2**n``, an exact test is performed: the observations + are assigned to the two samples in each distinct way (while maintaining + pairings) exactly once. + + For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this + permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``. + + ``permutation_type='samples'`` supports ``data`` containing any number + of samples, each of which must contain the same number of observations. + If ``data`` contains more than one sample, paired observations within + ``data`` are exchanged between samples *independently*. Therefore, if ``m`` + is the number of samples and ``n`` is the number of observations within + each sample, then the number of permutations in an exact test is:: + + factorial(m)**n + + Several paired-sample statistical tests, such as the Wilcoxon signed rank + test and paired-sample t-test, can be performed considering only the + *difference* between two paired elements. Accordingly, if ``data`` contains + only one sample, then the null distribution is formed by independently + changing the *sign* of each observation. + + .. warning:: + The p-value is calculated by counting the elements of the null + distribution that are as extreme or more extreme than the observed + value of the statistic. Due to the use of finite precision arithmetic, + some statistic functions return numerically distinct values when the + theoretical values would be exactly equal. In some cases, this could + lead to a large error in the calculated p-value. `permutation_test` + guards against this by considering elements in the null distribution + that are "close" (within a relative tolerance of 100 times the + floating point epsilon of inexact dtypes) to the observed + value of the test statistic as equal to the observed value of the + test statistic. However, the user is advised to inspect the null + distribution to assess whether this method of comparison is + appropriate, and if not, calculate the p-value manually. See example + below. + + References + ---------- + + .. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951). + .. [2] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be + Zero: Calculating Exact P-values When Permutations Are Randomly Drawn." + Statistical Applications in Genetics and Molecular Biology 9.1 (2010). + .. [3] M. D. Ernst. "Permutation Methods: A Basis for Exact Inference". + Statistical Science (2004). + .. [4] B. Efron and R. J. Tibshirani. An Introduction to the Bootstrap + (1993). + + Examples + -------- + + Suppose we wish to test whether two samples are drawn from the same + distribution.
+    Assume that the underlying distributions are unknown to us,
+    and that before observing the data, we hypothesized that the mean of the
+    first sample would be less than that of the second sample. We decide that
+    we will use the difference between the sample means as a test statistic,
+    and we will consider a p-value of 0.05 to be statistically significant.
+
+    For efficiency, we write the function defining the test statistic in a
+    vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the
+    statistic will be calculated for each axis-slice along `axis`.
+
+    >>> import numpy as np
+    >>> def statistic(x, y, axis):
+    ...     return np.mean(x, axis=axis) - np.mean(y, axis=axis)
+
+    After collecting our data, we calculate the observed value of the test
+    statistic.
+
+    >>> from scipy.stats import norm
+    >>> rng = np.random.default_rng()
+    >>> x = norm.rvs(size=5, random_state=rng)
+    >>> y = norm.rvs(size=6, loc=3, random_state=rng)
+    >>> statistic(x, y, 0)
+    -3.5411688580987266
+
+    Indeed, the test statistic is negative, suggesting that the true mean of
+    the distribution underlying ``x`` is less than that of the distribution
+    underlying ``y``. To determine the probability of this occurring by chance
+    if the two samples were drawn from the same distribution, we perform
+    a permutation test.
+
+    >>> from scipy.stats import permutation_test
+    >>> # because our statistic is vectorized, we pass `vectorized=True`
+    >>> # `n_resamples=np.inf` indicates that an exact test is to be performed
+    >>> res = permutation_test((x, y), statistic, vectorized=True,
+    ...                        n_resamples=np.inf, alternative='less')
+    >>> print(res.statistic)
+    -3.5411688580987266
+    >>> print(res.pvalue)
+    0.004329004329004329
+
+    The probability of obtaining a test statistic less than or equal to the
+    observed value under the null hypothesis is 0.4329%. This is less than our
+    chosen threshold of 5%, so we consider this to be significant evidence
+    against the null hypothesis in favor of the alternative.
+
+    Because the size of the samples above was small, `permutation_test` could
+    perform an exact test. For larger samples, we resort to a randomized
+    permutation test.
+
+    >>> x = norm.rvs(size=100, random_state=rng)
+    >>> y = norm.rvs(size=120, loc=0.3, random_state=rng)
+    >>> res = permutation_test((x, y), statistic, n_resamples=100000,
+    ...                        vectorized=True, alternative='less',
+    ...                        random_state=rng)
+    >>> print(res.statistic)
+    -0.5230459671240913
+    >>> print(res.pvalue)
+    0.00016999830001699983
+
+    The approximate probability of obtaining a test statistic less than or
+    equal to the observed value under the null hypothesis is 0.017%. This is
+    again less than our chosen threshold of 5%, so again we have significant
+    evidence to reject the null hypothesis in favor of the alternative.
+
+    For large samples and number of permutations, the result is comparable to
+    that of the corresponding asymptotic test, the independent sample t-test.
+
+    >>> from scipy.stats import ttest_ind
+    >>> res_asymptotic = ttest_ind(x, y, alternative='less')
+    >>> print(res_asymptotic.pvalue)
+    0.00012688101537979522
+
+    The permutation distribution of the test statistic is provided for
+    further investigation.
+ + >>> import matplotlib.pyplot as plt + >>> plt.hist(res.null_distribution, bins=50) + >>> plt.title("Permutation distribution of test statistic") + >>> plt.xlabel("Value of Statistic") + >>> plt.ylabel("Frequency") + >>> plt.show() + + Inspection of the null distribution is essential if the statistic suffers + from inaccuracy due to limited machine precision. Consider the following + case: + + >>> from scipy.stats import pearsonr + >>> x = [1, 2, 4, 3] + >>> y = [2, 4, 6, 8] + >>> def statistic(x, y): + ... return pearsonr(x, y).statistic + >>> res = permutation_test((x, y), statistic, vectorized=False, + ... permutation_type='pairings', + ... alternative='greater') + >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution + + In this case, some elements of the null distribution differ from the + observed value of the correlation coefficient ``r`` due to numerical noise. + We manually inspect the elements of the null distribution that are nearly + the same as the observed value of the test statistic. + + >>> r + 0.8 + >>> unique = np.unique(null) + >>> unique + array([-1. , -0.8, -0.8, -0.6, -0.4, -0.2, -0.2, 0. , 0.2, 0.2, 0.4, + 0.6, 0.8, 0.8, 1. ]) # may vary + >>> unique[np.isclose(r, unique)].tolist() + [0.7999999999999999, 0.8] + + If `permutation_test` were to perform the comparison naively, the + elements of the null distribution with value ``0.7999999999999999`` would + not be considered as extreme or more extreme as the observed value of the + statistic, so the calculated p-value would be too small. + + >>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null) + >>> incorrect_pvalue + 0.1111111111111111 # may vary + + Instead, `permutation_test` treats elements of the null distribution that + are within ``max(1e-14, abs(r)*1e-14)`` of the observed value of the + statistic ``r`` to be equal to ``r``. + + >>> correct_pvalue = np.count_nonzero(null >= r - 1e-14) / len(null) + >>> correct_pvalue + 0.16666666666666666 + >>> res.pvalue == correct_pvalue + True + + This method of comparison is expected to be accurate in most practical + situations, but the user is advised to assess this by inspecting the + elements of the null distribution that are close to the observed value + of the statistic. Also, consider the use of statistics that can be + calculated using exact arithmetic (e.g. integer statistics). 
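+
+    For instance (an editorial sketch, not part of the original docstring;
+    the ``statistic`` shown here is only an illustration), a rank-based
+    statistic takes only integer values when there are no ties within each
+    sample, so theoretically equal elements of the null distribution are
+    also numerically identical::
+
+        import numpy as np
+        from scipy.stats import permutation_test, rankdata
+
+        x = [1, 2, 4, 3]
+        y = [2, 4, 6, 8]
+
+        def statistic(x, y):
+            # sum of products of paired ranks: integer-valued for untied data
+            # and a monotone transform of Spearman's correlation coefficient
+            return np.sum(rankdata(x) * rankdata(y))
+
+        res = permutation_test((x, y), statistic, vectorized=False,
+                               permutation_type='pairings',
+                               alternative='greater')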
+ + """ + args = _permutation_test_iv(data, statistic, permutation_type, vectorized, + n_resamples, batch, alternative, axis, + random_state) + (data, statistic, permutation_type, vectorized, n_resamples, batch, + alternative, axis, random_state) = args + + observed = statistic(*data, axis=-1) + + null_calculators = {"pairings": _calculate_null_pairings, + "samples": _calculate_null_samples, + "independent": _calculate_null_both} + null_calculator_args = (data, statistic, n_resamples, + batch, random_state) + calculate_null = null_calculators[permutation_type] + null_distribution, n_resamples, exact_test = ( + calculate_null(*null_calculator_args)) + + # See References [2] and [3] + adjustment = 0 if exact_test else 1 + + # relative tolerance for detecting numerically distinct but + # theoretically equal values in the null distribution + eps = (0 if not np.issubdtype(observed.dtype, np.inexact) + else np.finfo(observed.dtype).eps*100) + gamma = np.abs(eps * observed) + + def less(null_distribution, observed): + cmps = null_distribution <= observed + gamma + pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) + return pvalues + + def greater(null_distribution, observed): + cmps = null_distribution >= observed - gamma + pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) + return pvalues + + def two_sided(null_distribution, observed): + pvalues_less = less(null_distribution, observed) + pvalues_greater = greater(null_distribution, observed) + pvalues = np.minimum(pvalues_less, pvalues_greater) * 2 + return pvalues + + compare = {"less": less, + "greater": greater, + "two-sided": two_sided} + + pvalues = compare[alternative](null_distribution, observed) + pvalues = np.clip(pvalues, 0, 1) + + return PermutationTestResult(observed, pvalues, null_distribution) + + +@dataclass +class ResamplingMethod: + """Configuration information for a statistical resampling method. + + Instances of this class can be passed into the `method` parameter of some + hypothesis test functions to perform a resampling or Monte Carlo version + of the hypothesis test. + + Attributes + ---------- + n_resamples : int + The number of resamples to perform or Monte Carlo samples to draw. + batch : int, optional + The number of resamples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all resamples in a single batch. + """ + n_resamples: int = 9999 + batch: int = None # type: ignore[assignment] + + +@dataclass +class MonteCarloMethod(ResamplingMethod): + """Configuration information for a Monte Carlo hypothesis test. + + Instances of this class can be passed into the `method` parameter of some + hypothesis test functions to perform a Monte Carlo version of the + hypothesis tests. + + Attributes + ---------- + n_resamples : int, optional + The number of Monte Carlo samples to draw. Default is 9999. + batch : int, optional + The number of Monte Carlo samples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all samples in a single batch. + rvs : callable or tuple of callables, optional + A callable or sequence of callables that generates random variates + under the null hypothesis. 
Each element of `rvs` must be a callable + that accepts keyword argument ``size`` (e.g. ``rvs(size=(m, n))``) and + returns an N-d array sample of that shape. If `rvs` is a sequence, the + number of callables in `rvs` must match the number of samples passed + to the hypothesis test in which the `MonteCarloMethod` is used. Default + is ``None``, in which case the hypothesis test function chooses values + to match the standard version of the hypothesis test. For example, + the null hypothesis of `scipy.stats.pearsonr` is typically that the + samples are drawn from the standard normal distribution, so + ``rvs = (rng.normal, rng.normal)`` where + ``rng = np.random.default_rng()``. + """ + rvs: object = None + + def _asdict(self): + # `dataclasses.asdict` deepcopies; we don't want that. + return dict(n_resamples=self.n_resamples, batch=self.batch, + rvs=self.rvs) + + +@dataclass +class PermutationMethod(ResamplingMethod): + """Configuration information for a permutation hypothesis test. + + Instances of this class can be passed into the `method` parameter of some + hypothesis test functions to perform a permutation version of the + hypothesis tests. + + Attributes + ---------- + n_resamples : int, optional + The number of resamples to perform. Default is 9999. + batch : int, optional + The number of resamples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all resamples in a single batch. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, then that instance is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is ``None`` (default), the + `numpy.random.RandomState` singleton is used. + """ + random_state: object = None + + def _asdict(self): + # `dataclasses.asdict` deepcopies; we don't want that. + return dict(n_resamples=self.n_resamples, batch=self.batch, + random_state=self.random_state) + + +@dataclass +class BootstrapMethod(ResamplingMethod): + """Configuration information for a bootstrap confidence interval. + + Instances of this class can be passed into the `method` parameter of some + confidence interval methods to generate a bootstrap confidence interval. + + Attributes + ---------- + n_resamples : int, optional + The number of resamples to perform. Default is 9999. + batch : int, optional + The number of resamples to process in each vectorized call to + the statistic. Batch sizes >>1 tend to be faster when the statistic + is vectorized, but memory usage scales linearly with the batch size. + Default is ``None``, which processes all resamples in a single batch. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is already a ``Generator`` or ``RandomState`` + instance, then that instance is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is ``None`` (default), the + `numpy.random.RandomState` singleton is used. 
+ + method : {'bca', 'percentile', 'basic'} + Whether to use the 'percentile' bootstrap ('percentile'), the 'basic' + (AKA 'reverse') bootstrap ('basic'), or the bias-corrected and + accelerated bootstrap ('BCa', default). + """ + random_state: object = None + method: str = 'BCa' + + def _asdict(self): + # `dataclasses.asdict` deepcopies; we don't want that. + return dict(n_resamples=self.n_resamples, batch=self.batch, + random_state=self.random_state, method=self.method) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_sampling.py b/venv/lib/python3.10/site-packages/scipy/stats/_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..44143985a88738c43984347b7787279348fac7f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_sampling.py @@ -0,0 +1,1314 @@ +import math +import numbers +import numpy as np +from scipy import stats +from scipy import special as sc +from ._qmc import (check_random_state as check_random_state_qmc, + Halton, QMCEngine) +from ._unuran.unuran_wrapper import NumericalInversePolynomial +from scipy._lib._util import check_random_state + + +__all__ = ['FastGeneratorInversion', 'RatioUniforms'] + + +# define pdfs and other helper functions to create the generators + +def argus_pdf(x, chi): + # approach follows Baumgarten/Hoermann: Generating ARGUS random variates + # for chi > 5, use relationship of the ARGUS distribution to Gamma(1.5) + if chi <= 5: + y = 1 - x * x + return x * math.sqrt(y) * math.exp(-0.5 * chi**2 * y) + return math.sqrt(x) * math.exp(-x) + + +def argus_gamma_trf(x, chi): + if chi <= 5: + return x + return np.sqrt(1.0 - 2 * x / chi**2) + + +def argus_gamma_inv_trf(x, chi): + if chi <= 5: + return x + return 0.5 * chi**2 * (1 - x**2) + + +def betaprime_pdf(x, a, b): + if x > 0: + logf = (a - 1) * math.log(x) - (a + b) * math.log1p(x) - sc.betaln(a, b) + return math.exp(logf) + else: + # return pdf at x == 0 separately to avoid runtime warnings + if a > 1: + return 0 + elif a < 1: + return np.inf + else: + return 1 / sc.beta(a, b) + + +def beta_valid_params(a, b): + return (min(a, b) >= 0.1) and (max(a, b) <= 700) + + +def gamma_pdf(x, a): + if x > 0: + return math.exp(-math.lgamma(a) + (a - 1.0) * math.log(x) - x) + else: + return 0 if a >= 1 else np.inf + + +def invgamma_pdf(x, a): + if x > 0: + return math.exp(-(a + 1.0) * math.log(x) - math.lgamma(a) - 1 / x) + else: + return 0 if a >= 1 else np.inf + + +def burr_pdf(x, cc, dd): + # note: we use np.exp instead of math.exp, otherwise an overflow + # error can occur in the setup, e.g., for parameters + # 1.89128135, 0.30195177, see test test_burr_overflow + if x > 0: + lx = math.log(x) + return np.exp(-(cc + 1) * lx - (dd + 1) * math.log1p(np.exp(-cc * lx))) + else: + return 0 + + +def burr12_pdf(x, cc, dd): + if x > 0: + lx = math.log(x) + logterm = math.log1p(math.exp(cc * lx)) + return math.exp((cc - 1) * lx - (dd + 1) * logterm + math.log(cc * dd)) + else: + return 0 + + +def chi_pdf(x, a): + if x > 0: + return math.exp( + (a - 1) * math.log(x) + - 0.5 * (x * x) + - (a / 2 - 1) * math.log(2) + - math.lgamma(0.5 * a) + ) + else: + return 0 if a >= 1 else np.inf + + +def chi2_pdf(x, df): + if x > 0: + return math.exp( + (df / 2 - 1) * math.log(x) + - 0.5 * x + - (df / 2) * math.log(2) + - math.lgamma(0.5 * df) + ) + else: + return 0 if df >= 1 else np.inf + + +def alpha_pdf(x, a): + if x > 0: + return math.exp(-2.0 * math.log(x) - 0.5 * (a - 1.0 / x) ** 2) + return 0.0 + + +def bradford_pdf(x, c): + if 0 <= x <= 1: + return 1.0 / (1.0 + c * x) + 
return 0.0 + + +def crystalball_pdf(x, b, m): + if x > -b: + return math.exp(-0.5 * x * x) + return math.exp(m * math.log(m / b) - 0.5 * b * b - m * math.log(m / b - b - x)) + + +def weibull_min_pdf(x, c): + if x > 0: + return c * math.exp((c - 1) * math.log(x) - x**c) + return 0.0 + + +def weibull_max_pdf(x, c): + if x < 0: + return c * math.exp((c - 1) * math.log(-x) - ((-x) ** c)) + return 0.0 + + +def invweibull_pdf(x, c): + if x > 0: + return c * math.exp(-(c + 1) * math.log(x) - x ** (-c)) + return 0.0 + + +def wald_pdf(x): + if x > 0: + return math.exp(-((x - 1) ** 2) / (2 * x)) / math.sqrt(x**3) + return 0.0 + + +def geninvgauss_mode(p, b): + if p > 1: # equivalent mode formulas numerical more stable versions + return (math.sqrt((1 - p) ** 2 + b**2) - (1 - p)) / b + return b / (math.sqrt((1 - p) ** 2 + b**2) + (1 - p)) + + +def geninvgauss_pdf(x, p, b): + m = geninvgauss_mode(p, b) + lfm = (p - 1) * math.log(m) - 0.5 * b * (m + 1 / m) + if x > 0: + return math.exp((p - 1) * math.log(x) - 0.5 * b * (x + 1 / x) - lfm) + return 0.0 + + +def invgauss_mode(mu): + return 1.0 / (math.sqrt(1.5 * 1.5 + 1 / (mu * mu)) + 1.5) + + +def invgauss_pdf(x, mu): + m = invgauss_mode(mu) + lfm = -1.5 * math.log(m) - (m - mu) ** 2 / (2 * m * mu**2) + if x > 0: + return math.exp(-1.5 * math.log(x) - (x - mu) ** 2 / (2 * x * mu**2) - lfm) + return 0.0 + + +def powerlaw_pdf(x, a): + if x > 0: + return x ** (a - 1) + return 0.0 + + +# Define a dictionary: for a given distribution (keys), another dictionary +# (values) specifies the parameters for NumericalInversePolynomial (PINV). +# The keys of the latter dictionary are: +# - pdf: the pdf of the distribution (callable). The signature of the pdf +# is float -> float (i.e., the function does not have to be vectorized). +# If possible, functions like log or exp from the module math should be +# preferred over functions from numpy since the PINV setup will be faster +# in that case. +# - check_pinv_params: callable f that returns true if the shape parameters +# (args) are recommended parameters for PINV (i.e., the u-error does +# not exceed the default tolerance) +# - center: scalar if the center does not depend on args, otherwise +# callable that returns the center as a function of the shape parameters +# - rvs_transform: a callable that can be used to transform the rvs that +# are distributed according to the pdf to the target distribution +# (as an example, see the entry for the beta distribution) +# - rvs_transform_inv: the inverse of rvs_transform (it is required +# for the transformed ppf) +# - mirror_uniform: boolean or a callable that returns true or false +# depending on the shape parameters. If True, the ppf is applied +# to 1-u instead of u to generate rvs, where u is a uniform rv. +# While both u and 1-u are uniform, it can be required to use 1-u +# to compute the u-error correctly. This is only relevant for the argus +# distribution. +# The only required keys are "pdf" and "check_pinv_params". +# All other keys are optional. 
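+#
+# As an illustrative aside (not part of the original module; the class name
+# `_StdNormPDF` is only for illustration), an entry such as "norm" below is
+# consumed by wrapping its pdf in a small object and handing that object to
+# NumericalInversePolynomial:
+#
+#     class _StdNormPDF:
+#         def pdf(self, x):
+#             # an unnormalized density is sufficient for PINV
+#             return math.exp(-x * x / 2)
+#
+#     pinv = NumericalInversePolynomial(_StdNormPDF(), center=0.0)
+#     samples = pinv.rvs(1000)  # approximately standard normal variates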
+ +PINV_CONFIG = { + "alpha": { + "pdf": alpha_pdf, + "check_pinv_params": lambda a: 1.0e-11 <= a < 2.1e5, + "center": lambda a: 0.25 * (math.sqrt(a * a + 8.0) - a), + }, + "anglit": { + "pdf": lambda x: math.cos(2 * x) + 1.0e-13, + # +1.e-13 is necessary, otherwise PINV has strange problems as + # f(upper border) is very close to 0 + "center": 0, + }, + "argus": { + "pdf": argus_pdf, + "center": lambda chi: 0.7 if chi <= 5 else 0.5, + "check_pinv_params": lambda chi: 1e-20 < chi < 901, + "rvs_transform": argus_gamma_trf, + "rvs_transform_inv": argus_gamma_inv_trf, + "mirror_uniform": lambda chi: chi > 5, + }, + "beta": { + "pdf": betaprime_pdf, + "center": lambda a, b: max(0.1, (a - 1) / (b + 1)), + "check_pinv_params": beta_valid_params, + "rvs_transform": lambda x, *args: x / (1 + x), + "rvs_transform_inv": lambda x, *args: x / (1 - x) if x < 1 else np.inf, + }, + "betaprime": { + "pdf": betaprime_pdf, + "center": lambda a, b: max(0.1, (a - 1) / (b + 1)), + "check_pinv_params": beta_valid_params, + }, + "bradford": { + "pdf": bradford_pdf, + "check_pinv_params": lambda a: 1.0e-6 <= a <= 1e9, + "center": 0.5, + }, + "burr": { + "pdf": burr_pdf, + "center": lambda a, b: (2 ** (1 / b) - 1) ** (-1 / a), + "check_pinv_params": lambda a, b: (min(a, b) >= 0.3) and (max(a, b) <= 50), + }, + "burr12": { + "pdf": burr12_pdf, + "center": lambda a, b: (2 ** (1 / b) - 1) ** (1 / a), + "check_pinv_params": lambda a, b: (min(a, b) >= 0.2) and (max(a, b) <= 50), + }, + "cauchy": { + "pdf": lambda x: 1 / (1 + (x * x)), + "center": 0, + }, + "chi": { + "pdf": chi_pdf, + "check_pinv_params": lambda df: 0.05 <= df <= 1.0e6, + "center": lambda a: math.sqrt(a), + }, + "chi2": { + "pdf": chi2_pdf, + "check_pinv_params": lambda df: 0.07 <= df <= 1e6, + "center": lambda a: a, + }, + "cosine": { + "pdf": lambda x: 1 + math.cos(x), + "center": 0, + }, + "crystalball": { + "pdf": crystalball_pdf, + "check_pinv_params": lambda b, m: (0.01 <= b <= 5.5) + and (1.1 <= m <= 75.1), + "center": 0.0, + }, + "expon": { + "pdf": lambda x: math.exp(-x), + "center": 1.0, + }, + "gamma": { + "pdf": gamma_pdf, + "check_pinv_params": lambda a: 0.04 <= a <= 1e6, + "center": lambda a: a, + }, + "gennorm": { + "pdf": lambda x, b: math.exp(-abs(x) ** b), + "check_pinv_params": lambda b: 0.081 <= b <= 45.0, + "center": 0.0, + }, + "geninvgauss": { + "pdf": geninvgauss_pdf, + "check_pinv_params": lambda p, b: (abs(p) <= 1200.0) + and (1.0e-10 <= b <= 1200.0), + "center": geninvgauss_mode, + }, + "gumbel_l": { + "pdf": lambda x: math.exp(x - math.exp(x)), + "center": -0.6, + }, + "gumbel_r": { + "pdf": lambda x: math.exp(-x - math.exp(-x)), + "center": 0.6, + }, + "hypsecant": { + "pdf": lambda x: 1.0 / (math.exp(x) + math.exp(-x)), + "center": 0.0, + }, + "invgamma": { + "pdf": invgamma_pdf, + "check_pinv_params": lambda a: 0.04 <= a <= 1e6, + "center": lambda a: 1 / a, + }, + "invgauss": { + "pdf": invgauss_pdf, + "check_pinv_params": lambda mu: 1.0e-10 <= mu <= 1.0e9, + "center": invgauss_mode, + }, + "invweibull": { + "pdf": invweibull_pdf, + "check_pinv_params": lambda a: 0.12 <= a <= 512, + "center": 1.0, + }, + "laplace": { + "pdf": lambda x: math.exp(-abs(x)), + "center": 0.0, + }, + "logistic": { + "pdf": lambda x: math.exp(-x) / (1 + math.exp(-x)) ** 2, + "center": 0.0, + }, + "maxwell": { + "pdf": lambda x: x * x * math.exp(-0.5 * x * x), + "center": 1.41421, + }, + "moyal": { + "pdf": lambda x: math.exp(-(x + math.exp(-x)) / 2), + "center": 1.2, + }, + "norm": { + "pdf": lambda x: math.exp(-x * x / 2), + "center": 0.0, + }, 
+ "pareto": { + "pdf": lambda x, b: x ** -(b + 1), + "center": lambda b: b / (b - 1) if b > 2 else 1.5, + "check_pinv_params": lambda b: 0.08 <= b <= 400000, + }, + "powerlaw": { + "pdf": powerlaw_pdf, + "center": 1.0, + "check_pinv_params": lambda a: 0.06 <= a <= 1.0e5, + }, + "t": { + "pdf": lambda x, df: (1 + x * x / df) ** (-0.5 * (df + 1)), + "check_pinv_params": lambda a: 0.07 <= a <= 1e6, + "center": 0.0, + }, + "rayleigh": { + "pdf": lambda x: x * math.exp(-0.5 * (x * x)), + "center": 1.0, + }, + "semicircular": { + "pdf": lambda x: math.sqrt(1.0 - (x * x)), + "center": 0, + }, + "wald": { + "pdf": wald_pdf, + "center": 1.0, + }, + "weibull_max": { + "pdf": weibull_max_pdf, + "check_pinv_params": lambda a: 0.25 <= a <= 512, + "center": -1.0, + }, + "weibull_min": { + "pdf": weibull_min_pdf, + "check_pinv_params": lambda a: 0.25 <= a <= 512, + "center": 1.0, + }, +} + + +def _validate_qmc_input(qmc_engine, d, seed): + # Input validation for `qmc_engine` and `d` + # Error messages for invalid `d` are raised by QMCEngine + # we could probably use a stats.qmc.check_qrandom_state + if isinstance(qmc_engine, QMCEngine): + if d is not None and qmc_engine.d != d: + message = "`d` must be consistent with dimension of `qmc_engine`." + raise ValueError(message) + d = qmc_engine.d if d is None else d + elif qmc_engine is None: + d = 1 if d is None else d + qmc_engine = Halton(d, seed=seed) + else: + message = ( + "`qmc_engine` must be an instance of " + "`scipy.stats.qmc.QMCEngine` or `None`." + ) + raise ValueError(message) + + return qmc_engine, d + + +class CustomDistPINV: + def __init__(self, pdf, args): + self._pdf = lambda x: pdf(x, *args) + + def pdf(self, x): + return self._pdf(x) + + +class FastGeneratorInversion: + """ + Fast sampling by numerical inversion of the CDF for a large class of + continuous distributions in `scipy.stats`. + + Parameters + ---------- + dist : rv_frozen object + Frozen distribution object from `scipy.stats`. The list of supported + distributions can be found in the Notes section. The shape parameters, + `loc` and `scale` used to create the distributions must be scalars. + For example, for the Gamma distribution with shape parameter `p`, + `p` has to be a float, and for the beta distribution with shape + parameters (a, b), both a and b have to be floats. + domain : tuple of floats, optional + If one wishes to sample from a truncated/conditional distribution, + the domain has to be specified. + The default is None. In that case, the random variates are not + truncated, and the domain is inferred from the support of the + distribution. + ignore_shape_range : boolean, optional. + If False, shape parameters that are outside of the valid range + of values to ensure that the numerical accuracy (see Notes) is + high, raise a ValueError. If True, any shape parameters that are valid + for the distribution are accepted. This can be useful for testing. + The default is False. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + A NumPy random number generator or seed for the underlying NumPy + random number generator used to generate the stream of uniform + random numbers. + If `random_state` is None, it uses ``self.random_state``. + If `random_state` is an int, + ``np.random.default_rng(random_state)`` is used. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Attributes + ---------- + loc : float + The location parameter. 
+    random_state : {`numpy.random.Generator`, `numpy.random.RandomState`}
+        The random state used in relevant methods like `rvs` (unless
+        another `random_state` is passed as an argument to these methods).
+    scale : float
+        The scale parameter.
+
+    Methods
+    -------
+    cdf
+    evaluate_error
+    ppf
+    qrvs
+    rvs
+    support
+
+    Notes
+    -----
+    The class creates an object for continuous distributions specified
+    by `dist`. The method `rvs` uses a generator from
+    `scipy.stats.sampling` that is created when the object is instantiated.
+    In addition, the methods `qrvs` and `ppf` are added.
+    `qrvs` generates samples based on quasi-random numbers from
+    `scipy.stats.qmc`. `ppf` is the PPF based on the
+    numerical inversion method in [1]_ (`NumericalInversePolynomial`) that is
+    used to generate random variates.
+
+    Supported distributions (`distname`) are:
+    ``alpha``, ``anglit``, ``argus``, ``beta``, ``betaprime``, ``bradford``,
+    ``burr``, ``burr12``, ``cauchy``, ``chi``, ``chi2``, ``cosine``,
+    ``crystalball``, ``expon``, ``gamma``, ``gennorm``, ``geninvgauss``,
+    ``gumbel_l``, ``gumbel_r``, ``hypsecant``, ``invgamma``, ``invgauss``,
+    ``invweibull``, ``laplace``, ``logistic``, ``maxwell``, ``moyal``,
+    ``norm``, ``pareto``, ``powerlaw``, ``t``, ``rayleigh``, ``semicircular``,
+    ``wald``, ``weibull_max``, ``weibull_min``.
+
+    `rvs` relies on the accuracy of the numerical inversion. If very extreme
+    shape parameters are used, the numerical inversion might not work. However,
+    for all implemented distributions, the admissible shape parameters have
+    been tested, and an error will be raised if the user supplies values
+    outside of the allowed range. The u-error should not exceed 1e-10 for all
+    valid parameters. Note that warnings might be raised even if parameters
+    are within the valid range when the object is instantiated.
+    To check numerical accuracy, the method `evaluate_error` can be used.
+
+    Note that all implemented distributions are also part of `scipy.stats`, and
+    the object created by `FastGeneratorInversion` relies on methods like
+    `ppf`, `cdf` and `pdf` from `rv_frozen`. The main benefit of using this
+    class can be summarized as follows: Once the generator to sample random
+    variates is created in the setup step, sampling and evaluation of
+    the PPF using `ppf` are very fast,
+    and performance is essentially independent of the distribution. Therefore,
+    a substantial speed-up can be achieved for many distributions if large
+    numbers of random variates are required. It is important to know that this
+    fast sampling is achieved by inversion of the CDF. Thus, one uniform
+    random variate is transformed into a non-uniform variate, which is an
+    advantage for several simulation methods, e.g., when
+    the variance reduction methods of common random variates or
+    antithetic variates are used ([2]_).
+
+    In addition, inversion makes it possible
+
+    - to use a QMC generator from `scipy.stats.qmc` (method `qrvs`),
+    - to generate random variates truncated to an interval. For example, if
+      one aims to sample standard normal random variates from
+      the interval (2, 4), this can be easily achieved by using the parameter
+      `domain`.
+
+    The location and scale that are initially defined by `dist`
+    can be reset without having to rerun the setup
+    step to create the generator that is used for sampling. The relation
+    of the distribution `Y` with `loc` and `scale` to the standard
+    distribution `X` (i.e., ``loc=0`` and ``scale=1``) is given by
+    ``Y = loc + scale * X``.
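+
+    As an editorial illustration (not part of the original docstring), this
+    relation can be checked directly; resetting ``loc`` and ``scale`` on an
+    existing generator simply shifts and rescales its quantiles::
+
+        from scipy import stats
+        from scipy.stats.sampling import FastGeneratorInversion
+
+        gen = FastGeneratorInversion(stats.norm())
+        x_std = gen.ppf(0.975)          # quantile of X (loc=0, scale=1)
+        gen.loc, gen.scale = 1.0, 2.0
+        x_new = gen.ppf(0.975)          # quantile of Y = 1 + 2 * X
+        assert abs(x_new - (1.0 + 2.0 * x_std)) < 1e-12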
+ + References + ---------- + .. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold. + "Random variate generation by numerical inversion when only the + density is known." ACM Transactions on Modeling and Computer + Simulation (TOMACS) 20.4 (2010): 1-25. + .. [2] Hörmann, Wolfgang, Josef Leydold and Gerhard Derflinger. + "Automatic nonuniform random number generation." + Springer, 2004. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + >>> from scipy.stats.sampling import FastGeneratorInversion + + Let's start with a simple example to illustrate the main features: + + >>> gamma_frozen = stats.gamma(1.5) + >>> gamma_dist = FastGeneratorInversion(gamma_frozen) + >>> r = gamma_dist.rvs(size=1000) + + The mean should be approximately equal to the shape parameter 1.5: + + >>> r.mean() + 1.52423591130436 # may vary + + Similarly, we can draw a sample based on quasi-random numbers: + + >>> r = gamma_dist.qrvs(size=1000) + >>> r.mean() + 1.4996639255942914 # may vary + + Compare the PPF against approximation `ppf`. + + >>> q = [0.001, 0.2, 0.5, 0.8, 0.999] + >>> np.max(np.abs(gamma_frozen.ppf(q) - gamma_dist.ppf(q))) + 4.313394796895409e-08 + + To confirm that the numerical inversion is accurate, we evaluate the + approximation error (u-error), which should be below 1e-10 (for more + details, refer to the documentation of `evaluate_error`): + + >>> gamma_dist.evaluate_error() + (7.446320551265581e-11, nan) # may vary + + Note that the location and scale can be changed without instantiating a + new generator: + + >>> gamma_dist.loc = 2 + >>> gamma_dist.scale = 3 + >>> r = gamma_dist.rvs(size=1000) + + The mean should be approximately 2 + 3*1.5 = 6.5. + + >>> r.mean() + 6.399549295242894 # may vary + + Let us also illustrate how truncation can be applied: + + >>> trunc_norm = FastGeneratorInversion(stats.norm(), domain=(3, 4)) + >>> r = trunc_norm.rvs(size=1000) + >>> 3 < r.min() < r.max() < 4 + True + + Check the mean: + + >>> r.mean() + 3.250433367078603 # may vary + + >>> stats.norm.expect(lb=3, ub=4, conditional=True) + 3.260454285589997 + + In this particular, case, `scipy.stats.truncnorm` could also be used to + generate truncated normal random variates. + + """ + + def __init__( + self, + dist, + *, + domain=None, + ignore_shape_range=False, + random_state=None, + ): + + if isinstance(dist, stats.distributions.rv_frozen): + distname = dist.dist.name + if distname not in PINV_CONFIG.keys(): + raise ValueError( + f"Distribution '{distname}' is not supported." + f"It must be one of {list(PINV_CONFIG.keys())}" + ) + else: + raise ValueError("`dist` must be a frozen distribution object") + + loc = dist.kwds.get("loc", 0) + scale = dist.kwds.get("scale", 1) + args = dist.args + if not np.isscalar(loc): + raise ValueError("loc must be scalar.") + if not np.isscalar(scale): + raise ValueError("scale must be scalar.") + + self._frozendist = getattr(stats, distname)( + *args, + loc=loc, + scale=scale, + ) + self._distname = distname + + nargs = np.broadcast_arrays(args)[0].size + nargs_expected = self._frozendist.dist.numargs + if nargs != nargs_expected: + raise ValueError( + f"Each of the {nargs_expected} shape parameters must be a " + f"scalar, but {nargs} values are provided." 
+ ) + + self.random_state = random_state + + if domain is None: + self._domain = self._frozendist.support() + self._p_lower = 0.0 + self._p_domain = 1.0 + else: + self._domain = domain + self._p_lower = self._frozendist.cdf(self._domain[0]) + _p_domain = self._frozendist.cdf(self._domain[1]) - self._p_lower + self._p_domain = _p_domain + self._set_domain_adj() + self._ignore_shape_range = ignore_shape_range + + # the domain to be passed to NumericalInversePolynomial + # define a separate variable since in case of a transformation, + # domain_pinv will not be the same as self._domain + self._domain_pinv = self._domain + + # get information about the distribution from the config to set up + # the generator + dist = self._process_config(distname, args) + + if self._rvs_transform_inv is not None: + d0 = self._rvs_transform_inv(self._domain[0], *args) + d1 = self._rvs_transform_inv(self._domain[1], *args) + if d0 > d1: + # swap values if transformation if decreasing + d0, d1 = d1, d0 + # only update _domain_pinv and not _domain + # _domain refers to the original distribution, _domain_pinv + # to the transformed distribution + self._domain_pinv = d0, d1 + + # self._center has been set by the call self._process_config + # check if self._center is inside the transformed domain + # _domain_pinv, otherwise move it to the endpoint that is closer + if self._center is not None: + if self._center < self._domain_pinv[0]: + self._center = self._domain_pinv[0] + elif self._center > self._domain_pinv[1]: + self._center = self._domain_pinv[1] + + self._rng = NumericalInversePolynomial( + dist, + random_state=self.random_state, + domain=self._domain_pinv, + center=self._center, + ) + + @property + def random_state(self): + return self._random_state + + @random_state.setter + def random_state(self, random_state): + self._random_state = check_random_state_qmc(random_state) + + @property + def loc(self): + return self._frozendist.kwds.get("loc", 0) + + @loc.setter + def loc(self, loc): + if not np.isscalar(loc): + raise ValueError("loc must be scalar.") + self._frozendist.kwds["loc"] = loc + # update the adjusted domain that depends on loc and scale + self._set_domain_adj() + + @property + def scale(self): + return self._frozendist.kwds.get("scale", 0) + + @scale.setter + def scale(self, scale): + if not np.isscalar(scale): + raise ValueError("scale must be scalar.") + self._frozendist.kwds["scale"] = scale + # update the adjusted domain that depends on loc and scale + self._set_domain_adj() + + def _set_domain_adj(self): + """ Adjust the domain based on loc and scale. """ + loc = self.loc + scale = self.scale + lb = self._domain[0] * scale + loc + ub = self._domain[1] * scale + loc + self._domain_adj = (lb, ub) + + def _process_config(self, distname, args): + cfg = PINV_CONFIG[distname] + if "check_pinv_params" in cfg: + if not self._ignore_shape_range: + if not cfg["check_pinv_params"](*args): + msg = ("No generator is defined for the shape parameters " + f"{args}. 
Use ignore_shape_range to proceed " + "with the selected values.") + raise ValueError(msg) + + if "center" in cfg.keys(): + if not np.isscalar(cfg["center"]): + self._center = cfg["center"](*args) + else: + self._center = cfg["center"] + else: + self._center = None + self._rvs_transform = cfg.get("rvs_transform", None) + self._rvs_transform_inv = cfg.get("rvs_transform_inv", None) + _mirror_uniform = cfg.get("mirror_uniform", None) + if _mirror_uniform is None: + self._mirror_uniform = False + else: + self._mirror_uniform = _mirror_uniform(*args) + + return CustomDistPINV(cfg["pdf"], args) + + def rvs(self, size=None): + """ + Sample from the distribution by inversion. + + Parameters + ---------- + size : int or tuple, optional + The shape of samples. Default is ``None`` in which case a scalar + sample is returned. + + Returns + ------- + rvs : array_like + A NumPy array of random variates. + + Notes + ----- + Random variates are generated by numerical inversion of the CDF, i.e., + `ppf` computed by `NumericalInversePolynomial` when the class + is instantiated. Note that the + default ``rvs`` method of the rv_continuous class is + overwritten. Hence, a different stream of random numbers is generated + even if the same seed is used. + """ + # note: we cannot use self._rng.rvs directly in case + # self._mirror_uniform is true + u = self.random_state.uniform(size=size) + if self._mirror_uniform: + u = 1 - u + r = self._rng.ppf(u) + if self._rvs_transform is not None: + r = self._rvs_transform(r, *self._frozendist.args) + return self.loc + self.scale * r + + def ppf(self, q): + """ + Very fast PPF (inverse CDF) of the distribution which + is a very close approximation of the exact PPF values. + + Parameters + ---------- + u : array_like + Array with probabilities. + + Returns + ------- + ppf : array_like + Quantiles corresponding to the values in `u`. + + Notes + ----- + The evaluation of the PPF is very fast but it may have a large + relative error in the far tails. The numerical precision of the PPF + is controlled by the u-error, that is, + ``max |u - CDF(PPF(u))|`` where the max is taken over points in + the interval [0,1], see `evaluate_error`. + + Note that this PPF is designed to generate random samples. + """ + q = np.asarray(q) + if self._mirror_uniform: + x = self._rng.ppf(1 - q) + else: + x = self._rng.ppf(q) + if self._rvs_transform is not None: + x = self._rvs_transform(x, *self._frozendist.args) + return self.scale * x + self.loc + + def qrvs(self, size=None, d=None, qmc_engine=None): + """ + Quasi-random variates of the given distribution. + + The `qmc_engine` is used to draw uniform quasi-random variates, and + these are converted to quasi-random variates of the given distribution + using inverse transform sampling. + + Parameters + ---------- + size : int, tuple of ints, or None; optional + Defines shape of random variates array. Default is ``None``. + d : int or None, optional + Defines dimension of uniform quasi-random variates to be + transformed. Default is ``None``. + qmc_engine : scipy.stats.qmc.QMCEngine(d=1), optional + Defines the object to use for drawing + quasi-random variates. Default is ``None``, which uses + `scipy.stats.qmc.Halton(1)`. + + Returns + ------- + rvs : ndarray or scalar + Quasi-random variates. See Notes for shape information. + + Notes + ----- + The shape of the output array depends on `size`, `d`, and `qmc_engine`. + The intent is for the interface to be natural, but the detailed rules + to achieve this are complicated. 
+ + - If `qmc_engine` is ``None``, a `scipy.stats.qmc.Halton` instance is + created with dimension `d`. If `d` is not provided, ``d=1``. + - If `qmc_engine` is not ``None`` and `d` is ``None``, `d` is + determined from the dimension of the `qmc_engine`. + - If `qmc_engine` is not ``None`` and `d` is not ``None`` but the + dimensions are inconsistent, a ``ValueError`` is raised. + - After `d` is determined according to the rules above, the output + shape is ``tuple_shape + d_shape``, where: + + - ``tuple_shape = tuple()`` if `size` is ``None``, + - ``tuple_shape = (size,)`` if `size` is an ``int``, + - ``tuple_shape = size`` if `size` is a sequence, + - ``d_shape = tuple()`` if `d` is ``None`` or `d` is 1, and + - ``d_shape = (d,)`` if `d` is greater than 1. + + The elements of the returned array are part of a low-discrepancy + sequence. If `d` is 1, this means that none of the samples are truly + independent. If `d` > 1, each slice ``rvs[..., i]`` will be of a + quasi-independent sequence; see `scipy.stats.qmc.QMCEngine` for + details. Note that when `d` > 1, the samples returned are still those + of the provided univariate distribution, not a multivariate + generalization of that distribution. + + """ + qmc_engine, d = _validate_qmc_input(qmc_engine, d, self.random_state) + # mainly copied from unuran_wrapper.pyx.templ + # `rvs` is flexible about whether `size` is an int or tuple, so this + # should be, too. + try: + if size is None: + tuple_size = (1,) + else: + tuple_size = tuple(size) + except TypeError: + tuple_size = (size,) + # we do not use rng.qrvs directly since we need to be + # able to apply the ppf to 1 - u + N = 1 if size is None else np.prod(size) + u = qmc_engine.random(N) + if self._mirror_uniform: + u = 1 - u + qrvs = self._ppf(u) + if self._rvs_transform is not None: + qrvs = self._rvs_transform(qrvs, *self._frozendist.args) + if size is None: + qrvs = qrvs.squeeze()[()] + else: + if d == 1: + qrvs = qrvs.reshape(tuple_size) + else: + qrvs = qrvs.reshape(tuple_size + (d,)) + return self.loc + self.scale * qrvs + + def evaluate_error(self, size=100000, random_state=None, x_error=False): + """ + Evaluate the numerical accuracy of the inversion (u- and x-error). + + Parameters + ---------- + size : int, optional + The number of random points over which the error is estimated. + Default is ``100000``. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + A NumPy random number generator or seed for the underlying NumPy + random number generator used to generate the stream of uniform + random numbers. + If `random_state` is None, use ``self.random_state``. + If `random_state` is an int, + ``np.random.default_rng(random_state)`` is used. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Returns + ------- + u_error, x_error : tuple of floats + A NumPy array of random variates. + + Notes + ----- + The numerical precision of the inverse CDF `ppf` is controlled by + the u-error. It is computed as follows: + ``max |u - CDF(PPF(u))|`` where the max is taken `size` random + points in the interval [0,1]. `random_state` determines the random + sample. Note that if `ppf` was exact, the u-error would be zero. + + The x-error measures the direct distance between the exact PPF + and `ppf`. 
If ``x_error`` is set to ``True`, it is + computed as the maximum of the minimum of the relative and absolute + x-error: + ``max(min(x_error_abs[i], x_error_rel[i]))`` where + ``x_error_abs[i] = |PPF(u[i]) - PPF_fast(u[i])|``, + ``x_error_rel[i] = max |(PPF(u[i]) - PPF_fast(u[i])) / PPF(u[i])|``. + Note that it is important to consider the relative x-error in the case + that ``PPF(u)`` is close to zero or very large. + + By default, only the u-error is evaluated and the x-error is set to + ``np.nan``. Note that the evaluation of the x-error will be very slow + if the implementation of the PPF is slow. + + Further information about these error measures can be found in [1]_. + + References + ---------- + .. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold. + "Random variate generation by numerical inversion when only the + density is known." ACM Transactions on Modeling and Computer + Simulation (TOMACS) 20.4 (2010): 1-25. + + Examples + -------- + + >>> import numpy as np + >>> from scipy import stats + >>> from scipy.stats.sampling import FastGeneratorInversion + + Create an object for the normal distribution: + + >>> d_norm_frozen = stats.norm() + >>> d_norm = FastGeneratorInversion(d_norm_frozen) + + To confirm that the numerical inversion is accurate, we evaluate the + approximation error (u-error and x-error). + + >>> u_error, x_error = d_norm.evaluate_error(x_error=True) + + The u-error should be below 1e-10: + + >>> u_error + 8.785783212061915e-11 # may vary + + Compare the PPF against approximation `ppf`: + + >>> q = [0.001, 0.2, 0.4, 0.6, 0.8, 0.999] + >>> diff = np.abs(d_norm_frozen.ppf(q) - d_norm.ppf(q)) + >>> x_error_abs = np.max(diff) + >>> x_error_abs + 1.2937954707581412e-08 + + This is the absolute x-error evaluated at the points q. The relative + error is given by + + >>> x_error_rel = np.max(diff / np.abs(d_norm_frozen.ppf(q))) + >>> x_error_rel + 4.186725600453555e-09 + + The x_error computed above is derived in a very similar way over a + much larger set of random values q. At each value q[i], the minimum + of the relative and absolute error is taken. The final value is then + derived as the maximum of these values. In our example, we get the + following value: + + >>> x_error + 4.507068014335139e-07 # may vary + + """ + if not isinstance(size, (numbers.Integral, np.integer)): + raise ValueError("size must be an integer.") + # urng will be used to draw the samples for testing the error + # it must not interfere with self.random_state. therefore, do not + # call self.rvs, but draw uniform random numbers and apply + # self.ppf (note: like in rvs, consider self._mirror_uniform) + urng = check_random_state_qmc(random_state) + u = urng.uniform(size=size) + if self._mirror_uniform: + u = 1 - u + x = self.ppf(u) + uerr = np.max(np.abs(self._cdf(x) - u)) + if not x_error: + return uerr, np.nan + ppf_u = self._ppf(u) + x_error_abs = np.abs(self.ppf(u)-ppf_u) + x_error_rel = x_error_abs / np.abs(ppf_u) + x_error_combined = np.array([x_error_abs, x_error_rel]).min(axis=0) + return uerr, np.max(x_error_combined) + + def support(self): + """Support of the distribution. + + Returns + ------- + a, b : float + end-points of the distribution's support. + + Notes + ----- + + Note that the support of the distribution depends on `loc`, + `scale` and `domain`. 
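+
+        As an editorial note (this mirrors the internal ``_set_domain_adj``
+        helper defined earlier in the class), the returned endpoints are the
+        ``domain`` endpoints transformed by ``loc`` and ``scale``::
+
+            lb, ub = domain
+            a, b = lb * scale + loc, ub * scale + loc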
+ + Examples + -------- + + >>> from scipy import stats + >>> from scipy.stats.sampling import FastGeneratorInversion + + Define a truncated normal distribution: + + >>> d_norm = FastGeneratorInversion(stats.norm(), domain=(0, 1)) + >>> d_norm.support() + (0, 1) + + Shift the distribution: + + >>> d_norm.loc = 2.5 + >>> d_norm.support() + (2.5, 3.5) + + """ + return self._domain_adj + + def _cdf(self, x): + """Cumulative distribution function (CDF) + + Parameters + ---------- + x : array_like + The values where the CDF is evaluated + + Returns + ------- + y : ndarray + CDF evaluated at x + + """ + y = self._frozendist.cdf(x) + if self._p_domain == 1.0: + return y + return np.clip((y - self._p_lower) / self._p_domain, 0, 1) + + def _ppf(self, q): + """Percent point function (inverse of `cdf`) + + Parameters + ---------- + q : array_like + lower tail probability + + Returns + ------- + x : array_like + quantile corresponding to the lower tail probability q. + + """ + if self._p_domain == 1.0: + return self._frozendist.ppf(q) + x = self._frozendist.ppf(self._p_domain * np.array(q) + self._p_lower) + return np.clip(x, self._domain_adj[0], self._domain_adj[1]) + + +class RatioUniforms: + """ + Generate random samples from a probability density function using the + ratio-of-uniforms method. + + Parameters + ---------- + pdf : callable + A function with signature `pdf(x)` that is proportional to the + probability density function of the distribution. + umax : float + The upper bound of the bounding rectangle in the u-direction. + vmin : float + The lower bound of the bounding rectangle in the v-direction. + vmax : float + The upper bound of the bounding rectangle in the v-direction. + c : float, optional. + Shift parameter of ratio-of-uniforms method, see Notes. Default is 0. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Methods + ------- + rvs + + Notes + ----- + Given a univariate probability density function `pdf` and a constant `c`, + define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``. + If ``(U, V)`` is a random vector uniformly distributed over ``A``, + then ``V/U + c`` follows a distribution according to `pdf`. + + The above result (see [1]_, [2]_) can be used to sample random variables + using only the PDF, i.e. no inversion of the CDF is required. Typical + choices of `c` are zero or the mode of `pdf`. The set ``A`` is a subset of + the rectangle ``R = [0, umax] x [vmin, vmax]`` where + + - ``umax = sup sqrt(pdf(x))`` + - ``vmin = inf (x - c) sqrt(pdf(x))`` + - ``vmax = sup (x - c) sqrt(pdf(x))`` + + In particular, these values are finite if `pdf` is bounded and + ``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails). + One can generate ``(U, V)`` uniformly on ``R`` and return + ``V/U + c`` if ``(U, V)`` are also in ``A`` which can be directly + verified. + + The algorithm is not changed if one replaces `pdf` by k * `pdf` for any + constant k > 0. Thus, it is often convenient to work with a function + that is proportional to the probability density function by dropping + unnecessary normalization factors. 
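+
+    As an editorial sketch (not part of the original docstring), these bounds
+    can be evaluated numerically for the unnormalized standard normal density
+    ``exp(-x**2 / 2)`` with ``c = 0``; they agree with the closed-form values
+    used in the Examples below::
+
+        import numpy as np
+        f = lambda x: np.exp(-x**2 / 2)
+        x = np.linspace(-10, 10, 200001)
+        umax = np.sqrt(f(x)).max()         # 1, attained at x = 0
+        vmax = (x * np.sqrt(f(x))).max()   # sqrt(2/e) ~ 0.858, at x = sqrt(2)
+        vmin = (x * np.sqrt(f(x))).min()   # -sqrt(2/e), by symmetry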
+ + Intuitively, the method works well if ``A`` fills up most of the + enclosing rectangle such that the probability is high that ``(U, V)`` + lies in ``A`` whenever it lies in ``R`` as the number of required + iterations becomes too large otherwise. To be more precise, note that + the expected number of iterations to draw ``(U, V)`` uniformly + distributed on ``R`` such that ``(U, V)`` is also in ``A`` is given by + the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin) / area(pdf)``, + where `area(pdf)` is the integral of `pdf` (which is equal to one if the + probability density function is used but can take on other values if a + function proportional to the density is used). The equality holds since + the area of ``A`` is equal to ``0.5 * area(pdf)`` (Theorem 7.1 in [1]_). + If the sampling fails to generate a single random variate after 50000 + iterations (i.e. not a single draw is in ``A``), an exception is raised. + + If the bounding rectangle is not correctly specified (i.e. if it does not + contain ``A``), the algorithm samples from a distribution different from + the one given by `pdf`. It is therefore recommended to perform a + test such as `~scipy.stats.kstest` as a check. + + References + ---------- + .. [1] L. Devroye, "Non-Uniform Random Variate Generation", + Springer-Verlag, 1986. + + .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian + random variates", Statistics and Computing, 24(4), p. 547--557, 2014. + + .. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random + Variables Using the Ratio of Uniform Deviates", + ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977. + + Examples + -------- + >>> import numpy as np + >>> from scipy import stats + + >>> from scipy.stats.sampling import RatioUniforms + >>> rng = np.random.default_rng() + + Simulate normally distributed random variables. It is easy to compute the + bounding rectangle explicitly in that case. For simplicity, we drop the + normalization factor of the density. + + >>> f = lambda x: np.exp(-x**2 / 2) + >>> v = np.sqrt(f(np.sqrt(2))) * np.sqrt(2) + >>> umax = np.sqrt(f(0)) + >>> gen = RatioUniforms(f, umax=umax, vmin=-v, vmax=v, random_state=rng) + >>> r = gen.rvs(size=2500) + + The K-S test confirms that the random variates are indeed normally + distributed (normality is not rejected at 5% significance level): + + >>> stats.kstest(r, 'norm')[1] + 0.250634764150542 + + The exponential distribution provides another example where the bounding + rectangle can be determined explicitly. + + >>> gen = RatioUniforms(lambda x: np.exp(-x), umax=1, vmin=0, + ... vmax=2*np.exp(-1), random_state=rng) + >>> r = gen.rvs(1000) + >>> stats.kstest(r, 'expon')[1] + 0.21121052054580314 + + """ + + def __init__(self, pdf, *, umax, vmin, vmax, c=0, random_state=None): + if vmin >= vmax: + raise ValueError("vmin must be smaller than vmax.") + + if umax <= 0: + raise ValueError("umax must be positive.") + + self._pdf = pdf + self._umax = umax + self._vmin = vmin + self._vmax = vmax + self._c = c + self._rng = check_random_state(random_state) + + def rvs(self, size=1): + """Sampling of random variates + + Parameters + ---------- + size : int or tuple of ints, optional + Number of random variates to be generated (default is 1). + + Returns + ------- + rvs : ndarray + The random variates distributed according to the probability + distribution defined by the pdf. 
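+
+        As an editorial aside (not part of the original docstring), the
+        expected number of iterations per variate discussed in the class
+        Notes can be worked out for the standard normal example::
+
+            import numpy as np
+            umax = 1.0
+            vmax = np.sqrt(2 / np.e)          # bounds for pdf = exp(-x**2/2)
+            area_pdf = np.sqrt(2 * np.pi)     # integral of exp(-x**2/2)
+            expected_iter = 2 * umax * (2 * vmax) / area_pdf  # ~ 1.37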
+ + """ + size1d = tuple(np.atleast_1d(size)) + N = np.prod(size1d) # number of rvs needed, reshape upon return + + # start sampling using ratio of uniforms method + x = np.zeros(N) + simulated, i = 0, 1 + + # loop until N rvs have been generated: expected runtime is finite. + # to avoid infinite loop, raise exception if not a single rv has been + # generated after 50000 tries. even if the expected number of iterations + # is 1000, the probability of this event is (1-1/1000)**50000 + # which is of order 10e-22 + while simulated < N: + k = N - simulated + # simulate uniform rvs on [0, umax] and [vmin, vmax] + u1 = self._umax * self._rng.uniform(size=k) + v1 = self._rng.uniform(self._vmin, self._vmax, size=k) + # apply rejection method + rvs = v1 / u1 + self._c + accept = (u1**2 <= self._pdf(rvs)) + num_accept = np.sum(accept) + if num_accept > 0: + x[simulated:(simulated + num_accept)] = rvs[accept] + simulated += num_accept + + if (simulated == 0) and (i*N >= 50000): + msg = ( + f"Not a single random variate could be generated in {i*N} " + "attempts. The ratio of uniforms method does not appear " + "to work for the provided parameters. Please check the " + "pdf and the bounds." + ) + raise RuntimeError(msg) + i += 1 + + return np.reshape(x, size1d) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..094eba701370c6c6a4ab1ffba5bc3aaba17c17bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_sobol.pyi b/venv/lib/python3.10/site-packages/scipy/stats/_sobol.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7ca5e3a9c1a142b25ac26401e9ab1cb6726c877f --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_sobol.pyi @@ -0,0 +1,54 @@ +import numpy as np +from scipy._lib._util import IntNumber +from typing import Literal + +def _initialize_v( + v : np.ndarray, + dim : IntNumber, + bits: IntNumber +) -> None: ... + +def _cscramble ( + dim : IntNumber, + bits: IntNumber, + ltm : np.ndarray, + sv: np.ndarray +) -> None: ... + +def _fill_p_cumulative( + p: np.ndarray, + p_cumulative: np.ndarray +) -> None: ... + +def _draw( + n : IntNumber, + num_gen: IntNumber, + dim: IntNumber, + scale: float, + sv: np.ndarray, + quasi: np.ndarray, + sample: np.ndarray + ) -> None: ... + +def _fast_forward( + n: IntNumber, + num_gen: IntNumber, + dim: IntNumber, + sv: np.ndarray, + quasi: np.ndarray + ) -> None: ... + +def _categorize( + draws: np.ndarray, + p_cumulative: np.ndarray, + result: np.ndarray + ) -> None: ... + +_MAXDIM: Literal[21201] +_MAXDEG: Literal[18] + +def _test_find_index( + p_cumulative: np.ndarray, + size: int, + value: float + ) -> int: ... 
diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..572f4de11e124009d1ec0299a859a6b37e6247c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_stats.pxd b/venv/lib/python3.10/site-packages/scipy/stats/_stats.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a7085943001b0b8fd2891f504c01c35ded7f7fb0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_stats.pxd @@ -0,0 +1,9 @@ +# destined to be used in a LowLevelCallable +cdef double _geninvgauss_pdf(double x, void *user_data) except * nogil +cdef double _studentized_range_cdf(int n, double[2] x, void *user_data) noexcept nogil +cdef double _studentized_range_cdf_asymptotic(double z, void *user_data) noexcept nogil +cdef double _studentized_range_pdf(int n, double[2] x, void *user_data) noexcept nogil +cdef double _studentized_range_pdf_asymptotic(double z, void *user_data) noexcept nogil +cdef double _studentized_range_moment(int n, double[3] x_arg, void *user_data) noexcept nogil +cdef double _genhyperbolic_pdf(double x, void *user_data) except * nogil +cdef double _genhyperbolic_logpdf(double x, void *user_data) except * nogil diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_tukeylambda_stats.py b/venv/lib/python3.10/site-packages/scipy/stats/_tukeylambda_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..b77173c136d80eb57f5c993108b7408653acad13 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_tukeylambda_stats.py @@ -0,0 +1,199 @@ +import numpy as np +from numpy import poly1d +from scipy.special import beta + + +# The following code was used to generate the Pade coefficients for the +# Tukey Lambda variance function. Version 0.17 of mpmath was used. +#--------------------------------------------------------------------------- +# import mpmath as mp +# +# mp.mp.dps = 60 +# +# one = mp.mpf(1) +# two = mp.mpf(2) +# +# def mpvar(lam): +# if lam == 0: +# v = mp.pi**2 / three +# else: +# v = (two / lam**2) * (one / (one + two*lam) - +# mp.beta(lam + one, lam + one)) +# return v +# +# t = mp.taylor(mpvar, 0, 8) +# p, q = mp.pade(t, 4, 4) +# print("p =", [mp.fp.mpf(c) for c in p]) +# print("q =", [mp.fp.mpf(c) for c in q]) +#--------------------------------------------------------------------------- + +# Pade coefficients for the Tukey Lambda variance function. +_tukeylambda_var_pc = [3.289868133696453, 0.7306125098871127, + -0.5370742306855439, 0.17292046290190008, + -0.02371146284628187] +_tukeylambda_var_qc = [1.0, 3.683605511659861, 4.184152498888124, + 1.7660926747377275, 0.2643989311168465] + +# numpy.poly1d instances for the numerator and denominator of the +# Pade approximation to the Tukey Lambda variance. +_tukeylambda_var_p = poly1d(_tukeylambda_var_pc[::-1]) +_tukeylambda_var_q = poly1d(_tukeylambda_var_qc[::-1]) + + +def tukeylambda_variance(lam): + """Variance of the Tukey Lambda distribution. + + Parameters + ---------- + lam : array_like + The lambda values at which to compute the variance. + + Returns + ------- + v : ndarray + The variance. For lam < -0.5, the variance is not defined, so + np.nan is returned. For lam = 0.5, np.inf is returned. 
+ + Notes + ----- + In an interval around lambda=0, this function uses the [4,4] Pade + approximation to compute the variance. Otherwise it uses the standard + formula (https://en.wikipedia.org/wiki/Tukey_lambda_distribution). The + Pade approximation is used because the standard formula has a removable + discontinuity at lambda = 0, and does not produce accurate numerical + results near lambda = 0. + """ + lam = np.asarray(lam) + shp = lam.shape + lam = np.atleast_1d(lam).astype(np.float64) + + # For absolute values of lam less than threshold, use the Pade + # approximation. + threshold = 0.075 + + # Play games with masks to implement the conditional evaluation of + # the distribution. + # lambda < -0.5: var = nan + low_mask = lam < -0.5 + # lambda == -0.5: var = inf + neghalf_mask = lam == -0.5 + # abs(lambda) < threshold: use Pade approximation + small_mask = np.abs(lam) < threshold + # else the "regular" case: use the explicit formula. + reg_mask = ~(low_mask | neghalf_mask | small_mask) + + # Get the 'lam' values for the cases where they are needed. + small = lam[small_mask] + reg = lam[reg_mask] + + # Compute the function for each case. + v = np.empty_like(lam) + v[low_mask] = np.nan + v[neghalf_mask] = np.inf + if small.size > 0: + # Use the Pade approximation near lambda = 0. + v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small) + if reg.size > 0: + v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) - + beta(reg + 1, reg + 1)) + v.shape = shp + return v + + +# The following code was used to generate the Pade coefficients for the +# Tukey Lambda kurtosis function. Version 0.17 of mpmath was used. +#--------------------------------------------------------------------------- +# import mpmath as mp +# +# mp.mp.dps = 60 +# +# one = mp.mpf(1) +# two = mp.mpf(2) +# three = mp.mpf(3) +# four = mp.mpf(4) +# +# def mpkurt(lam): +# if lam == 0: +# k = mp.mpf(6)/5 +# else: +# numer = (one/(four*lam+one) - four*mp.beta(three*lam+one, lam+one) + +# three*mp.beta(two*lam+one, two*lam+one)) +# denom = two*(one/(two*lam+one) - mp.beta(lam+one,lam+one))**2 +# k = numer / denom - three +# return k +# +# # There is a bug in mpmath 0.17: when we use the 'method' keyword of the +# # taylor function and we request a degree 9 Taylor polynomial, we actually +# # get degree 8. +# t = mp.taylor(mpkurt, 0, 9, method='quad', radius=0.01) +# t = [mp.chop(c, tol=1e-15) for c in t] +# p, q = mp.pade(t, 4, 4) +# print("p =", [mp.fp.mpf(c) for c in p]) +# print("q =", [mp.fp.mpf(c) for c in q]) +#--------------------------------------------------------------------------- + +# Pade coefficients for the Tukey Lambda kurtosis function. +_tukeylambda_kurt_pc = [1.2, -5.853465139719495, -22.653447381131077, + 0.20601184383406815, 4.59796302262789] +_tukeylambda_kurt_qc = [1.0, 7.171149192233599, 12.96663094361842, + 0.43075235247853005, -2.789746758009912] + +# numpy.poly1d instances for the numerator and denominator of the +# Pade approximation to the Tukey Lambda kurtosis. +_tukeylambda_kurt_p = poly1d(_tukeylambda_kurt_pc[::-1]) +_tukeylambda_kurt_q = poly1d(_tukeylambda_kurt_qc[::-1]) + + +def tukeylambda_kurtosis(lam): + """Kurtosis of the Tukey Lambda distribution. + + Parameters + ---------- + lam : array_like + The lambda values at which to compute the kurtosis. + + Returns + ------- + k : ndarray + The kurtosis. For lam < -0.25, the kurtosis is not defined, so + np.nan is returned. For lam = -0.25, np.inf is returned.
+ + """ + lam = np.asarray(lam) + shp = lam.shape + lam = np.atleast_1d(lam).astype(np.float64) + + # For absolute values of lam less than threshold, use the Pade + # approximation. + threshold = 0.055 + + # Use masks to implement the conditional evaluation of the kurtosis. + # lambda < -0.25: kurtosis = nan + low_mask = lam < -0.25 + # lambda == -0.25: kurtosis = inf + negqrtr_mask = lam == -0.25 + # lambda near 0: use Pade approximation + small_mask = np.abs(lam) < threshold + # else the "regular" case: use the explicit formula. + reg_mask = ~(low_mask | negqrtr_mask | small_mask) + + # Get the 'lam' values for the cases where they are needed. + small = lam[small_mask] + reg = lam[reg_mask] + + # Compute the function for each case. + k = np.empty_like(lam) + k[low_mask] = np.nan + k[negqrtr_mask] = np.inf + if small.size > 0: + k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small) + if reg.size > 0: + numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) + + 3 * beta(2 * reg + 1, 2 * reg + 1)) + denom = 2 * (1.0/(2 * reg + 1) - beta(reg + 1, reg + 1))**2 + k[reg_mask] = numer / denom - 3 + + # The return value will be a numpy array; resetting the shape ensures that + # if `lam` was a scalar, the return value is a 0-d array. + k.shape = shp + return k diff --git a/venv/lib/python3.10/site-packages/scipy/stats/morestats.py b/venv/lib/python3.10/site-packages/scipy/stats/morestats.py new file mode 100644 index 0000000000000000000000000000000000000000..fce0b989d8b5ec583a517c7998d55b1336937bf6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/morestats.py @@ -0,0 +1,34 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.stats` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'mvsdist', + 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', + 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', + 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', + 'fligner', 'mood', 'wilcoxon', 'median_test', + 'circmean', 'circvar', 'circstd', 'anderson_ksamp', + 'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax', + 'yeojohnson_normplot', 'annotations', 'namedtuple', 'isscalar', 'log', + 'around', 'unique', 'arange', 'sort', 'amin', 'amax', 'atleast_1d', + 'array', 'compress', 'exp', 'ravel', 'count_nonzero', 'arctan2', + 'hypot', 'optimize', 'find_repeats', + 'chi2_contingency', 'distributions', 'rv_generic', 'Mean', + 'Variance', 'Std_dev', 'ShapiroResult', 'AndersonResult', + 'Anderson_ksampResult', 'AnsariResult', 'BartlettResult', + 'LeveneResult', 'FlignerResult', 'WilcoxonResult' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="morestats", + private_modules=["_morestats"], all=__all__, + attribute=name) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/mstats.py b/venv/lib/python3.10/site-packages/scipy/stats/mstats.py new file mode 100644 index 0000000000000000000000000000000000000000..88016af71803dc5c4ebadba168f22cdcd8273dbb --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/mstats.py @@ -0,0 +1,140 @@ +""" +=================================================================== +Statistical functions for masked arrays (:mod:`scipy.stats.mstats`) +=================================================================== + +.. 
currentmodule:: scipy.stats.mstats + +This module contains a large number of statistical functions that can +be used with masked arrays. + +Most of these functions are similar to those in `scipy.stats` but might +have small differences in the API or in the algorithm used. Since this +is a relatively new package, some API changes are still possible. + +Summary statistics +================== + +.. autosummary:: + :toctree: generated/ + + describe + gmean + hmean + kurtosis + mode + mquantiles + hdmedian + hdquantiles + hdquantiles_sd + idealfourths + plotting_positions + meppf + moment + skew + tmean + tvar + tmin + tmax + tsem + variation + find_repeats + sem + trimmed_mean + trimmed_mean_ci + trimmed_std + trimmed_var + +Frequency statistics +==================== + +.. autosummary:: + :toctree: generated/ + + scoreatpercentile + +Correlation functions +===================== + +.. autosummary:: + :toctree: generated/ + + f_oneway + pearsonr + spearmanr + pointbiserialr + kendalltau + kendalltau_seasonal + linregress + siegelslopes + theilslopes + sen_seasonal_slopes + +Statistical tests +================= + +.. autosummary:: + :toctree: generated/ + + ttest_1samp + ttest_onesamp + ttest_ind + ttest_rel + chisquare + kstest + ks_2samp + ks_1samp + ks_twosamp + mannwhitneyu + rankdata + kruskal + kruskalwallis + friedmanchisquare + brunnermunzel + skewtest + kurtosistest + normaltest + +Transformations +=============== + +.. autosummary:: + :toctree: generated/ + + obrientransform + trim + trima + trimmed_stde + trimr + trimtail + trimboth + winsorize + zmap + zscore + +Other +===== + +.. autosummary:: + :toctree: generated/ + + argstoarray + count_tied_groups + msign + compare_medians_ms + median_cihs + mjci + mquantiles_cimj + rsh + +""" +from . import _mstats_basic +from . import _mstats_extras +from ._mstats_basic import * # noqa: F403 +from ._mstats_extras import * # noqa: F403 +# Functions that support masked array input in stats but need to be kept in the +# mstats namespace for backwards compatibility: +from scipy.stats import gmean, hmean, zmap, zscore, chisquare + +__all__ = _mstats_basic.__all__ + _mstats_extras.__all__ +__all__ += ['gmean', 'hmean', 'zmap', 'zscore', 'chisquare'] diff --git a/venv/lib/python3.10/site-packages/scipy/stats/mstats_basic.py b/venv/lib/python3.10/site-packages/scipy/stats/mstats_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..f79f07bed7a01a49e3005496db27c1a2c971e06a --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/mstats_basic.py @@ -0,0 +1,50 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.stats` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'argstoarray', + 'count_tied_groups', + 'describe', + 'f_oneway', 'find_repeats','friedmanchisquare', + 'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis', + 'ks_twosamp', 'ks_2samp', 'kurtosis', 'kurtosistest', + 'ks_1samp', 'kstest', + 'linregress', + 'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign', + 'normaltest', + 'obrientransform', + 'pearsonr','plotting_positions','pointbiserialr', + 'rankdata', + 'scoreatpercentile','sem', + 'sen_seasonal_slopes','skew','skewtest','spearmanr', + 'siegelslopes', 'theilslopes', + 'tmax','tmean','tmin','trim','trimboth', + 'trimtail','trima','trimr','trimmed_mean','trimmed_std', + 'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp', + 'ttest_ind','ttest_rel','tvar', + 'variation', + 'winsorize', + 'brunnermunzel', 'ma', 'masked', 'nomask', 'namedtuple', + 'distributions', 'stats_linregress', 'stats_LinregressResult', + 'stats_theilslopes', 'stats_siegelslopes', 'ModeResult', + 'PointbiserialrResult', + 'Ttest_1sampResult', 'Ttest_indResult', 'Ttest_relResult', + 'MannwhitneyuResult', 'KruskalResult', 'trimdoc', 'trim1', + 'DescribeResult', 'stde_median', 'SkewtestResult', 'KurtosistestResult', + 'NormaltestResult', 'F_onewayResult', 'FriedmanchisquareResult', + 'BrunnerMunzelResult' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="mstats_basic", + private_modules=["_mstats_basic"], all=__all__, + attribute=name, correct_module="mstats") diff --git a/venv/lib/python3.10/site-packages/scipy/stats/sampling.py b/venv/lib/python3.10/site-packages/scipy/stats/sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..a699221ba77bb8320023d70c3e47f562c08eab18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/sampling.py @@ -0,0 +1,68 @@ +""" +====================================================== +Random Number Generators (:mod:`scipy.stats.sampling`) +====================================================== + +.. currentmodule:: scipy.stats.sampling + +This module contains a collection of random number generators to sample +from univariate continuous and discrete distributions. It uses the +implementation of a C library called "UNU.RAN". The only exception is +RatioUniforms, which is a pure Python implementation of the +Ratio-of-Uniforms method. + +Generators Wrapped +================== + +For continuous distributions +---------------------------- + +.. autosummary:: + :toctree: generated/ + + NumericalInverseHermite + NumericalInversePolynomial + TransformedDensityRejection + SimpleRatioUniforms + RatioUniforms + +For discrete distributions +-------------------------- + +.. autosummary:: + :toctree: generated/ + + DiscreteAliasUrn + DiscreteGuideTable + +Warnings / Errors used in :mod:`scipy.stats.sampling` +----------------------------------------------------- + +.. autosummary:: + :toctree: generated/ + + UNURANError + + +Generators for pre-defined distributions +======================================== + +To easily apply the above methods for some of the continuous distributions +in :mod:`scipy.stats`, the following functionality can be used: + +.. 
autosummary:: + :toctree: generated/ + + FastGeneratorInversion + +""" +from ._sampling import FastGeneratorInversion, RatioUniforms # noqa: F401 +from ._unuran.unuran_wrapper import ( # noqa: F401 + TransformedDensityRejection, + DiscreteAliasUrn, + DiscreteGuideTable, + NumericalInversePolynomial, + NumericalInverseHermite, + SimpleRatioUniforms, + UNURANError +) diff --git a/venv/lib/python3.10/site-packages/scipy/stats/stats.py b/venv/lib/python3.10/site-packages/scipy/stats/stats.py new file mode 100644 index 0000000000000000000000000000000000000000..142686e08ab56e8cd2a734ef3e327b0f50a10f54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/stats.py @@ -0,0 +1,52 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.stats` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'find_repeats', 'gmean', 'hmean', 'pmean', 'mode', 'tmean', 'tvar', + 'tmin', 'tmax', 'tstd', 'tsem', 'moment', + 'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest', + 'normaltest', 'jarque_bera', + 'scoreatpercentile', 'percentileofscore', + 'cumfreq', 'relfreq', 'obrientransform', + 'sem', 'zmap', 'zscore', 'gzscore', 'iqr', 'gstd', + 'median_abs_deviation', + 'sigmaclip', 'trimboth', 'trim1', 'trim_mean', + 'f_oneway', + 'pearsonr', 'fisher_exact', + 'spearmanr', 'pointbiserialr', + 'kendalltau', 'weightedtau', 'multiscale_graphcorr', + 'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp', + 'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', + 'kstest', 'ks_1samp', 'ks_2samp', + 'chisquare', 'power_divergence', + 'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare', + 'rankdata', + 'combine_pvalues', 'wasserstein_distance', 'energy_distance', + 'brunnermunzel', 'alexandergovern', 'gcd', 'namedtuple', 'array', + 'ma', 'cdist', 'check_random_state', 'MapWrapper', + 'rng_integers', 'float_factorial', 'linalg', 'distributions', + 'mstats_basic', 'ModeResult', 'DescribeResult', + 'SkewtestResult', 'KurtosistestResult', 'NormaltestResult', + 'HistogramResult', 'CumfreqResult', + 'RelfreqResult', 'SigmaclipResult', 'F_onewayResult', + 'AlexanderGovernResult', + 'PointbiserialrResult', + 'MGCResult', 'Ttest_1sampResult', 'Ttest_indResult', + 'Ttest_relResult', 'Power_divergenceResult', 'KstestResult', + 'Ks_2sampResult', 'RanksumsResult', 'KruskalResult', + 'FriedmanchisquareResult', 'BrunnerMunzelResult', 'RepeatedResults' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="stats", module="stats", + private_modules=["_stats_py"], all=__all__, + attribute=name)
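The morestats, mstats_basic and stats modules added above are all the same kind of compatibility shim: a module-level __getattr__ hands the requested name to _sub_module_deprecation, which is expected to emit a DeprecationWarning and forward the lookup to the private implementation module (_stats_py in this last case). A rough usage sketch under that assumption; the exact warning text is not shown in this diff.

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from scipy.stats import stats as deprecated_stats   # the shim defined above
    gmean = deprecated_stats.gmean                       # resolved via the module __getattr__
print(gmean([1, 2, 4]))                                  # 2.0, same function as scipy.stats.gmean
print(any(issubclass(w.category, DeprecationWarning) for w in caught))

Keeping only __getattr__ and __dir__ (plus a static __all__) leaves the deprecated names importable for old code without eagerly re-exporting anything at import time.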