applied-ai-018 committed on
Commit
30348aa
verified ·
1 Parent(s): fdfc13e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. env-llmeval/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 +3 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/beta_ufunc.cpython-310-x86_64-linux-gnu.so +0 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/binom_ufunc.cpython-310-x86_64-linux-gnu.so +0 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/hypergeom_ufunc.cpython-310-x86_64-linux-gnu.so +0 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/invgauss_ufunc.cpython-310-x86_64-linux-gnu.so +0 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/ncf_ufunc.cpython-310-x86_64-linux-gnu.so +0 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/nct_ufunc.cpython-310-x86_64-linux-gnu.so +0 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/skewnorm_ufunc.cpython-310-x86_64-linux-gnu.so +0 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/stats/_censored_data.py +459 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/stats/_mannwhitneyu.py +519 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/stats/_mvn.cpython-310-x86_64-linux-gnu.so +0 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so +0 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/stats/_rvs_sampling.py +56 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so +0 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/stats/contingency.py +468 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/stats/kde.py +23 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/stats/qmc.py +235 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/common_tests.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_axis_nan_policy.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_censored_data.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_contingency.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_continuous_basic.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_continuous_fit_censored.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_discrete_basic.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_discrete_distns.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_distributions.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_entropy.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fit.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_hypotests.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_kdeoth.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_basic.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multicomp.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_odds_ratio.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_qmc.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_rank.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_relative_risk.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_resampling.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_sampling.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_sensitivity_analysis.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_survival.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -160,3 +160,4 @@ llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs d
  env-llmeval/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/scipy/misc/face.dat filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea129565baf96309bc48b440e9ff15afcd46c1a7f8ff1f1de5596a3f964d575c
+ size 219454696
env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/beta_ufunc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (205 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/binom_ufunc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (176 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/hypergeom_ufunc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (121 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/invgauss_ufunc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (171 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/ncf_ufunc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (174 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/nct_ufunc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (224 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/_boost/skewnorm_ufunc.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (109 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/_censored_data.py ADDED
@@ -0,0 +1,459 @@
1
+ import numpy as np
2
+
3
+
4
+ def _validate_1d(a, name, allow_inf=False):
5
+ if np.ndim(a) != 1:
6
+ raise ValueError(f'`{name}` must be a one-dimensional sequence.')
7
+ if np.isnan(a).any():
8
+ raise ValueError(f'`{name}` must not contain nan.')
9
+ if not allow_inf and np.isinf(a).any():
10
+ raise ValueError(f'`{name}` must contain only finite values.')
11
+
12
+
13
+ def _validate_interval(interval):
14
+ interval = np.asarray(interval)
15
+ if interval.shape == (0,):
16
+ # The input was a sequence with length 0.
17
+ interval = interval.reshape((0, 2))
18
+ if interval.ndim != 2 or interval.shape[-1] != 2:
19
+ raise ValueError('`interval` must be a two-dimensional array with '
20
+ 'shape (m, 2), where m is the number of '
21
+ 'interval-censored values, but got shape '
22
+ f'{interval.shape}')
23
+
24
+ if np.isnan(interval).any():
25
+ raise ValueError('`interval` must not contain nan.')
26
+ if np.isinf(interval).all(axis=1).any():
27
+ raise ValueError('In each row in `interval`, both values must not'
28
+ ' be infinite.')
29
+ if (interval[:, 0] > interval[:, 1]).any():
30
+ raise ValueError('In each row of `interval`, the left value must not'
31
+ ' exceed the right value.')
32
+
33
+ uncensored_mask = interval[:, 0] == interval[:, 1]
34
+ left_mask = np.isinf(interval[:, 0])
35
+ right_mask = np.isinf(interval[:, 1])
36
+ interval_mask = np.isfinite(interval).all(axis=1) & ~uncensored_mask
37
+
38
+ uncensored2 = interval[uncensored_mask, 0]
39
+ left2 = interval[left_mask, 1]
40
+ right2 = interval[right_mask, 0]
41
+ interval2 = interval[interval_mask]
42
+
43
+ return uncensored2, left2, right2, interval2
44
+
45
+
46
+ def _validate_x_censored(x, censored):
47
+ x = np.asarray(x)
48
+ if x.ndim != 1:
49
+ raise ValueError('`x` must be one-dimensional.')
50
+ censored = np.asarray(censored)
51
+ if censored.ndim != 1:
52
+ raise ValueError('`censored` must be one-dimensional.')
53
+ if (~np.isfinite(x)).any():
54
+ raise ValueError('`x` must not contain nan or inf.')
55
+ if censored.size != x.size:
56
+ raise ValueError('`x` and `censored` must have the same length.')
57
+ return x, censored.astype(bool)
58
+
59
+
60
+ class CensoredData:
61
+ """
62
+ Instances of this class represent censored data.
63
+
64
+ Instances may be passed to the ``fit`` method of continuous
65
+ univariate SciPy distributions for maximum likelihood estimation.
66
+ The *only* method of the univariate continuous distributions that
67
+ understands `CensoredData` is the ``fit`` method. An instance of
68
+ `CensoredData` can not be passed to methods such as ``pdf`` and
69
+ ``cdf``.
70
+
71
+ An observation is said to be *censored* when the precise value is unknown,
72
+ but it has a known upper and/or lower bound. The conventional terminology
73
+ is:
74
+
75
+ * left-censored: an observation is below a certain value but it is
76
+ unknown by how much.
77
+ * right-censored: an observation is above a certain value but it is
78
+ unknown by how much.
79
+ * interval-censored: an observation lies somewhere on an interval between
80
+ two values.
81
+
82
+ Left-, right-, and interval-censored data can be represented by
83
+ `CensoredData`.
84
+
85
+ For convenience, the class methods ``left_censored`` and
86
+ ``right_censored`` are provided to create a `CensoredData`
87
+ instance from a single one-dimensional array of measurements
88
+ and a corresponding boolean array to indicate which measurements
89
+ are censored. The class method ``interval_censored`` accepts two
90
+ one-dimensional arrays that hold the lower and upper bounds of the
91
+ intervals.
92
+
93
+ Parameters
94
+ ----------
95
+ uncensored : array_like, 1D
96
+ Uncensored observations.
97
+ left : array_like, 1D
98
+ Left-censored observations.
99
+ right : array_like, 1D
100
+ Right-censored observations.
101
+ interval : array_like, 2D, with shape (m, 2)
102
+ Interval-censored observations. Each row ``interval[k, :]``
103
+ represents the interval for the kth interval-censored observation.
104
+
105
+ Notes
106
+ -----
107
+ In the input array `interval`, the lower bound of the interval may
108
+ be ``-inf``, and the upper bound may be ``inf``, but at least one must be
109
+ finite. When the lower bound is ``-inf``, the row represents a left-
110
+ censored observation, and when the upper bound is ``inf``, the row
111
+ represents a right-censored observation. If the length of an interval
112
+ is 0 (i.e. ``interval[k, 0] == interval[k, 1]``), the observation is
113
+ treated as uncensored. So one can represent all the types of censored
114
+ and uncensored data in ``interval``, but it is generally more convenient
115
+ to use `uncensored`, `left` and `right` for uncensored, left-censored and
116
+ right-censored observations, respectively.
117
+
118
+ Examples
119
+ --------
120
+ In the most general case, a censored data set may contain values that
121
+ are left-censored, right-censored, interval-censored, and uncensored.
122
+ For example, here we create a data set with five observations. Two
123
+ are uncensored (values 1 and 1.5), one is a left-censored observation
124
+ of 0, one is a right-censored observation of 10 and one is
125
+ interval-censored in the interval [2, 3].
126
+
127
+ >>> import numpy as np
128
+ >>> from scipy.stats import CensoredData
129
+ >>> data = CensoredData(uncensored=[1, 1.5], left=[0], right=[10],
130
+ ... interval=[[2, 3]])
131
+ >>> print(data)
132
+ CensoredData(5 values: 2 not censored, 1 left-censored,
133
+ 1 right-censored, 1 interval-censored)
134
+
135
+ Equivalently,
136
+
137
+ >>> data = CensoredData(interval=[[1, 1],
138
+ ... [1.5, 1.5],
139
+ ... [-np.inf, 0],
140
+ ... [10, np.inf],
141
+ ... [2, 3]])
142
+ >>> print(data)
143
+ CensoredData(5 values: 2 not censored, 1 left-censored,
144
+ 1 right-censored, 1 interval-censored)
145
+
146
+ A common case is to have a mix of uncensored observations and censored
147
+ observations that are all right-censored (or all left-censored). For
148
+ example, consider an experiment in which six devices are started at
149
+ various times and left running until they fail. Assume that time is
150
+ measured in hours, and the experiment is stopped after 30 hours, even
151
+ if all the devices have not failed by that time. We might end up with
152
+ data such as this::
153
+
154
+ Device Start-time Fail-time Time-to-failure
155
+ 1 0 13 13
156
+ 2 2 24 22
157
+ 3 5 22 17
158
+ 4 8 23 15
159
+ 5 10 *** >20
160
+ 6 12 *** >18
161
+
162
+ Two of the devices had not failed when the experiment was stopped;
163
+ the observations of the time-to-failure for these two devices are
164
+ right-censored. We can represent this data with
165
+
166
+ >>> data = CensoredData(uncensored=[13, 22, 17, 15], right=[20, 18])
167
+ >>> print(data)
168
+ CensoredData(6 values: 4 not censored, 2 right-censored)
169
+
170
+ Alternatively, we can use the method `CensoredData.right_censored` to
171
+ create a representation of this data. The time-to-failure observations
172
+ are put in the list ``ttf``. The ``censored`` list indicates which values
173
+ in ``ttf`` are censored.
174
+
175
+ >>> ttf = [13, 22, 17, 15, 20, 18]
176
+ >>> censored = [False, False, False, False, True, True]
177
+
178
+ Pass these lists to `CensoredData.right_censored` to create an
179
+ instance of `CensoredData`.
180
+
181
+ >>> data = CensoredData.right_censored(ttf, censored)
182
+ >>> print(data)
183
+ CensoredData(6 values: 4 not censored, 2 right-censored)
184
+
185
+ If the input data is interval censored and already stored in two
186
+ arrays, one holding the low end of the intervals and another
187
+ holding the high ends, the class method ``interval_censored`` can
188
+ be used to create the `CensoredData` instance.
189
+
190
+ This example creates an instance with four interval-censored values.
191
+ The intervals are [10, 11], [0.5, 1], [2, 3], and [12.5, 13.5].
192
+
193
+ >>> a = [10, 0.5, 2, 12.5] # Low ends of the intervals
194
+ >>> b = [11, 1.0, 3, 13.5] # High ends of the intervals
195
+ >>> data = CensoredData.interval_censored(low=a, high=b)
196
+ >>> print(data)
197
+ CensoredData(4 values: 0 not censored, 4 interval-censored)
198
+
199
+ Finally, we create and censor some data from the `weibull_min`
200
+ distribution, and then fit `weibull_min` to that data. We'll assume
201
+ that the location parameter is known to be 0.
202
+
203
+ >>> from scipy.stats import weibull_min
204
+ >>> rng = np.random.default_rng()
205
+
206
+ Create the random data set.
207
+
208
+ >>> x = weibull_min.rvs(2.5, loc=0, scale=30, size=250, random_state=rng)
209
+ >>> x[x > 40] = 40 # Right-censor values greater or equal to 40.
210
+
211
+ Create the `CensoredData` instance with the `right_censored` method.
212
+ The censored values are those where the value is 40.
213
+
214
+ >>> data = CensoredData.right_censored(x, x == 40)
215
+ >>> print(data)
216
+ CensoredData(250 values: 215 not censored, 35 right-censored)
217
+
218
+ 35 values have been right-censored.
219
+
220
+ Fit `weibull_min` to the censored data. We expect the shape and scale
221
+ to be approximately 2.5 and 30, respectively.
222
+
223
+ >>> weibull_min.fit(data, floc=0)
224
+ (2.3575922823897315, 0, 30.40650074451254)
225
+
226
+ """
227
+
228
+ def __init__(self, uncensored=None, *, left=None, right=None,
229
+ interval=None):
230
+ if uncensored is None:
231
+ uncensored = []
232
+ if left is None:
233
+ left = []
234
+ if right is None:
235
+ right = []
236
+ if interval is None:
237
+ interval = np.empty((0, 2))
238
+
239
+ _validate_1d(uncensored, 'uncensored')
240
+ _validate_1d(left, 'left')
241
+ _validate_1d(right, 'right')
242
+ uncensored2, left2, right2, interval2 = _validate_interval(interval)
243
+
244
+ self._uncensored = np.concatenate((uncensored, uncensored2))
245
+ self._left = np.concatenate((left, left2))
246
+ self._right = np.concatenate((right, right2))
247
+ # Note that by construction, the private attribute _interval
248
+ # will be a 2D array that contains only finite values representing
249
+ # intervals with nonzero but finite length.
250
+ self._interval = interval2
251
+
252
+ def __repr__(self):
253
+ uncensored_str = " ".join(np.array_repr(self._uncensored).split())
254
+ left_str = " ".join(np.array_repr(self._left).split())
255
+ right_str = " ".join(np.array_repr(self._right).split())
256
+ interval_str = " ".join(np.array_repr(self._interval).split())
257
+ return (f"CensoredData(uncensored={uncensored_str}, left={left_str}, "
258
+ f"right={right_str}, interval={interval_str})")
259
+
260
+ def __str__(self):
261
+ num_nc = len(self._uncensored)
262
+ num_lc = len(self._left)
263
+ num_rc = len(self._right)
264
+ num_ic = len(self._interval)
265
+ n = num_nc + num_lc + num_rc + num_ic
266
+ parts = [f'{num_nc} not censored']
267
+ if num_lc > 0:
268
+ parts.append(f'{num_lc} left-censored')
269
+ if num_rc > 0:
270
+ parts.append(f'{num_rc} right-censored')
271
+ if num_ic > 0:
272
+ parts.append(f'{num_ic} interval-censored')
273
+ return f'CensoredData({n} values: ' + ', '.join(parts) + ')'
274
+
275
+ # This is not a complete implementation of the arithmetic operators.
276
+ # All we need is subtracting a scalar and dividing by a scalar.
277
+
278
+ def __sub__(self, other):
279
+ return CensoredData(uncensored=self._uncensored - other,
280
+ left=self._left - other,
281
+ right=self._right - other,
282
+ interval=self._interval - other)
283
+
284
+ def __truediv__(self, other):
285
+ return CensoredData(uncensored=self._uncensored / other,
286
+ left=self._left / other,
287
+ right=self._right / other,
288
+ interval=self._interval / other)
289
+
290
+ def __len__(self):
291
+ """
292
+ The number of values (censored and not censored).
293
+ """
294
+ return (len(self._uncensored) + len(self._left) + len(self._right)
295
+ + len(self._interval))
296
+
297
+ def num_censored(self):
298
+ """
299
+ Number of censored values.
300
+ """
301
+ return len(self._left) + len(self._right) + len(self._interval)
302
+
303
+ @classmethod
304
+ def right_censored(cls, x, censored):
305
+ """
306
+ Create a `CensoredData` instance of right-censored data.
307
+
308
+ Parameters
309
+ ----------
310
+ x : array_like
311
+ `x` is the array of observed data or measurements.
312
+ `x` must be a one-dimensional sequence of finite numbers.
313
+ censored : array_like of bool
314
+ `censored` must be a one-dimensional sequence of boolean
315
+ values. If ``censored[k]`` is True, the corresponding value
316
+ in `x` is right-censored. That is, the value ``x[k]``
317
+ is the lower bound of the true (but unknown) value.
318
+
319
+ Returns
320
+ -------
321
+ data : `CensoredData`
322
+ An instance of `CensoredData` that represents the
323
+ collection of uncensored and right-censored values.
324
+
325
+ Examples
326
+ --------
327
+ >>> from scipy.stats import CensoredData
328
+
329
+ Two uncensored values (4 and 10) and two right-censored values
330
+ (24 and 25).
331
+
332
+ >>> data = CensoredData.right_censored([4, 10, 24, 25],
333
+ ... [False, False, True, True])
334
+ >>> data
335
+ CensoredData(uncensored=array([ 4., 10.]),
336
+ left=array([], dtype=float64), right=array([24., 25.]),
337
+ interval=array([], shape=(0, 2), dtype=float64))
338
+ >>> print(data)
339
+ CensoredData(4 values: 2 not censored, 2 right-censored)
340
+ """
341
+ x, censored = _validate_x_censored(x, censored)
342
+ return cls(uncensored=x[~censored], right=x[censored])
343
+
344
+ @classmethod
345
+ def left_censored(cls, x, censored):
346
+ """
347
+ Create a `CensoredData` instance of left-censored data.
348
+
349
+ Parameters
350
+ ----------
351
+ x : array_like
352
+ `x` is the array of observed data or measurements.
353
+ `x` must be a one-dimensional sequence of finite numbers.
354
+ censored : array_like of bool
355
+ `censored` must be a one-dimensional sequence of boolean
356
+ values. If ``censored[k]`` is True, the corresponding value
357
+ in `x` is left-censored. That is, the value ``x[k]``
358
+ is the upper bound of the true (but unknown) value.
359
+
360
+ Returns
361
+ -------
362
+ data : `CensoredData`
363
+ An instance of `CensoredData` that represents the
364
+ collection of uncensored and left-censored values.
365
+
366
+ Examples
367
+ --------
368
+ >>> from scipy.stats import CensoredData
369
+
370
+ Two uncensored values (0.12 and 0.033) and two left-censored values
371
+ (both 1e-3).
372
+
373
+ >>> data = CensoredData.left_censored([0.12, 0.033, 1e-3, 1e-3],
374
+ ... [False, False, True, True])
375
+ >>> data
376
+ CensoredData(uncensored=array([0.12 , 0.033]),
377
+ left=array([0.001, 0.001]), right=array([], dtype=float64),
378
+ interval=array([], shape=(0, 2), dtype=float64))
379
+ >>> print(data)
380
+ CensoredData(4 values: 2 not censored, 2 left-censored)
381
+ """
382
+ x, censored = _validate_x_censored(x, censored)
383
+ return cls(uncensored=x[~censored], left=x[censored])
384
+
385
+ @classmethod
386
+ def interval_censored(cls, low, high):
387
+ """
388
+ Create a `CensoredData` instance of interval-censored data.
389
+
390
+ This method is useful when all the data is interval-censored, and
391
+ the low and high ends of the intervals are already stored in
392
+ separate one-dimensional arrays.
393
+
394
+ Parameters
395
+ ----------
396
+ low : array_like
397
+ The one-dimensional array containing the low ends of the
398
+ intervals.
399
+ high : array_like
400
+ The one-dimensional array containing the high ends of the
401
+ intervals.
402
+
403
+ Returns
404
+ -------
405
+ data : `CensoredData`
406
+ An instance of `CensoredData` that represents the
407
+ collection of censored values.
408
+
409
+ Examples
410
+ --------
411
+ >>> import numpy as np
412
+ >>> from scipy.stats import CensoredData
413
+
414
+ ``a`` and ``b`` are the low and high ends of a collection of
415
+ interval-censored values.
416
+
417
+ >>> a = [0.5, 2.0, 3.0, 5.5]
418
+ >>> b = [1.0, 2.5, 3.5, 7.0]
419
+ >>> data = CensoredData.interval_censored(low=a, high=b)
420
+ >>> print(data)
421
+ CensoredData(4 values: 0 not censored, 4 interval-censored)
422
+ """
423
+ _validate_1d(low, 'low', allow_inf=True)
424
+ _validate_1d(high, 'high', allow_inf=True)
425
+ if len(low) != len(high):
426
+ raise ValueError('`low` and `high` must have the same length.')
427
+ interval = np.column_stack((low, high))
428
+ uncensored, left, right, interval = _validate_interval(interval)
429
+ return cls(uncensored=uncensored, left=left, right=right,
430
+ interval=interval)
431
+
432
+ def _uncensor(self):
433
+ """
434
+ This function is used when a non-censored version of the data
435
+ is needed to create a rough estimate of the parameters of a
436
+ distribution via the method of moments or some similar method.
437
+ The data is "uncensored" by taking the given endpoints as the
438
+ data for the left- or right-censored data, and the mean for the
439
+ interval-censored data.
440
+ """
441
+ data = np.concatenate((self._uncensored, self._left, self._right,
442
+ self._interval.mean(axis=1)))
443
+ return data
444
+
445
+ def _supported(self, a, b):
446
+ """
447
+ Return a subset of self containing the values that are in
448
+ (or overlap with) the interval (a, b).
449
+ """
450
+ uncensored = self._uncensored
451
+ uncensored = uncensored[(a < uncensored) & (uncensored < b)]
452
+ left = self._left
453
+ left = left[a < left]
454
+ right = self._right
455
+ right = right[right < b]
456
+ interval = self._interval
457
+ interval = interval[(a < interval[:, 1]) & (interval[:, 0] < b)]
458
+ return CensoredData(uncensored, left=left, right=right,
459
+ interval=interval)
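
For context: a minimal usage sketch of the `CensoredData` API added above, based on its own docstring. This is illustrative only and not part of the committed file; the data values and seed are invented, and it assumes a SciPy build that exposes `scipy.stats.CensoredData` (1.11+).

import numpy as np
from scipy import stats

# Simulated times-to-failure; the experiment stops at 30 hours, so any
# device still running at that point is right-censored at 30 (a lower
# bound on the true failure time, not an observed value).
rng = np.random.default_rng(12345)
ttf = stats.weibull_min.rvs(2.0, scale=25, size=100, random_state=rng)
censored = ttf > 30
ttf = np.minimum(ttf, 30)

data = stats.CensoredData.right_censored(ttf, censored)
print(data)  # e.g. CensoredData(100 values: ... not censored, ... right-censored)

# Maximum-likelihood fit with the location fixed at 0, as in the docstring example.
shape, loc, scale = stats.weibull_min.fit(data, floc=0)
print(shape, loc, scale)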
env-llmeval/lib/python3.10/site-packages/scipy/stats/_mannwhitneyu.py ADDED
@@ -0,0 +1,519 @@
1
+ import numpy as np
2
+ from collections import namedtuple
3
+ from scipy import special
4
+ from scipy import stats
5
+ from scipy.stats._stats_py import _rankdata
6
+ from ._axis_nan_policy import _axis_nan_policy_factory
7
+
8
+
9
+ def _broadcast_concatenate(x, y, axis):
10
+ '''Broadcast then concatenate arrays, leaving concatenation axis last'''
11
+ x = np.moveaxis(x, axis, -1)
12
+ y = np.moveaxis(y, axis, -1)
13
+ z = np.broadcast(x[..., 0], y[..., 0])
14
+ x = np.broadcast_to(x, z.shape + (x.shape[-1],))
15
+ y = np.broadcast_to(y, z.shape + (y.shape[-1],))
16
+ z = np.concatenate((x, y), axis=-1)
17
+ return x, y, z
18
+
19
+
20
+ class _MWU:
21
+ '''Distribution of MWU statistic under the null hypothesis'''
22
+ # Possible improvement: if m and n are small enough, use integer arithmetic
23
+
24
+ def __init__(self):
25
+ '''Minimal initializer'''
26
+ self._fmnks = -np.ones((1, 1, 1))
27
+ self._recursive = None
28
+
29
+ def pmf(self, k, m, n):
30
+
31
+ # In practice, `pmf` is never called with k > m*n/2.
32
+ # If it were, we'd exploit symmetry here:
33
+ # k = np.array(k, copy=True)
34
+ # k2 = m*n - k
35
+ # i = k2 < k
36
+ # k[i] = k2[i]
37
+
38
+ if (self._recursive is None and m <= 500 and n <= 500
39
+ or self._recursive):
40
+ return self.pmf_recursive(k, m, n)
41
+ else:
42
+ return self.pmf_iterative(k, m, n)
43
+
44
+ def pmf_recursive(self, k, m, n):
45
+ '''Probability mass function, recursive version'''
46
+ self._resize_fmnks(m, n, np.max(k))
47
+ # could loop over just the unique elements, but probably not worth
48
+ # the time to find them
49
+ for i in np.ravel(k):
50
+ self._f(m, n, i)
51
+ return self._fmnks[m, n, k] / special.binom(m + n, m)
52
+
53
+ def pmf_iterative(self, k, m, n):
54
+ '''Probability mass function, iterative version'''
55
+ fmnks = {}
56
+ for i in np.ravel(k):
57
+ fmnks = _mwu_f_iterative(m, n, i, fmnks)
58
+ return (np.array([fmnks[(m, n, ki)] for ki in k])
59
+ / special.binom(m + n, m))
60
+
61
+ def cdf(self, k, m, n):
62
+ '''Cumulative distribution function'''
63
+
64
+ # In practice, `cdf` is never called with k > m*n/2.
65
+ # If it were, we'd exploit symmetry here rather than in `sf`
66
+ pmfs = self.pmf(np.arange(0, np.max(k) + 1), m, n)
67
+ cdfs = np.cumsum(pmfs)
68
+ return cdfs[k]
69
+
70
+ def sf(self, k, m, n):
71
+ '''Survival function'''
72
+ # Note that both CDF and SF include the PMF at k. The p-value is
73
+ # calculated from the SF and should include the mass at k, so this
74
+ # is desirable
75
+
76
+ # Use the fact that the distribution is symmetric; i.e.
77
+ # _f(m, n, m*n-k) = _f(m, n, k), and sum from the left
78
+ kc = np.asarray(m*n - k) # complement of k
79
+ i = k < kc
80
+ if np.any(i):
81
+ kc[i] = k[i]
82
+ cdfs = np.asarray(self.cdf(kc, m, n))
83
+ cdfs[i] = 1. - cdfs[i] + self.pmf(kc[i], m, n)
84
+ else:
85
+ cdfs = np.asarray(self.cdf(kc, m, n))
86
+ return cdfs[()]
87
+
88
+ def _resize_fmnks(self, m, n, k):
89
+ '''If necessary, expand the array that remembers PMF values'''
90
+ # could probably use `np.pad` but I'm not sure it would save code
91
+ shape_old = np.array(self._fmnks.shape)
92
+ shape_new = np.array((m+1, n+1, k+1))
93
+ if np.any(shape_new > shape_old):
94
+ shape = np.maximum(shape_old, shape_new)
95
+ fmnks = -np.ones(shape) # create the new array
96
+ m0, n0, k0 = shape_old
97
+ fmnks[:m0, :n0, :k0] = self._fmnks # copy remembered values
98
+ self._fmnks = fmnks
99
+
100
+ def _f(self, m, n, k):
101
+ '''Recursive implementation of function of [3] Theorem 2.5'''
102
+
103
+ # [3] Theorem 2.5 Line 1
104
+ if k < 0 or m < 0 or n < 0 or k > m*n:
105
+ return 0
106
+
107
+ # if already calculated, return the value
108
+ if self._fmnks[m, n, k] >= 0:
109
+ return self._fmnks[m, n, k]
110
+
111
+ if k == 0 and m >= 0 and n >= 0: # [3] Theorem 2.5 Line 2
112
+ fmnk = 1
113
+ else: # [3] Theorem 2.5 Line 3 / Equation 3
114
+ fmnk = self._f(m-1, n, k-n) + self._f(m, n-1, k)
115
+
116
+ self._fmnks[m, n, k] = fmnk # remember result
117
+
118
+ return fmnk
119
+
120
+
121
+ # Maintain state for faster repeat calls to mannwhitneyu w/ method='exact'
122
+ _mwu_state = _MWU()
123
+
124
+
125
+ def _mwu_f_iterative(m, n, k, fmnks):
126
+ '''Iterative implementation of function of [3] Theorem 2.5'''
127
+
128
+ def _base_case(m, n, k):
129
+ '''Base cases from recursive version'''
130
+
131
+ # if already calculated, return the value
132
+ if fmnks.get((m, n, k), -1) >= 0:
133
+ return fmnks[(m, n, k)]
134
+
135
+ # [3] Theorem 2.5 Line 1
136
+ elif k < 0 or m < 0 or n < 0 or k > m*n:
137
+ return 0
138
+
139
+ # [3] Theorem 2.5 Line 2
140
+ elif k == 0 and m >= 0 and n >= 0:
141
+ return 1
142
+
143
+ return None
144
+
145
+ stack = [(m, n, k)]
146
+ fmnk = None
147
+
148
+ while stack:
149
+ # Popping only if necessary would save a tiny bit of time, but NWI.
150
+ m, n, k = stack.pop()
151
+
152
+ # If we're at a base case, continue (stack unwinds)
153
+ fmnk = _base_case(m, n, k)
154
+ if fmnk is not None:
155
+ fmnks[(m, n, k)] = fmnk
156
+ continue
157
+
158
+ # If both terms are base cases, continue (stack unwinds)
159
+ f1 = _base_case(m-1, n, k-n)
160
+ f2 = _base_case(m, n-1, k)
161
+ if f1 is not None and f2 is not None:
162
+ # [3] Theorem 2.5 Line 3 / Equation 3
163
+ fmnk = f1 + f2
164
+ fmnks[(m, n, k)] = fmnk
165
+ continue
166
+
167
+ # recurse deeper
168
+ stack.append((m, n, k))
169
+ if f1 is None:
170
+ stack.append((m-1, n, k-n))
171
+ if f2 is None:
172
+ stack.append((m, n-1, k))
173
+
174
+ return fmnks
175
+
176
+
177
+ def _get_mwu_z(U, n1, n2, t, axis=0, continuity=True):
178
+ '''Standardized MWU statistic'''
179
+ # Follows mannwhitneyu [2]
180
+ mu = n1 * n2 / 2
181
+ n = n1 + n2
182
+
183
+ # Tie correction according to [2], "Normal approximation and tie correction"
184
+ # "A more computationally-efficient form..."
185
+ tie_term = (t**3 - t).sum(axis=-1)
186
+ s = np.sqrt(n1*n2/12 * ((n + 1) - tie_term/(n*(n-1))))
187
+
188
+ numerator = U - mu
189
+
190
+ # Continuity correction.
191
+ # Because SF is always used to calculate the p-value, we can always
192
+ # _subtract_ 0.5 for the continuity correction. This always increases the
193
+ # p-value to account for the rest of the probability mass _at_ q = U.
194
+ if continuity:
195
+ numerator -= 0.5
196
+
197
+ # no problem evaluating the norm SF at an infinity
198
+ with np.errstate(divide='ignore', invalid='ignore'):
199
+ z = numerator / s
200
+ return z
201
+
202
+
203
+ def _mwu_input_validation(x, y, use_continuity, alternative, axis, method):
204
+ ''' Input validation and standardization for mannwhitneyu '''
205
+ # Would use np.asarray_chkfinite, but infs are OK
206
+ x, y = np.atleast_1d(x), np.atleast_1d(y)
207
+ if np.isnan(x).any() or np.isnan(y).any():
208
+ raise ValueError('`x` and `y` must not contain NaNs.')
209
+ if np.size(x) == 0 or np.size(y) == 0:
210
+ raise ValueError('`x` and `y` must be of nonzero size.')
211
+
212
+ bools = {True, False}
213
+ if use_continuity not in bools:
214
+ raise ValueError(f'`use_continuity` must be one of {bools}.')
215
+
216
+ alternatives = {"two-sided", "less", "greater"}
217
+ alternative = alternative.lower()
218
+ if alternative not in alternatives:
219
+ raise ValueError(f'`alternative` must be one of {alternatives}.')
220
+
221
+ axis_int = int(axis)
222
+ if axis != axis_int:
223
+ raise ValueError('`axis` must be an integer.')
224
+
225
+ if not isinstance(method, stats.PermutationMethod):
226
+ methods = {"asymptotic", "exact", "auto"}
227
+ method = method.lower()
228
+ if method not in methods:
229
+ raise ValueError(f'`method` must be one of {methods}.')
230
+
231
+ return x, y, use_continuity, alternative, axis_int, method
232
+
233
+
234
+ def _mwu_choose_method(n1, n2, ties):
235
+ """Choose method 'asymptotic' or 'exact' depending on input size, ties"""
236
+
237
+ # if both inputs are large, asymptotic is OK
238
+ if n1 > 8 and n2 > 8:
239
+ return "asymptotic"
240
+
241
+ # if there are any ties, asymptotic is preferred
242
+ if ties:
243
+ return "asymptotic"
244
+
245
+ return "exact"
246
+
247
+
248
+ MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
249
+
250
+
251
+ @_axis_nan_policy_factory(MannwhitneyuResult, n_samples=2)
252
+ def mannwhitneyu(x, y, use_continuity=True, alternative="two-sided",
253
+ axis=0, method="auto"):
254
+ r'''Perform the Mann-Whitney U rank test on two independent samples.
255
+
256
+ The Mann-Whitney U test is a nonparametric test of the null hypothesis
257
+ that the distribution underlying sample `x` is the same as the
258
+ distribution underlying sample `y`. It is often used as a test of
259
+ difference in location between distributions.
260
+
261
+ Parameters
262
+ ----------
263
+ x, y : array-like
264
+ N-d arrays of samples. The arrays must be broadcastable except along
265
+ the dimension given by `axis`.
266
+ use_continuity : bool, optional
267
+ Whether a continuity correction (1/2) should be applied.
268
+ Default is True when `method` is ``'asymptotic'``; has no effect
269
+ otherwise.
270
+ alternative : {'two-sided', 'less', 'greater'}, optional
271
+ Defines the alternative hypothesis. Default is 'two-sided'.
272
+ Let *F(u)* and *G(u)* be the cumulative distribution functions of the
273
+ distributions underlying `x` and `y`, respectively. Then the following
274
+ alternative hypotheses are available:
275
+
276
+ * 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)* for
277
+ at least one *u*.
278
+ * 'less': the distribution underlying `x` is stochastically less
279
+ than the distribution underlying `y`, i.e. *F(u) > G(u)* for all *u*.
280
+ * 'greater': the distribution underlying `x` is stochastically greater
281
+ than the distribution underlying `y`, i.e. *F(u) < G(u)* for all *u*.
282
+
283
+ Note that the mathematical expressions in the alternative hypotheses
284
+ above describe the CDFs of the underlying distributions. The directions
285
+ of the inequalities appear inconsistent with the natural language
286
+ description at first glance, but they are not. For example, suppose
287
+ *X* and *Y* are random variables that follow distributions with CDFs
288
+ *F* and *G*, respectively. If *F(u) > G(u)* for all *u*, samples drawn
289
+ from *X* tend to be less than those drawn from *Y*.
290
+
291
+ Under a more restrictive set of assumptions, the alternative hypotheses
292
+ can be expressed in terms of the locations of the distributions;
293
+ see [5] section 5.1.
294
+ axis : int, optional
295
+ Axis along which to perform the test. Default is 0.
296
+ method : {'auto', 'asymptotic', 'exact'} or `PermutationMethod` instance, optional
297
+ Selects the method used to calculate the *p*-value.
298
+ Default is 'auto'. The following options are available.
299
+
300
+ * ``'asymptotic'``: compares the standardized test statistic
301
+ against the normal distribution, correcting for ties.
302
+ * ``'exact'``: computes the exact *p*-value by comparing the observed
303
+ :math:`U` statistic against the exact distribution of the :math:`U`
304
+ statistic under the null hypothesis. No correction is made for ties.
305
+ * ``'auto'``: chooses ``'exact'`` when the size of one of the samples
306
+ is less than or equal to 8 and there are no ties;
307
+ chooses ``'asymptotic'`` otherwise.
308
+ * `PermutationMethod` instance. In this case, the p-value
309
+ is computed using `permutation_test` with the provided
310
+ configuration options and other appropriate settings.
311
+
312
+ Returns
313
+ -------
314
+ res : MannwhitneyuResult
315
+ An object containing attributes:
316
+
317
+ statistic : float
318
+ The Mann-Whitney U statistic corresponding with sample `x`. See
319
+ Notes for the test statistic corresponding with sample `y`.
320
+ pvalue : float
321
+ The associated *p*-value for the chosen `alternative`.
322
+
323
+ Notes
324
+ -----
325
+ If ``U1`` is the statistic corresponding with sample `x`, then the
326
+ statistic corresponding with sample `y` is
327
+ ``U2 = x.shape[axis] * y.shape[axis] - U1``.
328
+
329
+ `mannwhitneyu` is for independent samples. For related / paired samples,
330
+ consider `scipy.stats.wilcoxon`.
331
+
332
+ `method` ``'exact'`` is recommended when there are no ties and when either
333
+ sample size is less than 8 [1]_. The implementation follows the recurrence
334
+ relation originally proposed in [1]_ as it is described in [3]_.
335
+ Note that the exact method is *not* corrected for ties, but
336
+ `mannwhitneyu` will not raise errors or warnings if there are ties in the
337
+ data. If there are ties and either sample is small (fewer than ~10
338
+ observations), consider passing an instance of `PermutationMethod`
339
+ as the `method` to perform a permutation test.
340
+
341
+ The Mann-Whitney U test is a non-parametric version of the t-test for
342
+ independent samples. When the means of samples from the populations
343
+ are normally distributed, consider `scipy.stats.ttest_ind`.
344
+
345
+ See Also
346
+ --------
347
+ scipy.stats.wilcoxon, scipy.stats.ranksums, scipy.stats.ttest_ind
348
+
349
+ References
350
+ ----------
351
+ .. [1] H.B. Mann and D.R. Whitney, "On a test of whether one of two random
352
+ variables is stochastically larger than the other", The Annals of
353
+ Mathematical Statistics, Vol. 18, pp. 50-60, 1947.
354
+ .. [2] Mann-Whitney U Test, Wikipedia,
355
+ http://en.wikipedia.org/wiki/Mann-Whitney_U_test
356
+ .. [3] A. Di Bucchianico, "Combinatorics, computer algebra, and the
357
+ Wilcoxon-Mann-Whitney test", Journal of Statistical Planning and
358
+ Inference, Vol. 79, pp. 349-364, 1999.
359
+ .. [4] Rosie Shier, "Statistics: 2.3 The Mann-Whitney U Test", Mathematics
360
+ Learning Support Centre, 2004.
361
+ .. [5] Michael P. Fay and Michael A. Proschan. "Wilcoxon-Mann-Whitney
362
+ or t-test? On assumptions for hypothesis tests and multiple \
363
+ interpretations of decision rules." Statistics surveys, Vol. 4, pp.
364
+ 1-39, 2010. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2857732/
365
+
366
+ Examples
367
+ --------
368
+ We follow the example from [4]_: nine randomly sampled young adults were
369
+ diagnosed with type II diabetes at the ages below.
370
+
371
+ >>> males = [19, 22, 16, 29, 24]
372
+ >>> females = [20, 11, 17, 12]
373
+
374
+ We use the Mann-Whitney U test to assess whether there is a statistically
375
+ significant difference in the diagnosis age of males and females.
376
+ The null hypothesis is that the distribution of male diagnosis ages is
377
+ the same as the distribution of female diagnosis ages. We decide
378
+ that a confidence level of 95% is required to reject the null hypothesis
379
+ in favor of the alternative that the distributions are different.
380
+ Since the number of samples is very small and there are no ties in the
381
+ data, we can compare the observed test statistic against the *exact*
382
+ distribution of the test statistic under the null hypothesis.
383
+
384
+ >>> from scipy.stats import mannwhitneyu
385
+ >>> U1, p = mannwhitneyu(males, females, method="exact")
386
+ >>> print(U1)
387
+ 17.0
388
+
389
+ `mannwhitneyu` always reports the statistic associated with the first
390
+ sample, which, in this case, is males. This agrees with :math:`U_M = 17`
391
+ reported in [4]_. The statistic associated with the second sample
392
+ can be calculated:
393
+
394
+ >>> nx, ny = len(males), len(females)
395
+ >>> U2 = nx*ny - U1
396
+ >>> print(U2)
397
+ 3.0
398
+
399
+ This agrees with :math:`U_F = 3` reported in [4]_. The two-sided
400
+ *p*-value can be calculated from either statistic, and the value produced
401
+ by `mannwhitneyu` agrees with :math:`p = 0.11` reported in [4]_.
402
+
403
+ >>> print(p)
404
+ 0.1111111111111111
405
+
406
+ The exact distribution of the test statistic is asymptotically normal, so
407
+ the example continues by comparing the exact *p*-value against the
408
+ *p*-value produced using the normal approximation.
409
+
410
+ >>> _, pnorm = mannwhitneyu(males, females, method="asymptotic")
411
+ >>> print(pnorm)
412
+ 0.11134688653314041
413
+
414
+ Here `mannwhitneyu`'s reported *p*-value appears to conflict with the
415
+ value :math:`p = 0.09` given in [4]_. The reason is that [4]_
416
+ does not apply the continuity correction performed by `mannwhitneyu`;
417
+ `mannwhitneyu` reduces the distance between the test statistic and the
418
+ mean :math:`\mu = n_x n_y / 2` by 0.5 to correct for the fact that the
419
+ discrete statistic is being compared against a continuous distribution.
420
+ Here, the :math:`U` statistic used is less than the mean, so we reduce
421
+ the distance by adding 0.5 in the numerator.
422
+
423
+ >>> import numpy as np
424
+ >>> from scipy.stats import norm
425
+ >>> U = min(U1, U2)
426
+ >>> N = nx + ny
427
+ >>> z = (U - nx*ny/2 + 0.5) / np.sqrt(nx*ny * (N + 1)/ 12)
428
+ >>> p = 2 * norm.cdf(z) # use CDF to get p-value from smaller statistic
429
+ >>> print(p)
430
+ 0.11134688653314041
431
+
432
+ If desired, we can disable the continuity correction to get a result
433
+ that agrees with that reported in [4]_.
434
+
435
+ >>> _, pnorm = mannwhitneyu(males, females, use_continuity=False,
436
+ ... method="asymptotic")
437
+ >>> print(pnorm)
438
+ 0.0864107329737
439
+
440
+ Regardless of whether we perform an exact or asymptotic test, the
441
+ probability of the test statistic being as extreme or more extreme by
442
+ chance exceeds 5%, so we do not consider the results statistically
443
+ significant.
444
+
445
+ Suppose that, before seeing the data, we had hypothesized that females
446
+ would tend to be diagnosed at a younger age than males.
447
+ In that case, it would be natural to provide the female ages as the
448
+ first input, and we would have performed a one-sided test using
449
+ ``alternative = 'less'``: females are diagnosed at an age that is
450
+ stochastically less than that of males.
451
+
452
+ >>> res = mannwhitneyu(females, males, alternative="less", method="exact")
453
+ >>> print(res)
454
+ MannwhitneyuResult(statistic=3.0, pvalue=0.05555555555555555)
455
+
456
+ Again, the probability of getting a sufficiently low value of the
457
+ test statistic by chance under the null hypothesis is greater than 5%,
458
+ so we do not reject the null hypothesis in favor of our alternative.
459
+
460
+ If it is reasonable to assume that the means of samples from the
461
+ populations are normally distributed, we could have used a t-test to
462
+ perform the analysis.
463
+
464
+ >>> from scipy.stats import ttest_ind
465
+ >>> res = ttest_ind(females, males, alternative="less")
466
+ >>> print(res)
467
+ Ttest_indResult(statistic=-2.239334696520584, pvalue=0.030068441095757924)
468
+
469
+ Under this assumption, the *p*-value would be low enough to reject the
470
+ null hypothesis in favor of the alternative.
471
+
472
+ '''
473
+
474
+ x, y, use_continuity, alternative, axis_int, method = (
475
+ _mwu_input_validation(x, y, use_continuity, alternative, axis, method))
476
+
477
+ x, y, xy = _broadcast_concatenate(x, y, axis)
478
+
479
+ n1, n2 = x.shape[-1], y.shape[-1]
480
+
481
+ # Follows [2]
482
+ ranks, t = _rankdata(xy, 'average', return_ties=True) # method 2, step 1
483
+ R1 = ranks[..., :n1].sum(axis=-1) # method 2, step 2
484
+ U1 = R1 - n1*(n1+1)/2 # method 2, step 3
485
+ U2 = n1 * n2 - U1 # as U1 + U2 = n1 * n2
486
+
487
+ if alternative == "greater":
488
+ U, f = U1, 1 # U is the statistic to use for p-value, f is a factor
489
+ elif alternative == "less":
490
+ U, f = U2, 1 # Due to symmetry, use SF of U2 rather than CDF of U1
491
+ else:
492
+ U, f = np.maximum(U1, U2), 2 # multiply SF by two for two-sided test
493
+
494
+ if method == "auto":
495
+ method = _mwu_choose_method(n1, n2, np.any(t > 1))
496
+
497
+ if method == "exact":
498
+ p = _mwu_state.sf(U.astype(int), min(n1, n2), max(n1, n2))
499
+ elif method == "asymptotic":
500
+ z = _get_mwu_z(U, n1, n2, t, continuity=use_continuity)
501
+ p = stats.norm.sf(z)
502
+ else: # `PermutationMethod` instance (already validated)
503
+ def statistic(x, y, axis):
504
+ return mannwhitneyu(x, y, use_continuity=use_continuity,
505
+ alternative=alternative, axis=axis,
506
+ method="asymptotic").statistic
507
+
508
+ res = stats.permutation_test((x, y), statistic, axis=axis,
509
+ **method._asdict(), alternative=alternative)
510
+ p = res.pvalue
511
+ f = 1
512
+
513
+ p *= f
514
+
515
+ # Ensure that the p-value is not greater than 1
516
+ # This could happen for exact test when U = m*n/2
517
+ p = np.clip(p, 0, 1)
518
+
519
+ return MannwhitneyuResult(U1, p)
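
A minimal call sketch for the `mannwhitneyu` function added above, mirroring the exact-method example in its docstring. Illustrative only; not part of the committed file.

from scipy.stats import mannwhitneyu

males = [19, 22, 16, 29, 24]
females = [20, 11, 17, 12]

# Small samples with no ties, so method='auto' would also select the exact
# null distribution; the reported statistic is U1 for the first sample.
res = mannwhitneyu(males, females, method="exact")
print(res.statistic, res.pvalue)  # 17.0, 0.1111... (as in the docstring)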
env-llmeval/lib/python3.10/site-packages/scipy/stats/_mvn.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (85 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (287 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/_rvs_sampling.py ADDED
@@ -0,0 +1,56 @@
1
+ import warnings
2
+ from scipy.stats.sampling import RatioUniforms
3
+
4
+ def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
5
+ """
6
+ Generate random samples from a probability density function using the
7
+ ratio-of-uniforms method.
8
+
9
+ .. deprecated:: 1.12.0
10
+ `rvs_ratio_uniforms` is deprecated in favour of
11
+ `scipy.stats.sampling.RatioUniforms` from version 1.12.0 and will
12
+ be removed in SciPy 1.15.0
13
+
14
+ Parameters
15
+ ----------
16
+ pdf : callable
17
+ A function with signature `pdf(x)` that is proportional to the
18
+ probability density function of the distribution.
19
+ umax : float
20
+ The upper bound of the bounding rectangle in the u-direction.
21
+ vmin : float
22
+ The lower bound of the bounding rectangle in the v-direction.
23
+ vmax : float
24
+ The upper bound of the bounding rectangle in the v-direction.
25
+ size : int or tuple of ints, optional
26
+ Defining number of random variates (default is 1).
27
+ c : float, optional.
28
+ Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
29
+ random_state : {None, int, `numpy.random.Generator`,
30
+ `numpy.random.RandomState`}, optional
31
+
32
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
33
+ singleton is used.
34
+ If `seed` is an int, a new ``RandomState`` instance is used,
35
+ seeded with `seed`.
36
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
37
+ that instance is used.
38
+
39
+ Returns
40
+ -------
41
+ rvs : ndarray
42
+ The random variates distributed according to the probability
43
+ distribution defined by the pdf.
44
+
45
+ Notes
46
+ -----
47
+ Please refer to `scipy.stats.sampling.RatioUniforms` for the documentation.
48
+ """
49
+ warnings.warn("Please use `RatioUniforms` from the "
50
+ "`scipy.stats.sampling` namespace. The "
51
+ "`scipy.stats.rvs_ratio_uniforms` namespace is deprecated "
52
+ "and will be removed in SciPy 1.15.0",
53
+ category=DeprecationWarning, stacklevel=2)
54
+ gen = RatioUniforms(pdf, umax=umax, vmin=vmin, vmax=vmax,
55
+ c=c, random_state=random_state)
56
+ return gen.rvs(size)
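
Since the wrapper above is deprecated, a sketch of the suggested replacement, `scipy.stats.sampling.RatioUniforms`, is shown here for reference. Illustrative only and not part of the committed file; the bounding constants used are the usual ratio-of-uniforms box for the unnormalized standard-normal density exp(-x**2/2), namely umax = 1 and vmin/vmax = -/+ sqrt(2/e).

import numpy as np
from scipy.stats.sampling import RatioUniforms

# Unnormalized standard-normal density and its ratio-of-uniforms bounding box.
pdf = lambda x: np.exp(-x**2 / 2)
gen = RatioUniforms(pdf, umax=1.0,
                    vmin=-np.sqrt(2 / np.e), vmax=np.sqrt(2 / np.e),
                    random_state=12345)

sample = gen.rvs(1000)
print(sample.mean(), sample.std())  # roughly 0 and 1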
env-llmeval/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (766 kB).
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/contingency.py ADDED
@@ -0,0 +1,468 @@
1
+ """
2
+ Contingency table functions (:mod:`scipy.stats.contingency`)
3
+ ============================================================
4
+
5
+ Functions for creating and analyzing contingency tables.
6
+
7
+ .. currentmodule:: scipy.stats.contingency
8
+
9
+ .. autosummary::
10
+ :toctree: generated/
11
+
12
+ chi2_contingency
13
+ relative_risk
14
+ odds_ratio
15
+ crosstab
16
+ association
17
+
18
+ expected_freq
19
+ margins
20
+
21
+ """
22
+
23
+
24
+ from functools import reduce
25
+ import math
26
+ import numpy as np
27
+ from ._stats_py import power_divergence
28
+ from ._relative_risk import relative_risk
29
+ from ._crosstab import crosstab
30
+ from ._odds_ratio import odds_ratio
31
+ from scipy._lib._bunch import _make_tuple_bunch
32
+
33
+
34
+ __all__ = ['margins', 'expected_freq', 'chi2_contingency', 'crosstab',
35
+ 'association', 'relative_risk', 'odds_ratio']
36
+
37
+
38
+ def margins(a):
39
+ """Return a list of the marginal sums of the array `a`.
40
+
41
+ Parameters
42
+ ----------
43
+ a : ndarray
44
+ The array for which to compute the marginal sums.
45
+
46
+ Returns
47
+ -------
48
+ margsums : list of ndarrays
49
+ A list of length `a.ndim`. `margsums[k]` is the result
50
+ of summing `a` over all axes except `k`; it has the same
51
+ number of dimensions as `a`, but the length of each axis
52
+ except axis `k` will be 1.
53
+
54
+ Examples
55
+ --------
56
+ >>> import numpy as np
57
+ >>> from scipy.stats.contingency import margins
58
+
59
+ >>> a = np.arange(12).reshape(2, 6)
60
+ >>> a
61
+ array([[ 0, 1, 2, 3, 4, 5],
62
+ [ 6, 7, 8, 9, 10, 11]])
63
+ >>> m0, m1 = margins(a)
64
+ >>> m0
65
+ array([[15],
66
+ [51]])
67
+ >>> m1
68
+ array([[ 6, 8, 10, 12, 14, 16]])
69
+
70
+ >>> b = np.arange(24).reshape(2,3,4)
71
+ >>> m0, m1, m2 = margins(b)
72
+ >>> m0
73
+ array([[[ 66]],
74
+ [[210]]])
75
+ >>> m1
76
+ array([[[ 60],
77
+ [ 92],
78
+ [124]]])
79
+ >>> m2
80
+ array([[[60, 66, 72, 78]]])
81
+ """
82
+ margsums = []
83
+ ranged = list(range(a.ndim))
84
+ for k in ranged:
85
+ marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k])
86
+ margsums.append(marg)
87
+ return margsums
88
+
89
+
90
+ def expected_freq(observed):
91
+ """
92
+ Compute the expected frequencies from a contingency table.
93
+
94
+ Given an n-dimensional contingency table of observed frequencies,
95
+ compute the expected frequencies for the table based on the marginal
96
+ sums under the assumption that the groups associated with each
97
+ dimension are independent.
98
+
99
+ Parameters
100
+ ----------
101
+ observed : array_like
102
+ The table of observed frequencies. (While this function can handle
103
+ a 1-D array, that case is trivial. Generally `observed` is at
104
+ least 2-D.)
105
+
106
+ Returns
107
+ -------
108
+ expected : ndarray of float64
109
+ The expected frequencies, based on the marginal sums of the table.
110
+ Same shape as `observed`.
111
+
112
+ Examples
113
+ --------
114
+ >>> import numpy as np
115
+ >>> from scipy.stats.contingency import expected_freq
116
+ >>> observed = np.array([[10, 10, 20],[20, 20, 20]])
117
+ >>> expected_freq(observed)
118
+ array([[ 12., 12., 16.],
119
+ [ 18., 18., 24.]])
120
+
121
+ """
122
+ # Typically `observed` is an integer array. If `observed` has a large
123
+ # number of dimensions or holds large values, some of the following
124
+ # computations may overflow, so we first switch to floating point.
125
+ observed = np.asarray(observed, dtype=np.float64)
126
+
127
+ # Create a list of the marginal sums.
128
+ margsums = margins(observed)
129
+
130
+ # Create the array of expected frequencies. The shapes of the
131
+ # marginal sums returned by apply_over_axes() are just what we
132
+ # need for broadcasting in the following product.
133
+ d = observed.ndim
134
+ expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1)
135
+ return expected
136
+
137
+
138
+ Chi2ContingencyResult = _make_tuple_bunch(
139
+ 'Chi2ContingencyResult',
140
+ ['statistic', 'pvalue', 'dof', 'expected_freq'], []
141
+ )
142
+
143
+
144
+ def chi2_contingency(observed, correction=True, lambda_=None):
145
+ """Chi-square test of independence of variables in a contingency table.
146
+
147
+ This function computes the chi-square statistic and p-value for the
148
+ hypothesis test of independence of the observed frequencies in the
149
+ contingency table [1]_ `observed`. The expected frequencies are computed
150
+ based on the marginal sums under the assumption of independence; see
151
+ `scipy.stats.contingency.expected_freq`. The number of degrees of
152
+ freedom is (expressed using numpy functions and attributes)::
153
+
154
+ dof = observed.size - sum(observed.shape) + observed.ndim - 1
155
+
156
+
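For an ordinary two-dimensional R x C table this expression reduces to the textbook (R - 1) * (C - 1); a quick sketch (the 4 x 3 shape is an arbitrary choice):

>>> import numpy as np
>>> observed = np.ones((4, 3))  # only the shape matters here
>>> dof = observed.size - sum(observed.shape) + observed.ndim - 1
>>> dof == (4 - 1) * (3 - 1)
True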
157
+ Parameters
158
+ ----------
159
+ observed : array_like
160
+ The contingency table. The table contains the observed frequencies
161
+ (i.e. number of occurrences) in each category. In the two-dimensional
162
+ case, the table is often described as an "R x C table".
163
+ correction : bool, optional
164
+ If True, *and* the degrees of freedom is 1, apply Yates' correction
165
+ for continuity. The effect of the correction is to adjust each
166
+ observed value by 0.5 towards the corresponding expected value.
167
+ lambda_ : float or str, optional
168
+ By default, the statistic computed in this test is Pearson's
169
+ chi-squared statistic [2]_. `lambda_` allows a statistic from the
170
+ Cressie-Read power divergence family [3]_ to be used instead. See
171
+ `scipy.stats.power_divergence` for details.
172
+
173
+ Returns
174
+ -------
175
+ res : Chi2ContingencyResult
176
+ An object containing attributes:
177
+
178
+ statistic : float
179
+ The test statistic.
180
+ pvalue : float
181
+ The p-value of the test.
182
+ dof : int
183
+ The degrees of freedom.
184
+ expected_freq : ndarray, same shape as `observed`
185
+ The expected frequencies, based on the marginal sums of the table.
186
+
187
+ See Also
188
+ --------
189
+ scipy.stats.contingency.expected_freq
190
+ scipy.stats.fisher_exact
191
+ scipy.stats.chisquare
192
+ scipy.stats.power_divergence
193
+ scipy.stats.barnard_exact
194
+ scipy.stats.boschloo_exact
195
+
196
+ Notes
197
+ -----
198
+ An often quoted guideline for the validity of this calculation is that
199
+ the test should be used only if the observed and expected frequencies
200
+ in each cell are at least 5.
201
+
202
+ This is a test for the independence of different categories of a
203
+ population. The test is only meaningful when the dimension of
204
+ `observed` is two or more. Applying the test to a one-dimensional
205
+ table will always result in `expected` equal to `observed` and a
206
+ chi-square statistic equal to 0.
207
+
208
+ This function does not handle masked arrays, because the calculation
209
+ does not make sense with missing values.
210
+
211
+ Like `scipy.stats.chisquare`, this function computes a chi-square
212
+ statistic; the convenience this function provides is to figure out the
213
+ expected frequencies and degrees of freedom from the given contingency
214
+ table. If these were already known, and if the Yates' correction was not
215
+ required, one could use `scipy.stats.chisquare`. That is, if one calls::
216
+
217
+ res = chi2_contingency(obs, correction=False)
218
+
219
+ then the following is true::
220
+
221
+ (res.statistic, res.pvalue) == stats.chisquare(obs.ravel(),
222
+ f_exp=ex.ravel(),
223
+ ddof=obs.size - 1 - dof)
224
+
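Here `ex` stands for the table of expected frequencies. A minimal sketch checking the stated equivalence on a small table (the table values are an arbitrary choice):

>>> import numpy as np
>>> from scipy import stats
>>> from scipy.stats.contingency import chi2_contingency, expected_freq
>>> obs = np.array([[10, 10, 20], [20, 20, 20]])
>>> res = chi2_contingency(obs, correction=False)
>>> ex = expected_freq(obs)
>>> chi2, p = stats.chisquare(obs.ravel(), f_exp=ex.ravel(),
...                           ddof=obs.size - 1 - res.dof)
>>> np.allclose((chi2, p), (res.statistic, res.pvalue))
True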
225
+ The `lambda_` argument was added in version 0.13.0 of scipy.
226
+
227
+ References
228
+ ----------
229
+ .. [1] "Contingency table",
230
+ https://en.wikipedia.org/wiki/Contingency_table
231
+ .. [2] "Pearson's chi-squared test",
232
+ https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
233
+ .. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
234
+ Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
235
+ pp. 440-464.
236
+ .. [4] Berger, Jeffrey S. et al. "Aspirin for the Primary Prevention of
237
+ Cardiovascular Events in Women and Men: A Sex-Specific
238
+ Meta-analysis of Randomized Controlled Trials."
239
+ JAMA, 295(3):306-313, :doi:`10.1001/jama.295.3.306`, 2006.
240
+
241
+ Examples
242
+ --------
243
+ In [4]_, the use of aspirin to prevent cardiovascular events in women
244
+ and men was investigated. The study notably concluded:
245
+
246
+ ...aspirin therapy reduced the risk of a composite of
247
+ cardiovascular events due to its effect on reducing the risk of
248
+ ischemic stroke in women [...]
249
+
250
+ The article lists studies of various cardiovascular events. Let's
251
+ focus on the ischemic stroke in women.
252
+
253
+ The following table summarizes the results of the experiment in which
254
+ participants took aspirin or a placebo on a regular basis for several
255
+ years. Cases of ischemic stroke were recorded::
256
+
257
+                      Aspirin   Control/Placebo
258
+     Ischemic stroke      176               230
259
+     No stroke          21035             21018
260
+
261
+ Is there evidence that the aspirin reduces the risk of ischemic stroke?
262
+ We begin by formulating a null hypothesis :math:`H_0`:
263
+
264
+ The effect of aspirin is equivalent to that of placebo.
265
+
266
+ Let's assess the plausibility of this hypothesis with
267
+ a chi-square test.
268
+
269
+ >>> import numpy as np
270
+ >>> from scipy.stats import chi2_contingency
271
+ >>> table = np.array([[176, 230], [21035, 21018]])
272
+ >>> res = chi2_contingency(table)
273
+ >>> res.statistic
274
+ 6.892569132546561
275
+ >>> res.pvalue
276
+ 0.008655478161175739
277
+
278
+ Using a significance level of 5%, we would reject the null hypothesis in
279
+ favor of the alternative hypothesis: "the effect of aspirin
280
+ is not equivalent to the effect of placebo".
281
+ Because `scipy.stats.contingency.chi2_contingency` performs a two-sided
282
+ test, the alternative hypothesis does not indicate the direction of the
283
+ effect. We can use `stats.contingency.odds_ratio` to support the
284
+ conclusion that aspirin *reduces* the risk of ischemic stroke.
285
+
286
+ Below are further examples showing how larger contingency tables can be
287
+ tested.
288
+
289
+ A two-way example (2 x 3):
290
+
291
+ >>> obs = np.array([[10, 10, 20], [20, 20, 20]])
292
+ >>> res = chi2_contingency(obs)
293
+ >>> res.statistic
294
+ 2.7777777777777777
295
+ >>> res.pvalue
296
+ 0.24935220877729619
297
+ >>> res.dof
298
+ 2
299
+ >>> res.expected_freq
300
+ array([[ 12., 12., 16.],
301
+ [ 18., 18., 24.]])
302
+
303
+ Perform the test using the log-likelihood ratio (i.e. the "G-test")
304
+ instead of Pearson's chi-squared statistic.
305
+
306
+ >>> res = chi2_contingency(obs, lambda_="log-likelihood")
307
+ >>> res.statistic
308
+ 2.7688587616781319
309
+ >>> res.pvalue
310
+ 0.25046668010954165
311
+
312
+ A four-way example (2 x 2 x 2 x 2):
313
+
314
+ >>> obs = np.array(
315
+ ... [[[[12, 17],
316
+ ... [11, 16]],
317
+ ... [[11, 12],
318
+ ... [15, 16]]],
319
+ ... [[[23, 15],
320
+ ... [30, 22]],
321
+ ... [[14, 17],
322
+ ... [15, 16]]]])
323
+ >>> res = chi2_contingency(obs)
324
+ >>> res.statistic
325
+ 8.7584514426741897
326
+ >>> res.pvalue
327
+ 0.64417725029295503
328
+ """
329
+ observed = np.asarray(observed)
330
+ if np.any(observed < 0):
331
+ raise ValueError("All values in `observed` must be nonnegative.")
332
+ if observed.size == 0:
333
+ raise ValueError("No data; `observed` has size 0.")
334
+
335
+ expected = expected_freq(observed)
336
+ if np.any(expected == 0):
337
+ # Include one of the positions where expected is zero in
338
+ # the exception message.
339
+ zeropos = list(zip(*np.nonzero(expected == 0)))[0]
340
+ raise ValueError("The internally computed table of expected "
341
+ f"frequencies has a zero element at {zeropos}.")
342
+
343
+ # The degrees of freedom
344
+ dof = expected.size - sum(expected.shape) + expected.ndim - 1
345
+
346
+ if dof == 0:
347
+ # Degenerate case; this occurs when `observed` is 1D (or, more
348
+ # generally, when it has only one nontrivial dimension). In this
349
+ # case, we also have observed == expected, so chi2 is 0.
350
+ chi2 = 0.0
351
+ p = 1.0
352
+ else:
353
+ if dof == 1 and correction:
354
+ # Adjust `observed` according to Yates' correction for continuity.
355
+ # Magnitude of correction no bigger than difference; see gh-13875
356
+ diff = expected - observed
357
+ direction = np.sign(diff)
358
+ magnitude = np.minimum(0.5, np.abs(diff))
359
+ observed = observed + magnitude * direction
360
+
361
+ chi2, p = power_divergence(observed, expected,
362
+ ddof=observed.size - 1 - dof, axis=None,
363
+ lambda_=lambda_)
364
+
365
+ return Chi2ContingencyResult(chi2, p, dof, expected)
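A minimal sketch (reusing the aspirin table from the Examples above) of the capped Yates' correction implemented above: pulling each observed count toward its expected value can only shrink the statistic, never enlarge it.

>>> import numpy as np
>>> from scipy.stats import chi2_contingency
>>> table = np.array([[176, 230], [21035, 21018]])
>>> corrected = chi2_contingency(table, correction=True).statistic
>>> uncorrected = chi2_contingency(table, correction=False).statistic
>>> corrected <= uncorrected
True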
366
+
367
+
368
+ def association(observed, method="cramer", correction=False, lambda_=None):
369
+ """Calculates degree of association between two nominal variables.
370
+
371
+ The function provides the option for computing one of three measures of
372
+ association between two nominal variables from the data given in a 2d
373
+ contingency table: Tschuprow's T, Pearson's Contingency Coefficient
374
+ and Cramer's V.
375
+
376
+ Parameters
377
+ ----------
378
+ observed : array-like
379
+ The array of observed values
380
+ method : {"cramer", "tschuprow", "pearson"} (default = "cramer")
381
+ The association test statistic.
382
+ correction : bool, optional
383
+ Inherited from `scipy.stats.contingency.chi2_contingency()`
384
+ lambda_ : float or str, optional
385
+ Inherited from `scipy.stats.contingency.chi2_contingency()`
386
+
387
+ Returns
388
+ -------
389
+ statistic : float
390
+ Value of the test statistic
391
+
392
+ Notes
393
+ -----
394
+ Cramer's V, Tschuprow's T and Pearson's Contingency Coefficient, all
395
+ measure the degree to which two nominal or ordinal variables are related,
396
+ or the level of their association. This differs from correlation, although
397
+ many often mistakenly consider them equivalent. Correlation measures in
398
+ what way two variables are related, whereas, association measures how
399
+ related the variables are. As such, association does not subsume
400
+ independent variables, and is rather a test of independence. A value of
401
+ 1.0 indicates perfect association, and 0.0 means the variables have no
402
+ association.
403
+
404
+ Both the Cramer's V and Tschuprow's T are extensions of the phi
405
+ coefficient. Moreover, due to the close relationship between the
406
+ Cramer's V and Tschuprow's T the returned values can often be similar
407
+ or even equivalent. They are likely to diverge more as the array shape
408
+ diverges from a 2x2.
409
+
410
+ References
411
+ ----------
412
+ .. [1] "Tschuprow's T",
413
+ https://en.wikipedia.org/wiki/Tschuprow's_T
414
+ .. [2] Tschuprow, A. A. (1939)
415
+ Principles of the Mathematical Theory of Correlation;
416
+ translated by M. Kantorowitsch. W. Hodge & Co.
417
+ .. [3] "Cramer's V", https://en.wikipedia.org/wiki/Cramer's_V
418
+ .. [4] "Nominal Association: Phi and Cramer's V",
419
+ http://www.people.vcu.edu/~pdattalo/702SuppRead/MeasAssoc/NominalAssoc.html
420
+ .. [5] Gingrich, Paul, "Association Between Variables",
421
+ http://uregina.ca/~gingrich/ch11a.pdf
422
+
423
+ Examples
424
+ --------
425
+ An example with a 4x2 contingency table:
426
+
427
+ >>> import numpy as np
428
+ >>> from scipy.stats.contingency import association
429
+ >>> obs4x2 = np.array([[100, 150], [203, 322], [420, 700], [320, 210]])
430
+
431
+ Pearson's contingency coefficient
432
+
433
+ >>> association(obs4x2, method="pearson")
434
+ 0.18303298140595667
435
+
436
+ Cramer's V
437
+
438
+ >>> association(obs4x2, method="cramer")
439
+ 0.18617813077483678
440
+
441
+ Tschuprow's T
442
+
443
+ >>> association(obs4x2, method="tschuprow")
444
+ 0.14146478765062995
445
+ """
446
+ arr = np.asarray(observed)
447
+ if not np.issubdtype(arr.dtype, np.integer):
448
+ raise ValueError("`observed` must be an integer array.")
449
+
450
+ if len(arr.shape) != 2:
451
+ raise ValueError("method only accepts 2d arrays")
452
+
453
+ chi2_stat = chi2_contingency(arr, correction=correction,
454
+ lambda_=lambda_)
455
+
456
+ phi2 = chi2_stat.statistic / arr.sum()
457
+ n_rows, n_cols = arr.shape
458
+ if method == "cramer":
459
+ value = phi2 / min(n_cols - 1, n_rows - 1)
460
+ elif method == "tschuprow":
461
+ value = phi2 / math.sqrt((n_rows - 1) * (n_cols - 1))
462
+ elif method == 'pearson':
463
+ value = phi2 / (1 + phi2)
464
+ else:
465
+ raise ValueError("Invalid argument value: 'method' argument must "
466
+ "be 'cramer', 'tschuprow', or 'pearson'")
467
+
468
+ return math.sqrt(value)
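A minimal sketch (not part of the file) making the Cramer's V branch explicit: the returned value is sqrt(chi2 / (N * min(R - 1, C - 1))), where N is the total count.

>>> import numpy as np
>>> from scipy.stats.contingency import association, chi2_contingency
>>> obs = np.array([[100, 150], [203, 322], [420, 700], [320, 210]])
>>> chi2 = chi2_contingency(obs, correction=False).statistic
>>> n_rows, n_cols = obs.shape
>>> by_hand = np.sqrt(chi2 / (obs.sum() * min(n_rows - 1, n_cols - 1)))
>>> np.isclose(by_hand, association(obs, method="cramer"))
True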
env-llmeval/lib/python3.10/site-packages/scipy/stats/kde.py ADDED
@@ -0,0 +1,23 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.stats` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'gaussian_kde', 'linalg', 'logsumexp', 'check_random_state',
10
+ 'atleast_2d', 'reshape', 'newaxis', 'exp', 'ravel', 'power',
11
+ 'atleast_1d', 'squeeze', 'sum', 'transpose', 'cov',
12
+ 'gaussian_kernel_estimate'
13
+ ]
14
+
15
+
16
+ def __dir__():
17
+ return __all__
18
+
19
+
20
+ def __getattr__(name):
21
+ return _sub_module_deprecation(sub_package="stats", module="kde",
22
+ private_modules=["_kde"], all=__all__,
23
+ attribute=name)
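A minimal usage sketch, assuming (as the header comment indicates) that the shim re-exports the private implementation while warning about the deprecated import path:

>>> import warnings
>>> with warnings.catch_warnings(record=True) as w:
...     warnings.simplefilter("always")
...     from scipy.stats.kde import gaussian_kde  # deprecated path, still works
>>> len(w) > 0  # the shim is expected to emit a deprecation warning here
True
>>> from scipy.stats import gaussian_kde  # preferred import going forward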
env-llmeval/lib/python3.10/site-packages/scipy/stats/qmc.py ADDED
@@ -0,0 +1,235 @@
1
+ r"""
2
+ ====================================================
3
+ Quasi-Monte Carlo submodule (:mod:`scipy.stats.qmc`)
4
+ ====================================================
5
+
6
+ .. currentmodule:: scipy.stats.qmc
7
+
8
+ This module provides Quasi-Monte Carlo generators and associated helper
9
+ functions.
10
+
11
+
12
+ Quasi-Monte Carlo
13
+ =================
14
+
15
+ Engines
16
+ -------
17
+
18
+ .. autosummary::
19
+ :toctree: generated/
20
+
21
+ QMCEngine
22
+ Sobol
23
+ Halton
24
+ LatinHypercube
25
+ PoissonDisk
26
+ MultinomialQMC
27
+ MultivariateNormalQMC
28
+
29
+ Helpers
30
+ -------
31
+
32
+ .. autosummary::
33
+ :toctree: generated/
34
+
35
+ discrepancy
36
+ geometric_discrepancy
37
+ update_discrepancy
38
+ scale
39
+
40
+
41
+ Introduction to Quasi-Monte Carlo
42
+ =================================
43
+
44
+ Quasi-Monte Carlo (QMC) methods [1]_, [2]_, [3]_ provide an
45
+ :math:`n \times d` array of numbers in :math:`[0,1]`. They can be used in
46
+ place of :math:`n` points from the :math:`U[0,1]^{d}` distribution. Compared to
47
+ random points, QMC points are designed to have fewer gaps and clumps. This is
48
+ quantified by discrepancy measures [4]_. From the Koksma-Hlawka
49
+ inequality [5]_ we know that low discrepancy reduces a bound on
50
+ integration error. Averaging a function :math:`f` over :math:`n` QMC points
51
+ can achieve an integration error close to :math:`O(n^{-1})` for well
52
+ behaved functions [2]_.
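A rough sketch of these claims (not part of the module docstring; the integrand, seed, and sample size are arbitrary choices), comparing pseudo-random points with scrambled Sobol' points both through `discrepancy` and through the error of a sample-mean estimate whose exact value over the unit cube is 1:

    import numpy as np
    from scipy.stats import qmc

    n, d = 256, 2
    rng = np.random.default_rng(1234)

    mc_pts = rng.random((n, d))                                   # plain Monte Carlo
    qmc_pts = qmc.Sobol(d=d, scramble=True, seed=rng).random(n)   # scrambled Sobol'

    # Lower discrepancy means fewer gaps and clumps in [0, 1]^d.
    print(qmc.discrepancy(mc_pts), qmc.discrepancy(qmc_pts))

    def f(x):
        # smooth test integrand whose exact mean over [0, 1]^d is 1
        return np.prod(1.0 + (x - 0.5), axis=1)

    # The QMC average is typically much closer to 1 than the MC average.
    print(abs(f(mc_pts).mean() - 1.0), abs(f(qmc_pts).mean() - 1.0))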
53
+
54
+ Most QMC constructions are designed for special values of :math:`n`
55
+ such as powers of 2 or large primes. Changing the sample
56
+ size by even one can degrade their performance, even their
57
+ rate of convergence [6]_. For instance :math:`n=100` points may give less
58
+ accuracy than :math:`n=64` if the method was designed for :math:`n=2^m`.
59
+
60
+ Some QMC constructions are extensible in :math:`n`: we can find
61
+ another special sample size :math:`n' > n` and often an infinite
62
+ sequence of increasing special sample sizes. Some QMC
63
+ constructions are extensible in :math:`d`: we can increase the dimension,
64
+ possibly to some upper bound, and typically without requiring
65
+ special values of :math:`d`. Some QMC methods are extensible in
66
+ both :math:`n` and :math:`d`.
67
+
68
+ QMC points are deterministic. That makes it hard to estimate the accuracy of
69
+ integrals estimated by averages over QMC points. Randomized QMC (RQMC) [7]_
70
+ points are constructed so that each point is individually :math:`U[0,1]^{d}`
71
+ while collectively the :math:`n` points retain their low discrepancy.
72
+ One can make :math:`R` independent replications of RQMC points to
73
+ see how stable a computation is. From :math:`R` independent values,
74
+ a t-test (or bootstrap t-test [8]_) then gives approximate confidence
75
+ intervals on the mean value. Some RQMC methods produce a
76
+ root mean squared error that is actually :math:`o(1/n)` and smaller than
77
+ the rate seen in unrandomized QMC. An intuitive explanation is
78
+ that the error is a sum of many small ones and random errors
79
+ cancel in a way that deterministic ones do not. RQMC also
80
+ has advantages on integrands that are singular or, for other
81
+ reasons, fail to be Riemann integrable.
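A rough sketch of this replication idea (not part of the module docstring; the integrand, number of replicates, and sample size are arbitrary choices): average a function over several independently scrambled Sobol' samples and form a t-based confidence interval from the replicate means.

    import numpy as np
    from scipy import stats
    from scipy.stats import qmc

    def f(x):
        # arbitrary smooth integrand on [0, 1]^3
        return np.exp(x.sum(axis=1))

    n_rep, m, d = 16, 10, 3  # 16 replicates of 2**10 points in 3 dimensions
    means = np.array([
        f(qmc.Sobol(d=d, scramble=True, seed=seed).random_base2(m=m)).mean()
        for seed in range(n_rep)
    ])

    # Approximate 95% confidence interval for the integral from the replicates.
    half_width = stats.t.ppf(0.975, df=n_rep - 1) * means.std(ddof=1) / np.sqrt(n_rep)
    print(means.mean(), "+/-", half_width)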
82
+
83
+ (R)QMC cannot beat Bakhvalov's curse of dimension (see [9]_). For
84
+ any random or deterministic method, there are worst case functions
85
+ that will give it poor performance in high dimensions. A worst
86
+ case function for QMC might be 0 at all n points but very
87
+ large elsewhere. Worst case analyses get very pessimistic
88
+ in high dimensions. (R)QMC can bring a great improvement over
89
+ MC when the functions on which it is used are not worst case.
90
+ For instance (R)QMC can be especially effective on integrands
91
+ that are well approximated by sums of functions of
92
+ some small number of their input variables at a time [10]_, [11]_.
93
+ That property is often a surprising finding about those functions.
94
+
95
+ Also, to see an improvement over IID MC, (R)QMC requires a bit of smoothness of
96
+ the integrand, roughly the mixed first order derivative in each direction,
97
+ :math:`\partial^d f/\partial x_1 \cdots \partial x_d`, must be integrable.
98
+ For instance, a function that is 1 inside the hypersphere and 0 outside of it
99
+ has infinite variation in the sense of Hardy and Krause for any dimension
100
+ :math:`d \geq 2`.
101
+
102
+ Scrambled nets are a kind of RQMC that have some valuable robustness
103
+ properties [12]_. If the integrand is square integrable, they give variance
104
+ :math:`var_{SNET} = o(1/n)`. There is a finite upper bound on
105
+ :math:`var_{SNET} / var_{MC}` that holds simultaneously for every square
106
+ integrable integrand. Scrambled nets satisfy a strong law of large numbers
107
+ for :math:`f` in :math:`L^p` when :math:`p>1`. In some
108
+ special cases there is a central limit theorem [13]_. For smooth enough
109
+ integrands they can achieve RMSE nearly :math:`O(n^{-3})`. See [12]_
110
+ for references about these properties.
111
+
112
+ The main kinds of QMC methods are lattice rules [14]_ and digital
113
+ nets and sequences [2]_, [15]_. The theories meet up in polynomial
114
+ lattice rules [16]_ which can produce digital nets. Lattice rules
115
+ require some form of search for good constructions. For digital
116
+ nets there are widely used default constructions.
117
+
118
+ The most widely used QMC methods are Sobol' sequences [17]_.
119
+ These are digital nets. They are extensible in both :math:`n` and :math:`d`.
120
+ They can be scrambled. The special sample sizes are powers
121
+ of 2. Halton sequences [18]_ are another popular method.
122
+ The constructions resemble those of digital nets. The earlier
123
+ dimensions have much better equidistribution properties than
124
+ later ones. There are essentially no special sample sizes.
125
+ They are not thought to be as accurate as Sobol' sequences.
126
+ They can be scrambled. The nets of Faure [19]_ are also widely
127
+ used. All dimensions are equally good, but the special sample
128
+ sizes grow rapidly with dimension :math:`d`. They can be scrambled.
129
+ The nets of Niederreiter and Xing [20]_ have the best asymptotic
130
+ properties but have not shown good empirical performance [21]_.
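A small sketch of the practical difference in sample sizes (dimension and counts are arbitrary choices): Sobol' sequences are best used at powers of two, which `random_base2` makes explicit, while Halton sequences accept any `n`.

    from scipy.stats import qmc

    # Sobol': draw 2**7 = 128 points via random_base2 to stay on a power of two.
    sobol_sample = qmc.Sobol(d=2, scramble=True, seed=0).random_base2(m=7)

    # Halton: there are no special sample sizes, so any n is acceptable.
    halton_sample = qmc.Halton(d=2, scramble=True, seed=0).random(100)

    print(sobol_sample.shape, halton_sample.shape)  # (128, 2) and (100, 2)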
131
+
132
+ Higher order digital nets are formed by a digit interleaving process
133
+ in the digits of the constructed points. They can achieve higher
134
+ levels of asymptotic accuracy given higher smoothness conditions on :math:`f`
135
+ and they can be scrambled [22]_. There is little or no empirical work
136
+ showing the improved rate to be attained.
137
+
138
+ Using QMC is like using the entire period of a small random
139
+ number generator. The constructions are similar and so
140
+ therefore are the computational costs [23]_.
141
+
142
+ (R)QMC is sometimes improved by passing the points through
143
+ a baker's transformation (tent function) prior to using them.
144
+ That function has the form :math:`1-2|x-1/2|`. As :math:`x` goes from 0 to
145
+ 1, this function goes from 0 to 1 and then back. It is very
146
+ useful to produce a periodic function for lattice rules [14]_,
147
+ and sometimes it improves the convergence rate [24]_.
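A minimal sketch of the baker's (tent) transformation described here, applied elementwise to QMC points before they are used (the sample size is an arbitrary choice):

    import numpy as np
    from scipy.stats import qmc

    sample = qmc.Sobol(d=2, scramble=True, seed=0).random_base2(m=8)

    # Tent map 1 - 2|x - 1/2|: rises from 0 to 1 and falls back to 0 on [0, 1].
    tented = 1.0 - 2.0 * np.abs(sample - 0.5)

    # The transformed points still lie in the unit square.
    print(tented.min(), tented.max())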
148
+
149
+ It is not straightforward to apply QMC methods to Markov
150
+ chain Monte Carlo (MCMC). We can think of MCMC as using
151
+ :math:`n=1` point in :math:`[0,1]^{d}` for very large :math:`d`, with
152
+ ergodic results corresponding to :math:`d \to \infty`. One proposal is
153
+ in [25]_ and under strong conditions an improved rate of convergence
154
+ has been shown [26]_.
155
+
156
+ Returning to Sobol' points: there are many versions depending
157
+ on what are called direction numbers. Those are the result of
158
+ searches and are tabulated. A very widely used set of direction
159
+ numbers come from [27]_. It is extensible in dimension up to
160
+ :math:`d=21201`.
161
+
162
+ References
163
+ ----------
164
+ .. [1] Owen, Art B. "Monte Carlo Book: the Quasi-Monte Carlo parts." 2019.
165
+ .. [2] Niederreiter, Harald. "Random number generation and quasi-Monte Carlo
166
+ methods." Society for Industrial and Applied Mathematics, 1992.
167
+ .. [3] Dick, Josef, Frances Y. Kuo, and Ian H. Sloan. "High-dimensional
168
+ integration: the quasi-Monte Carlo way." Acta Numerica no. 22: 133, 2013.
169
+ .. [4] Aho, A. V., C. Aistleitner, T. Anderson, K. Appel, V. Arnol'd, N.
170
+ Aronszajn, D. Asotsky et al. "W. Chen et al.(eds.), "A Panorama of
171
+ Discrepancy Theory", Springer International Publishing,
172
+ Switzerland: 679, 2014.
173
+ .. [5] Hickernell, Fred J. "Koksma-Hlawka Inequality." Wiley StatsRef:
174
+ Statistics Reference Online, 2014.
175
+ .. [6] Owen, Art B. "On dropping the first Sobol' point." :arxiv:`2008.08051`,
176
+ 2020.
177
+ .. [7] L'Ecuyer, Pierre, and Christiane Lemieux. "Recent advances in randomized
178
+ quasi-Monte Carlo methods." In Modeling uncertainty, pp. 419-474. Springer,
179
+ New York, NY, 2002.
180
+ .. [8] DiCiccio, Thomas J., and Bradley Efron. "Bootstrap confidence
181
+ intervals." Statistical science: 189-212, 1996.
182
+ .. [9] Dimov, Ivan T. "Monte Carlo methods for applied scientists." World
183
+ Scientific, 2008.
184
+ .. [10] Caflisch, Russel E., William J. Morokoff, and Art B. Owen. "Valuation
185
+ of mortgage backed securities using Brownian bridges to reduce effective
186
+ dimension." Journal of Computational Finance: no. 1 27-46, 1997.
187
+ .. [11] Sloan, Ian H., and Henryk Wozniakowski. "When are quasi-Monte Carlo
188
+ algorithms efficient for high dimensional integrals?." Journal of Complexity
189
+ 14, no. 1 (1998): 1-33.
190
+ .. [12] Owen, Art B., and Daniel Rudolf, "A strong law of large numbers for
191
+ scrambled net integration." SIAM Review, to appear.
192
+ .. [13] Loh, Wei-Liem. "On the asymptotic distribution of scrambled net
193
+ quadrature." The Annals of Statistics 31, no. 4: 1282-1324, 2003.
194
+ .. [14] Sloan, Ian H. and S. Joe. "Lattice methods for multiple integration."
195
+ Oxford University Press, 1994.
196
+ .. [15] Dick, Josef, and Friedrich Pillichshammer. "Digital nets and sequences:
197
+ discrepancy theory and quasi-Monte Carlo integration." Cambridge University
198
+ Press, 2010.
199
+ .. [16] Dick, Josef, F. Kuo, Friedrich Pillichshammer, and I. Sloan.
200
+ "Construction algorithms for polynomial lattice rules for multivariate
201
+ integration." Mathematics of computation 74, no. 252: 1895-1921, 2005.
202
+ .. [17] Sobol', Il'ya Meerovich. "On the distribution of points in a cube and
203
+ the approximate evaluation of integrals." Zhurnal Vychislitel'noi Matematiki
204
+ i Matematicheskoi Fiziki 7, no. 4: 784-802, 1967.
205
+ .. [18] Halton, John H. "On the efficiency of certain quasi-random sequences of
206
+ points in evaluating multi-dimensional integrals." Numerische Mathematik 2,
207
+ no. 1: 84-90, 1960.
208
+ .. [19] Faure, Henri. "Discrepance de suites associees a un systeme de
209
+ numeration (en dimension s)." Acta arithmetica 41, no. 4: 337-351, 1982.
210
+ .. [20] Niederreiter, Harald, and Chaoping Xing. "Low-discrepancy sequences and
211
+ global function fields with many rational places." Finite Fields and their
212
+ applications 2, no. 3: 241-273, 1996.
213
+ .. [21] Hong, Hee Sun, and Fred J. Hickernell. "Algorithm 823: Implementing
214
+ scrambled digital sequences." ACM Transactions on Mathematical Software
215
+ (TOMS) 29, no. 2: 95-109, 2003.
216
+ .. [22] Dick, Josef. "Higher order scrambled digital nets achieve the optimal
217
+ rate of the root mean square error for smooth integrands." The Annals of
218
+ Statistics 39, no. 3: 1372-1398, 2011.
219
+ .. [23] Niederreiter, Harald. "Multidimensional numerical integration using
220
+ pseudorandom numbers." In Stochastic Programming 84 Part I, pp. 17-38.
221
+ Springer, Berlin, Heidelberg, 1986.
222
+ .. [24] Hickernell, Fred J. "Obtaining O (N-2+e) Convergence for Lattice
223
+ Quadrature Rules." In Monte Carlo and Quasi-Monte Carlo Methods 2000,
224
+ pp. 274-289. Springer, Berlin, Heidelberg, 2002.
225
+ .. [25] Owen, Art B., and Seth D. Tribble. "A quasi-Monte Carlo Metropolis
226
+ algorithm." Proceedings of the National Academy of Sciences 102,
227
+ no. 25: 8844-8849, 2005.
228
+ .. [26] Chen, Su. "Consistency and convergence rate of Markov chain quasi Monte
229
+ Carlo with examples." PhD diss., Stanford University, 2011.
230
+ .. [27] Joe, Stephen, and Frances Y. Kuo. "Constructing Sobol sequences with
231
+ better two-dimensional projections." SIAM Journal on Scientific Computing
232
+ 30, no. 5: 2635-2654, 2008.
233
+
234
+ """
235
+ from ._qmc import * # noqa: F403
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/common_tests.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_axis_nan_policy.cpython-310.pyc ADDED
Binary file (34 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc ADDED
Binary file (16.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc ADDED
Binary file (1.72 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_censored_data.cpython-310.pyc ADDED
Binary file (6.11 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_contingency.cpython-310.pyc ADDED
Binary file (4.99 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_continuous_basic.cpython-310.pyc ADDED
Binary file (27.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_continuous_fit_censored.cpython-310.pyc ADDED
Binary file (22.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc ADDED
Binary file (3.87 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_discrete_basic.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_discrete_distns.cpython-310.pyc ADDED
Binary file (18.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_distributions.cpython-310.pyc ADDED
Binary file (278 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_entropy.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fit.cpython-310.pyc ADDED
Binary file (30.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_hypotests.cpython-310.pyc ADDED
Binary file (55.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_kdeoth.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc ADDED
Binary file (99.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_basic.cpython-310.pyc ADDED
Binary file (68.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc ADDED
Binary file (5.56 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multicomp.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc ADDED
Binary file (112 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_odds_ratio.cpython-310.pyc ADDED
Binary file (5.14 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_qmc.cpython-310.pyc ADDED
Binary file (44.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_rank.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_relative_risk.cpython-310.pyc ADDED
Binary file (3.23 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_resampling.cpython-310.pyc ADDED
Binary file (52.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_sampling.cpython-310.pyc ADDED
Binary file (46.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_sensitivity_analysis.cpython-310.pyc ADDED
Binary file (8.36 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc ADDED
Binary file (265 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_survival.cpython-310.pyc ADDED
Binary file (13.9 kB). View file