applied-ai-018 commited on
Commit
907dd76
·
verified ·
1 Parent(s): 700540d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step60/zero/12.attention.dense.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step60/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step60/zero/7.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/numpy/ma/API_CHANGES.txt +135 -0
  5. venv/lib/python3.10/site-packages/numpy/ma/extras.py +2133 -0
  6. venv/lib/python3.10/site-packages/numpy/ma/mrecords.py +783 -0
  7. venv/lib/python3.10/site-packages/numpy/ma/setup.py +12 -0
  8. venv/lib/python3.10/site-packages/numpy/random/LICENSE.md +71 -0
  9. venv/lib/python3.10/site-packages/numpy/random/__init__.pxd +14 -0
  10. venv/lib/python3.10/site-packages/numpy/random/__init__.py +215 -0
  11. venv/lib/python3.10/site-packages/numpy/random/__init__.pyi +72 -0
  12. venv/lib/python3.10/site-packages/numpy/random/__pycache__/__init__.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/numpy/random/__pycache__/_pickle.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/numpy/random/_bounded_integers.cpython-310-x86_64-linux-gnu.so +0 -0
  15. venv/lib/python3.10/site-packages/numpy/random/_bounded_integers.pxd +29 -0
  16. venv/lib/python3.10/site-packages/numpy/random/_common.cpython-310-x86_64-linux-gnu.so +0 -0
  17. venv/lib/python3.10/site-packages/numpy/random/_common.pxd +106 -0
  18. venv/lib/python3.10/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/numpy/random/_examples/cffi/extending.py +40 -0
  21. venv/lib/python3.10/site-packages/numpy/random/_examples/cffi/parse.py +54 -0
  22. venv/lib/python3.10/site-packages/numpy/random/_examples/cython/extending.pyx +78 -0
  23. venv/lib/python3.10/site-packages/numpy/random/_examples/cython/extending_distributions.pyx +117 -0
  24. venv/lib/python3.10/site-packages/numpy/random/_examples/cython/meson.build +45 -0
  25. venv/lib/python3.10/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/numpy/random/_examples/numba/extending.py +84 -0
  28. venv/lib/python3.10/site-packages/numpy/random/_examples/numba/extending_distributions.py +67 -0
  29. venv/lib/python3.10/site-packages/numpy/random/_generator.cpython-310-x86_64-linux-gnu.so +0 -0
  30. venv/lib/python3.10/site-packages/numpy/random/_generator.pyi +681 -0
  31. venv/lib/python3.10/site-packages/numpy/random/_mt19937.cpython-310-x86_64-linux-gnu.so +0 -0
  32. venv/lib/python3.10/site-packages/numpy/random/_mt19937.pyi +22 -0
  33. venv/lib/python3.10/site-packages/numpy/random/_pcg64.cpython-310-x86_64-linux-gnu.so +0 -0
  34. venv/lib/python3.10/site-packages/numpy/random/_pcg64.pyi +42 -0
  35. venv/lib/python3.10/site-packages/numpy/random/_philox.cpython-310-x86_64-linux-gnu.so +0 -0
  36. venv/lib/python3.10/site-packages/numpy/random/_philox.pyi +36 -0
  37. venv/lib/python3.10/site-packages/numpy/random/_pickle.py +80 -0
  38. venv/lib/python3.10/site-packages/numpy/random/_sfc64.cpython-310-x86_64-linux-gnu.so +0 -0
  39. venv/lib/python3.10/site-packages/numpy/random/_sfc64.pyi +28 -0
  40. venv/lib/python3.10/site-packages/numpy/random/bit_generator.cpython-310-x86_64-linux-gnu.so +0 -0
  41. venv/lib/python3.10/site-packages/numpy/random/bit_generator.pxd +35 -0
  42. venv/lib/python3.10/site-packages/numpy/random/bit_generator.pyi +112 -0
  43. venv/lib/python3.10/site-packages/numpy/random/c_distributions.pxd +120 -0
  44. venv/lib/python3.10/site-packages/numpy/random/lib/libnpyrandom.a +0 -0
  45. venv/lib/python3.10/site-packages/numpy/random/mtrand.cpython-310-x86_64-linux-gnu.so +0 -0
  46. venv/lib/python3.10/site-packages/numpy/random/mtrand.pyi +571 -0
  47. venv/lib/python3.10/site-packages/numpy/random/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/numpy/random/tests/__pycache__/test_direct.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/numpy/random/tests/__pycache__/test_extending.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-310.pyc +0 -0
ckpts/universal/global_step60/zero/12.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd6b1110c13ab85211ad8249ac8b6dfd16037292725b82a47f10bd6963c6b3ad
3
+ size 16778317
ckpts/universal/global_step60/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6b467f5f41cde22a4508ee4ee598fd25d222fd8806c27cf3093d5dc3f6391e6
3
+ size 33555612
ckpts/universal/global_step60/zero/7.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f446c591065bd609cced8218e0d82f53080c74ca5b6751479adfa20f9b595e30
3
+ size 33555533
venv/lib/python3.10/site-packages/numpy/ma/API_CHANGES.txt ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. -*- rest -*-
2
+
3
+ ==================================================
4
+ API changes in the new masked array implementation
5
+ ==================================================
6
+
7
+ Masked arrays are subclasses of ndarray
8
+ ---------------------------------------
9
+
10
+ Contrary to the original implementation, masked arrays are now regular
11
+ ndarrays::
12
+
13
+ >>> x = masked_array([1,2,3],mask=[0,0,1])
14
+ >>> print isinstance(x, numpy.ndarray)
15
+ True
16
+
17
+
18
+ ``_data`` returns a view of the masked array
19
+ --------------------------------------------
20
+
21
+ Masked arrays are composed of a ``_data`` part and a ``_mask``. Accessing the
22
+ ``_data`` part will return a regular ndarray or any of its subclass, depending
23
+ on the initial data::
24
+
25
+ >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])
26
+ >>> print x._data
27
+ [[1 2]
28
+ [3 4]]
29
+ >>> print type(x._data)
30
+ <class 'numpy.matrixlib.defmatrix.matrix'>
31
+
32
+
33
+ In practice, ``_data`` is implemented as a property, not as an attribute.
34
+ Therefore, you cannot access it directly, and some simple tests such as the
35
+ following one will fail::
36
+
37
+ >>>x._data is x._data
38
+ False
39
+
40
+
41
+ ``filled(x)`` can return a subclass of ndarray
42
+ ----------------------------------------------
43
+ The function ``filled(a)`` returns an array of the same type as ``a._data``::
44
+
45
+ >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])
46
+ >>> y = filled(x)
47
+ >>> print type(y)
48
+ <class 'numpy.matrixlib.defmatrix.matrix'>
49
+ >>> print y
50
+ matrix([[ 1, 2],
51
+ [ 3, 999999]])
52
+
53
+
54
+ ``put``, ``putmask`` behave like their ndarray counterparts
55
+ -----------------------------------------------------------
56
+
57
+ Previously, ``putmask`` was used like this::
58
+
59
+ mask = [False,True,True]
60
+ x = array([1,4,7],mask=mask)
61
+ putmask(x,mask,[3])
62
+
63
+ which translated to::
64
+
65
+ x[~mask] = [3]
66
+
67
+ (Note that a ``True``-value in a mask suppresses a value.)
68
+
69
+ In other words, the mask had the same length as ``x``, whereas
70
+ ``values`` had ``sum(~mask)`` elements.
71
+
72
+ Now, the behaviour is similar to that of ``ndarray.putmask``, where
73
+ the mask and the values are both the same length as ``x``, i.e.
74
+
75
+ ::
76
+
77
+ putmask(x,mask,[3,0,0])
78
+
79
+
80
+ ``fill_value`` is a property
81
+ ----------------------------
82
+
83
+ ``fill_value`` is no longer a method, but a property::
84
+
85
+ >>> print x.fill_value
86
+ 999999
87
+
88
+ ``cumsum`` and ``cumprod`` ignore missing values
89
+ ------------------------------------------------
90
+
91
+ Missing values are assumed to be the identity element, i.e. 0 for
92
+ ``cumsum`` and 1 for ``cumprod``::
93
+
94
+ >>> x = N.ma.array([1,2,3,4],mask=[False,True,False,False])
95
+ >>> print x
96
+ [1 -- 3 4]
97
+ >>> print x.cumsum()
98
+ [1 -- 4 8]
99
+ >> print x.cumprod()
100
+ [1 -- 3 12]
101
+
102
+ ``bool(x)`` raises a ValueError
103
+ -------------------------------
104
+
105
+ Masked arrays now behave like regular ``ndarrays``, in that they cannot be
106
+ converted to booleans:
107
+
108
+ ::
109
+
110
+ >>> x = N.ma.array([1,2,3])
111
+ >>> bool(x)
112
+ Traceback (most recent call last):
113
+ File "<stdin>", line 1, in <module>
114
+ ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
115
+
116
+
117
+ ==================================
118
+ New features (non exhaustive list)
119
+ ==================================
120
+
121
+ ``mr_``
122
+ -------
123
+
124
+ ``mr_`` mimics the behavior of ``r_`` for masked arrays::
125
+
126
+ >>> np.ma.mr_[3,4,5]
127
+ masked_array(data = [3 4 5],
128
+ mask = False,
129
+ fill_value=999999)
130
+
131
+
132
+ ``anom``
133
+ --------
134
+
135
+ The ``anom`` method returns the deviations from the average (anomalies).
venv/lib/python3.10/site-packages/numpy/ma/extras.py ADDED
@@ -0,0 +1,2133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Masked arrays add-ons.
3
+
4
+ A collection of utilities for `numpy.ma`.
5
+
6
+ :author: Pierre Gerard-Marchant
7
+ :contact: pierregm_at_uga_dot_edu
8
+ :version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
9
+
10
+ """
11
+ __all__ = [
12
+ 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
13
+ 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack',
14
+ 'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows',
15
+ 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d',
16
+ 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack',
17
+ 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows',
18
+ 'masked_all', 'masked_all_like', 'median', 'mr_', 'ndenumerate',
19
+ 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
20
+ 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack',
21
+ ]
22
+
23
+ import itertools
24
+ import warnings
25
+
26
+ from . import core as ma
27
+ from .core import (
28
+ MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
29
+ getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
30
+ nomask, ones, sort, zeros, getdata, get_masked_subclass, dot
31
+ )
32
+
33
+ import numpy as np
34
+ from numpy import ndarray, array as nxarray
35
+ from numpy.core.multiarray import normalize_axis_index
36
+ from numpy.core.numeric import normalize_axis_tuple
37
+ from numpy.lib.function_base import _ureduce
38
+ from numpy.lib.index_tricks import AxisConcatenator
39
+
40
+
41
+ def issequence(seq):
42
+ """
43
+ Is seq a sequence (ndarray, list or tuple)?
44
+
45
+ """
46
+ return isinstance(seq, (ndarray, tuple, list))
47
+
48
+
49
+ def count_masked(arr, axis=None):
50
+ """
51
+ Count the number of masked elements along the given axis.
52
+
53
+ Parameters
54
+ ----------
55
+ arr : array_like
56
+ An array with (possibly) masked elements.
57
+ axis : int, optional
58
+ Axis along which to count. If None (default), a flattened
59
+ version of the array is used.
60
+
61
+ Returns
62
+ -------
63
+ count : int, ndarray
64
+ The total number of masked elements (axis=None) or the number
65
+ of masked elements along each slice of the given axis.
66
+
67
+ See Also
68
+ --------
69
+ MaskedArray.count : Count non-masked elements.
70
+
71
+ Examples
72
+ --------
73
+ >>> import numpy.ma as ma
74
+ >>> a = np.arange(9).reshape((3,3))
75
+ >>> a = ma.array(a)
76
+ >>> a[1, 0] = ma.masked
77
+ >>> a[1, 2] = ma.masked
78
+ >>> a[2, 1] = ma.masked
79
+ >>> a
80
+ masked_array(
81
+ data=[[0, 1, 2],
82
+ [--, 4, --],
83
+ [6, --, 8]],
84
+ mask=[[False, False, False],
85
+ [ True, False, True],
86
+ [False, True, False]],
87
+ fill_value=999999)
88
+ >>> ma.count_masked(a)
89
+ 3
90
+
91
+ When the `axis` keyword is used an array is returned.
92
+
93
+ >>> ma.count_masked(a, axis=0)
94
+ array([1, 1, 1])
95
+ >>> ma.count_masked(a, axis=1)
96
+ array([0, 2, 1])
97
+
98
+ """
99
+ m = getmaskarray(arr)
100
+ return m.sum(axis)
101
+
102
+
103
+ def masked_all(shape, dtype=float):
104
+ """
105
+ Empty masked array with all elements masked.
106
+
107
+ Return an empty masked array of the given shape and dtype, where all the
108
+ data are masked.
109
+
110
+ Parameters
111
+ ----------
112
+ shape : int or tuple of ints
113
+ Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``.
114
+ dtype : dtype, optional
115
+ Data type of the output.
116
+
117
+ Returns
118
+ -------
119
+ a : MaskedArray
120
+ A masked array with all data masked.
121
+
122
+ See Also
123
+ --------
124
+ masked_all_like : Empty masked array modelled on an existing array.
125
+
126
+ Examples
127
+ --------
128
+ >>> import numpy.ma as ma
129
+ >>> ma.masked_all((3, 3))
130
+ masked_array(
131
+ data=[[--, --, --],
132
+ [--, --, --],
133
+ [--, --, --]],
134
+ mask=[[ True, True, True],
135
+ [ True, True, True],
136
+ [ True, True, True]],
137
+ fill_value=1e+20,
138
+ dtype=float64)
139
+
140
+ The `dtype` parameter defines the underlying data type.
141
+
142
+ >>> a = ma.masked_all((3, 3))
143
+ >>> a.dtype
144
+ dtype('float64')
145
+ >>> a = ma.masked_all((3, 3), dtype=np.int32)
146
+ >>> a.dtype
147
+ dtype('int32')
148
+
149
+ """
150
+ a = masked_array(np.empty(shape, dtype),
151
+ mask=np.ones(shape, make_mask_descr(dtype)))
152
+ return a
153
+
154
+
155
+ def masked_all_like(arr):
156
+ """
157
+ Empty masked array with the properties of an existing array.
158
+
159
+ Return an empty masked array of the same shape and dtype as
160
+ the array `arr`, where all the data are masked.
161
+
162
+ Parameters
163
+ ----------
164
+ arr : ndarray
165
+ An array describing the shape and dtype of the required MaskedArray.
166
+
167
+ Returns
168
+ -------
169
+ a : MaskedArray
170
+ A masked array with all data masked.
171
+
172
+ Raises
173
+ ------
174
+ AttributeError
175
+ If `arr` doesn't have a shape attribute (i.e. not an ndarray)
176
+
177
+ See Also
178
+ --------
179
+ masked_all : Empty masked array with all elements masked.
180
+
181
+ Examples
182
+ --------
183
+ >>> import numpy.ma as ma
184
+ >>> arr = np.zeros((2, 3), dtype=np.float32)
185
+ >>> arr
186
+ array([[0., 0., 0.],
187
+ [0., 0., 0.]], dtype=float32)
188
+ >>> ma.masked_all_like(arr)
189
+ masked_array(
190
+ data=[[--, --, --],
191
+ [--, --, --]],
192
+ mask=[[ True, True, True],
193
+ [ True, True, True]],
194
+ fill_value=1e+20,
195
+ dtype=float32)
196
+
197
+ The dtype of the masked array matches the dtype of `arr`.
198
+
199
+ >>> arr.dtype
200
+ dtype('float32')
201
+ >>> ma.masked_all_like(arr).dtype
202
+ dtype('float32')
203
+
204
+ """
205
+ a = np.empty_like(arr).view(MaskedArray)
206
+ a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype))
207
+ return a
208
+
209
+
210
+ #####--------------------------------------------------------------------------
211
+ #---- --- Standard functions ---
212
+ #####--------------------------------------------------------------------------
213
+ class _fromnxfunction:
214
+ """
215
+ Defines a wrapper to adapt NumPy functions to masked arrays.
216
+
217
+
218
+ An instance of `_fromnxfunction` can be called with the same parameters
219
+ as the wrapped NumPy function. The docstring of `newfunc` is adapted from
220
+ the wrapped function as well, see `getdoc`.
221
+
222
+ This class should not be used directly. Instead, one of its extensions that
223
+ provides support for a specific type of input should be used.
224
+
225
+ Parameters
226
+ ----------
227
+ funcname : str
228
+ The name of the function to be adapted. The function should be
229
+ in the NumPy namespace (i.e. ``np.funcname``).
230
+
231
+ """
232
+
233
+ def __init__(self, funcname):
234
+ self.__name__ = funcname
235
+ self.__doc__ = self.getdoc()
236
+
237
+ def getdoc(self):
238
+ """
239
+ Retrieve the docstring and signature from the function.
240
+
241
+ The ``__doc__`` attribute of the function is used as the docstring for
242
+ the new masked array version of the function. A note on application
243
+ of the function to the mask is appended.
244
+
245
+ Parameters
246
+ ----------
247
+ None
248
+
249
+ """
250
+ npfunc = getattr(np, self.__name__, None)
251
+ doc = getattr(npfunc, '__doc__', None)
252
+ if doc:
253
+ sig = self.__name__ + ma.get_object_signature(npfunc)
254
+ doc = ma.doc_note(doc, "The function is applied to both the _data "
255
+ "and the _mask, if any.")
256
+ return '\n\n'.join((sig, doc))
257
+ return
258
+
259
+ def __call__(self, *args, **params):
260
+ pass
261
+
262
+
263
+ class _fromnxfunction_single(_fromnxfunction):
264
+ """
265
+ A version of `_fromnxfunction` that is called with a single array
266
+ argument followed by auxiliary args that are passed verbatim for
267
+ both the data and mask calls.
268
+ """
269
+ def __call__(self, x, *args, **params):
270
+ func = getattr(np, self.__name__)
271
+ if isinstance(x, ndarray):
272
+ _d = func(x.__array__(), *args, **params)
273
+ _m = func(getmaskarray(x), *args, **params)
274
+ return masked_array(_d, mask=_m)
275
+ else:
276
+ _d = func(np.asarray(x), *args, **params)
277
+ _m = func(getmaskarray(x), *args, **params)
278
+ return masked_array(_d, mask=_m)
279
+
280
+
281
+ class _fromnxfunction_seq(_fromnxfunction):
282
+ """
283
+ A version of `_fromnxfunction` that is called with a single sequence
284
+ of arrays followed by auxiliary args that are passed verbatim for
285
+ both the data and mask calls.
286
+ """
287
+ def __call__(self, x, *args, **params):
288
+ func = getattr(np, self.__name__)
289
+ _d = func(tuple([np.asarray(a) for a in x]), *args, **params)
290
+ _m = func(tuple([getmaskarray(a) for a in x]), *args, **params)
291
+ return masked_array(_d, mask=_m)
292
+
293
+
294
+ class _fromnxfunction_args(_fromnxfunction):
295
+ """
296
+ A version of `_fromnxfunction` that is called with multiple array
297
+ arguments. The first non-array-like input marks the beginning of the
298
+ arguments that are passed verbatim for both the data and mask calls.
299
+ Array arguments are processed independently and the results are
300
+ returned in a list. If only one array is found, the return value is
301
+ just the processed array instead of a list.
302
+ """
303
+ def __call__(self, *args, **params):
304
+ func = getattr(np, self.__name__)
305
+ arrays = []
306
+ args = list(args)
307
+ while len(args) > 0 and issequence(args[0]):
308
+ arrays.append(args.pop(0))
309
+ res = []
310
+ for x in arrays:
311
+ _d = func(np.asarray(x), *args, **params)
312
+ _m = func(getmaskarray(x), *args, **params)
313
+ res.append(masked_array(_d, mask=_m))
314
+ if len(arrays) == 1:
315
+ return res[0]
316
+ return res
317
+
318
+
319
+ class _fromnxfunction_allargs(_fromnxfunction):
320
+ """
321
+ A version of `_fromnxfunction` that is called with multiple array
322
+ arguments. Similar to `_fromnxfunction_args` except that all args
323
+ are converted to arrays even if they are not so already. This makes
324
+ it possible to process scalars as 1-D arrays. Only keyword arguments
325
+ are passed through verbatim for the data and mask calls. Arrays
326
+ arguments are processed independently and the results are returned
327
+ in a list. If only one arg is present, the return value is just the
328
+ processed array instead of a list.
329
+ """
330
+ def __call__(self, *args, **params):
331
+ func = getattr(np, self.__name__)
332
+ res = []
333
+ for x in args:
334
+ _d = func(np.asarray(x), **params)
335
+ _m = func(getmaskarray(x), **params)
336
+ res.append(masked_array(_d, mask=_m))
337
+ if len(args) == 1:
338
+ return res[0]
339
+ return res
340
+
341
+
342
+ atleast_1d = _fromnxfunction_allargs('atleast_1d')
343
+ atleast_2d = _fromnxfunction_allargs('atleast_2d')
344
+ atleast_3d = _fromnxfunction_allargs('atleast_3d')
345
+
346
+ vstack = row_stack = _fromnxfunction_seq('vstack')
347
+ hstack = _fromnxfunction_seq('hstack')
348
+ column_stack = _fromnxfunction_seq('column_stack')
349
+ dstack = _fromnxfunction_seq('dstack')
350
+ stack = _fromnxfunction_seq('stack')
351
+
352
+ hsplit = _fromnxfunction_single('hsplit')
353
+
354
+ diagflat = _fromnxfunction_single('diagflat')
355
+
356
+
357
+ #####--------------------------------------------------------------------------
358
+ #----
359
+ #####--------------------------------------------------------------------------
360
+ def flatten_inplace(seq):
361
+ """Flatten a sequence in place."""
362
+ k = 0
363
+ while (k != len(seq)):
364
+ while hasattr(seq[k], '__iter__'):
365
+ seq[k:(k + 1)] = seq[k]
366
+ k += 1
367
+ return seq
368
+
369
+
370
+ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
371
+ """
372
+ (This docstring should be overwritten)
373
+ """
374
+ arr = array(arr, copy=False, subok=True)
375
+ nd = arr.ndim
376
+ axis = normalize_axis_index(axis, nd)
377
+ ind = [0] * (nd - 1)
378
+ i = np.zeros(nd, 'O')
379
+ indlist = list(range(nd))
380
+ indlist.remove(axis)
381
+ i[axis] = slice(None, None)
382
+ outshape = np.asarray(arr.shape).take(indlist)
383
+ i.put(indlist, ind)
384
+ res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
385
+ # if res is a number, then we have a smaller output array
386
+ asscalar = np.isscalar(res)
387
+ if not asscalar:
388
+ try:
389
+ len(res)
390
+ except TypeError:
391
+ asscalar = True
392
+ # Note: we shouldn't set the dtype of the output from the first result
393
+ # so we force the type to object, and build a list of dtypes. We'll
394
+ # just take the largest, to avoid some downcasting
395
+ dtypes = []
396
+ if asscalar:
397
+ dtypes.append(np.asarray(res).dtype)
398
+ outarr = zeros(outshape, object)
399
+ outarr[tuple(ind)] = res
400
+ Ntot = np.prod(outshape)
401
+ k = 1
402
+ while k < Ntot:
403
+ # increment the index
404
+ ind[-1] += 1
405
+ n = -1
406
+ while (ind[n] >= outshape[n]) and (n > (1 - nd)):
407
+ ind[n - 1] += 1
408
+ ind[n] = 0
409
+ n -= 1
410
+ i.put(indlist, ind)
411
+ res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
412
+ outarr[tuple(ind)] = res
413
+ dtypes.append(asarray(res).dtype)
414
+ k += 1
415
+ else:
416
+ res = array(res, copy=False, subok=True)
417
+ j = i.copy()
418
+ j[axis] = ([slice(None, None)] * res.ndim)
419
+ j.put(indlist, ind)
420
+ Ntot = np.prod(outshape)
421
+ holdshape = outshape
422
+ outshape = list(arr.shape)
423
+ outshape[axis] = res.shape
424
+ dtypes.append(asarray(res).dtype)
425
+ outshape = flatten_inplace(outshape)
426
+ outarr = zeros(outshape, object)
427
+ outarr[tuple(flatten_inplace(j.tolist()))] = res
428
+ k = 1
429
+ while k < Ntot:
430
+ # increment the index
431
+ ind[-1] += 1
432
+ n = -1
433
+ while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
434
+ ind[n - 1] += 1
435
+ ind[n] = 0
436
+ n -= 1
437
+ i.put(indlist, ind)
438
+ j.put(indlist, ind)
439
+ res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
440
+ outarr[tuple(flatten_inplace(j.tolist()))] = res
441
+ dtypes.append(asarray(res).dtype)
442
+ k += 1
443
+ max_dtypes = np.dtype(np.asarray(dtypes).max())
444
+ if not hasattr(arr, '_mask'):
445
+ result = np.asarray(outarr, dtype=max_dtypes)
446
+ else:
447
+ result = asarray(outarr, dtype=max_dtypes)
448
+ result.fill_value = ma.default_fill_value(result)
449
+ return result
450
+ apply_along_axis.__doc__ = np.apply_along_axis.__doc__
451
+
452
+
453
+ def apply_over_axes(func, a, axes):
454
+ """
455
+ (This docstring will be overwritten)
456
+ """
457
+ val = asarray(a)
458
+ N = a.ndim
459
+ if array(axes).ndim == 0:
460
+ axes = (axes,)
461
+ for axis in axes:
462
+ if axis < 0:
463
+ axis = N + axis
464
+ args = (val, axis)
465
+ res = func(*args)
466
+ if res.ndim == val.ndim:
467
+ val = res
468
+ else:
469
+ res = ma.expand_dims(res, axis)
470
+ if res.ndim == val.ndim:
471
+ val = res
472
+ else:
473
+ raise ValueError("function is not returning "
474
+ "an array of the correct shape")
475
+ return val
476
+
477
+
478
+ if apply_over_axes.__doc__ is not None:
479
+ apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
480
+ :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
481
+ """
482
+
483
+ Examples
484
+ --------
485
+ >>> a = np.ma.arange(24).reshape(2,3,4)
486
+ >>> a[:,0,1] = np.ma.masked
487
+ >>> a[:,1,:] = np.ma.masked
488
+ >>> a
489
+ masked_array(
490
+ data=[[[0, --, 2, 3],
491
+ [--, --, --, --],
492
+ [8, 9, 10, 11]],
493
+ [[12, --, 14, 15],
494
+ [--, --, --, --],
495
+ [20, 21, 22, 23]]],
496
+ mask=[[[False, True, False, False],
497
+ [ True, True, True, True],
498
+ [False, False, False, False]],
499
+ [[False, True, False, False],
500
+ [ True, True, True, True],
501
+ [False, False, False, False]]],
502
+ fill_value=999999)
503
+ >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2])
504
+ masked_array(
505
+ data=[[[46],
506
+ [--],
507
+ [124]]],
508
+ mask=[[[False],
509
+ [ True],
510
+ [False]]],
511
+ fill_value=999999)
512
+
513
+ Tuple axis arguments to ufuncs are equivalent:
514
+
515
+ >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1))
516
+ masked_array(
517
+ data=[[[46],
518
+ [--],
519
+ [124]]],
520
+ mask=[[[False],
521
+ [ True],
522
+ [False]]],
523
+ fill_value=999999)
524
+ """
525
+
526
+
527
+ def average(a, axis=None, weights=None, returned=False, *,
528
+ keepdims=np._NoValue):
529
+ """
530
+ Return the weighted average of array over the given axis.
531
+
532
+ Parameters
533
+ ----------
534
+ a : array_like
535
+ Data to be averaged.
536
+ Masked entries are not taken into account in the computation.
537
+ axis : int, optional
538
+ Axis along which to average `a`. If None, averaging is done over
539
+ the flattened array.
540
+ weights : array_like, optional
541
+ The importance that each element has in the computation of the average.
542
+ The weights array can either be 1-D (in which case its length must be
543
+ the size of `a` along the given axis) or of the same shape as `a`.
544
+ If ``weights=None``, then all data in `a` are assumed to have a
545
+ weight equal to one. The 1-D calculation is::
546
+
547
+ avg = sum(a * weights) / sum(weights)
548
+
549
+ The only constraint on `weights` is that `sum(weights)` must not be 0.
550
+ returned : bool, optional
551
+ Flag indicating whether a tuple ``(result, sum of weights)``
552
+ should be returned as output (True), or just the result (False).
553
+ Default is False.
554
+ keepdims : bool, optional
555
+ If this is set to True, the axes which are reduced are left
556
+ in the result as dimensions with size one. With this option,
557
+ the result will broadcast correctly against the original `a`.
558
+ *Note:* `keepdims` will not work with instances of `numpy.matrix`
559
+ or other classes whose methods do not support `keepdims`.
560
+
561
+ .. versionadded:: 1.23.0
562
+
563
+ Returns
564
+ -------
565
+ average, [sum_of_weights] : (tuple of) scalar or MaskedArray
566
+ The average along the specified axis. When returned is `True`,
567
+ return a tuple with the average as the first element and the sum
568
+ of the weights as the second element. The return type is `np.float64`
569
+ if `a` is of integer type and floats smaller than `float64`, or the
570
+ input data-type, otherwise. If returned, `sum_of_weights` is always
571
+ `float64`.
572
+
573
+ Examples
574
+ --------
575
+ >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True])
576
+ >>> np.ma.average(a, weights=[3, 1, 0, 0])
577
+ 1.25
578
+
579
+ >>> x = np.ma.arange(6.).reshape(3, 2)
580
+ >>> x
581
+ masked_array(
582
+ data=[[0., 1.],
583
+ [2., 3.],
584
+ [4., 5.]],
585
+ mask=False,
586
+ fill_value=1e+20)
587
+ >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3],
588
+ ... returned=True)
589
+ >>> avg
590
+ masked_array(data=[2.6666666666666665, 3.6666666666666665],
591
+ mask=[False, False],
592
+ fill_value=1e+20)
593
+
594
+ With ``keepdims=True``, the following result has shape (3, 1).
595
+
596
+ >>> np.ma.average(x, axis=1, keepdims=True)
597
+ masked_array(
598
+ data=[[0.5],
599
+ [2.5],
600
+ [4.5]],
601
+ mask=False,
602
+ fill_value=1e+20)
603
+ """
604
+ a = asarray(a)
605
+ m = getmask(a)
606
+
607
+ # inspired by 'average' in numpy/lib/function_base.py
608
+
609
+ if keepdims is np._NoValue:
610
+ # Don't pass on the keepdims argument if one wasn't given.
611
+ keepdims_kw = {}
612
+ else:
613
+ keepdims_kw = {'keepdims': keepdims}
614
+
615
+ if weights is None:
616
+ avg = a.mean(axis, **keepdims_kw)
617
+ scl = avg.dtype.type(a.count(axis))
618
+ else:
619
+ wgt = asarray(weights)
620
+
621
+ if issubclass(a.dtype.type, (np.integer, np.bool_)):
622
+ result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
623
+ else:
624
+ result_dtype = np.result_type(a.dtype, wgt.dtype)
625
+
626
+ # Sanity checks
627
+ if a.shape != wgt.shape:
628
+ if axis is None:
629
+ raise TypeError(
630
+ "Axis must be specified when shapes of a and weights "
631
+ "differ.")
632
+ if wgt.ndim != 1:
633
+ raise TypeError(
634
+ "1D weights expected when shapes of a and weights differ.")
635
+ if wgt.shape[0] != a.shape[axis]:
636
+ raise ValueError(
637
+ "Length of weights not compatible with specified axis.")
638
+
639
+ # setup wgt to broadcast along axis
640
+ wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape, subok=True)
641
+ wgt = wgt.swapaxes(-1, axis)
642
+
643
+ if m is not nomask:
644
+ wgt = wgt*(~a.mask)
645
+ wgt.mask |= a.mask
646
+
647
+ scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw)
648
+ avg = np.multiply(a, wgt,
649
+ dtype=result_dtype).sum(axis, **keepdims_kw) / scl
650
+
651
+ if returned:
652
+ if scl.shape != avg.shape:
653
+ scl = np.broadcast_to(scl, avg.shape).copy()
654
+ return avg, scl
655
+ else:
656
+ return avg
657
+
658
+
659
+ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
660
+ """
661
+ Compute the median along the specified axis.
662
+
663
+ Returns the median of the array elements.
664
+
665
+ Parameters
666
+ ----------
667
+ a : array_like
668
+ Input array or object that can be converted to an array.
669
+ axis : int, optional
670
+ Axis along which the medians are computed. The default (None) is
671
+ to compute the median along a flattened version of the array.
672
+ out : ndarray, optional
673
+ Alternative output array in which to place the result. It must
674
+ have the same shape and buffer length as the expected output
675
+ but the type will be cast if necessary.
676
+ overwrite_input : bool, optional
677
+ If True, then allow use of memory of input array (a) for
678
+ calculations. The input array will be modified by the call to
679
+ median. This will save memory when you do not need to preserve
680
+ the contents of the input array. Treat the input as undefined,
681
+ but it will probably be fully or partially sorted. Default is
682
+ False. Note that, if `overwrite_input` is True, and the input
683
+ is not already an `ndarray`, an error will be raised.
684
+ keepdims : bool, optional
685
+ If this is set to True, the axes which are reduced are left
686
+ in the result as dimensions with size one. With this option,
687
+ the result will broadcast correctly against the input array.
688
+
689
+ .. versionadded:: 1.10.0
690
+
691
+ Returns
692
+ -------
693
+ median : ndarray
694
+ A new array holding the result is returned unless out is
695
+ specified, in which case a reference to out is returned.
696
+ Return data-type is `float64` for integers and floats smaller than
697
+ `float64`, or the input data-type, otherwise.
698
+
699
+ See Also
700
+ --------
701
+ mean
702
+
703
+ Notes
704
+ -----
705
+ Given a vector ``V`` with ``N`` non masked values, the median of ``V``
706
+ is the middle value of a sorted copy of ``V`` (``Vs``) - i.e.
707
+ ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2``
708
+ when ``N`` is even.
709
+
710
+ Examples
711
+ --------
712
+ >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
713
+ >>> np.ma.median(x)
714
+ 1.5
715
+
716
+ >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
717
+ >>> np.ma.median(x)
718
+ 2.5
719
+ >>> np.ma.median(x, axis=-1, overwrite_input=True)
720
+ masked_array(data=[2.0, 5.0],
721
+ mask=[False, False],
722
+ fill_value=1e+20)
723
+
724
+ """
725
+ if not hasattr(a, 'mask'):
726
+ m = np.median(getdata(a, subok=True), axis=axis,
727
+ out=out, overwrite_input=overwrite_input,
728
+ keepdims=keepdims)
729
+ if isinstance(m, np.ndarray) and 1 <= m.ndim:
730
+ return masked_array(m, copy=False)
731
+ else:
732
+ return m
733
+
734
+ return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out,
735
+ overwrite_input=overwrite_input)
736
+
737
+
738
+ def _median(a, axis=None, out=None, overwrite_input=False):
739
+ # when an unmasked NaN is present return it, so we need to sort the NaN
740
+ # values behind the mask
741
+ if np.issubdtype(a.dtype, np.inexact):
742
+ fill_value = np.inf
743
+ else:
744
+ fill_value = None
745
+ if overwrite_input:
746
+ if axis is None:
747
+ asorted = a.ravel()
748
+ asorted.sort(fill_value=fill_value)
749
+ else:
750
+ a.sort(axis=axis, fill_value=fill_value)
751
+ asorted = a
752
+ else:
753
+ asorted = sort(a, axis=axis, fill_value=fill_value)
754
+
755
+ if axis is None:
756
+ axis = 0
757
+ else:
758
+ axis = normalize_axis_index(axis, asorted.ndim)
759
+
760
+ if asorted.shape[axis] == 0:
761
+ # for empty axis integer indices fail so use slicing to get same result
762
+ # as median (which is mean of empty slice = nan)
763
+ indexer = [slice(None)] * asorted.ndim
764
+ indexer[axis] = slice(0, 0)
765
+ indexer = tuple(indexer)
766
+ return np.ma.mean(asorted[indexer], axis=axis, out=out)
767
+
768
+ if asorted.ndim == 1:
769
+ idx, odd = divmod(count(asorted), 2)
770
+ mid = asorted[idx + odd - 1:idx + 1]
771
+ if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0:
772
+ # avoid inf / x = masked
773
+ s = mid.sum(out=out)
774
+ if not odd:
775
+ s = np.true_divide(s, 2., casting='safe', out=out)
776
+ s = np.lib.utils._median_nancheck(asorted, s, axis)
777
+ else:
778
+ s = mid.mean(out=out)
779
+
780
+ # if result is masked either the input contained enough
781
+ # minimum_fill_value so that it would be the median or all values
782
+ # masked
783
+ if np.ma.is_masked(s) and not np.all(asorted.mask):
784
+ return np.ma.minimum_fill_value(asorted)
785
+ return s
786
+
787
+ counts = count(asorted, axis=axis, keepdims=True)
788
+ h = counts // 2
789
+
790
+ # duplicate high if odd number of elements so mean does nothing
791
+ odd = counts % 2 == 1
792
+ l = np.where(odd, h, h-1)
793
+
794
+ lh = np.concatenate([l,h], axis=axis)
795
+
796
+ # get low and high median
797
+ low_high = np.take_along_axis(asorted, lh, axis=axis)
798
+
799
+ def replace_masked(s):
800
+ # Replace masked entries with minimum_full_value unless it all values
801
+ # are masked. This is required as the sort order of values equal or
802
+ # larger than the fill value is undefined and a valid value placed
803
+ # elsewhere, e.g. [4, --, inf].
804
+ if np.ma.is_masked(s):
805
+ rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
806
+ s.data[rep] = np.ma.minimum_fill_value(asorted)
807
+ s.mask[rep] = False
808
+
809
+ replace_masked(low_high)
810
+
811
+ if np.issubdtype(asorted.dtype, np.inexact):
812
+ # avoid inf / x = masked
813
+ s = np.ma.sum(low_high, axis=axis, out=out)
814
+ np.true_divide(s.data, 2., casting='unsafe', out=s.data)
815
+
816
+ s = np.lib.utils._median_nancheck(asorted, s, axis)
817
+ else:
818
+ s = np.ma.mean(low_high, axis=axis, out=out)
819
+
820
+ return s
821
+
822
+
823
+ def compress_nd(x, axis=None):
824
+ """Suppress slices from multiple dimensions which contain masked values.
825
+
826
+ Parameters
827
+ ----------
828
+ x : array_like, MaskedArray
829
+ The array to operate on. If not a MaskedArray instance (or if no array
830
+ elements are masked), `x` is interpreted as a MaskedArray with `mask`
831
+ set to `nomask`.
832
+ axis : tuple of ints or int, optional
833
+ Which dimensions to suppress slices from can be configured with this
834
+ parameter.
835
+ - If axis is a tuple of ints, those are the axes to suppress slices from.
836
+ - If axis is an int, then that is the only axis to suppress slices from.
837
+ - If axis is None, all axis are selected.
838
+
839
+ Returns
840
+ -------
841
+ compress_array : ndarray
842
+ The compressed array.
843
+ """
844
+ x = asarray(x)
845
+ m = getmask(x)
846
+ # Set axis to tuple of ints
847
+ if axis is None:
848
+ axis = tuple(range(x.ndim))
849
+ else:
850
+ axis = normalize_axis_tuple(axis, x.ndim)
851
+
852
+ # Nothing is masked: return x
853
+ if m is nomask or not m.any():
854
+ return x._data
855
+ # All is masked: return empty
856
+ if m.all():
857
+ return nxarray([])
858
+ # Filter elements through boolean indexing
859
+ data = x._data
860
+ for ax in axis:
861
+ axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))
862
+ data = data[(slice(None),)*ax + (~m.any(axis=axes),)]
863
+ return data
864
+
865
+
866
+ def compress_rowcols(x, axis=None):
867
+ """
868
+ Suppress the rows and/or columns of a 2-D array that contain
869
+ masked values.
870
+
871
+ The suppression behavior is selected with the `axis` parameter.
872
+
873
+ - If axis is None, both rows and columns are suppressed.
874
+ - If axis is 0, only rows are suppressed.
875
+ - If axis is 1 or -1, only columns are suppressed.
876
+
877
+ Parameters
878
+ ----------
879
+ x : array_like, MaskedArray
880
+ The array to operate on. If not a MaskedArray instance (or if no array
881
+ elements are masked), `x` is interpreted as a MaskedArray with
882
+ `mask` set to `nomask`. Must be a 2D array.
883
+ axis : int, optional
884
+ Axis along which to perform the operation. Default is None.
885
+
886
+ Returns
887
+ -------
888
+ compressed_array : ndarray
889
+ The compressed array.
890
+
891
+ Examples
892
+ --------
893
+ >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
894
+ ... [1, 0, 0],
895
+ ... [0, 0, 0]])
896
+ >>> x
897
+ masked_array(
898
+ data=[[--, 1, 2],
899
+ [--, 4, 5],
900
+ [6, 7, 8]],
901
+ mask=[[ True, False, False],
902
+ [ True, False, False],
903
+ [False, False, False]],
904
+ fill_value=999999)
905
+
906
+ >>> np.ma.compress_rowcols(x)
907
+ array([[7, 8]])
908
+ >>> np.ma.compress_rowcols(x, 0)
909
+ array([[6, 7, 8]])
910
+ >>> np.ma.compress_rowcols(x, 1)
911
+ array([[1, 2],
912
+ [4, 5],
913
+ [7, 8]])
914
+
915
+ """
916
+ if asarray(x).ndim != 2:
917
+ raise NotImplementedError("compress_rowcols works for 2D arrays only.")
918
+ return compress_nd(x, axis=axis)
919
+
920
+
921
+ def compress_rows(a):
922
+ """
923
+ Suppress whole rows of a 2-D array that contain masked values.
924
+
925
+ This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
926
+ `compress_rowcols` for details.
927
+
928
+ See Also
929
+ --------
930
+ compress_rowcols
931
+
932
+ """
933
+ a = asarray(a)
934
+ if a.ndim != 2:
935
+ raise NotImplementedError("compress_rows works for 2D arrays only.")
936
+ return compress_rowcols(a, 0)
937
+
938
+
939
+ def compress_cols(a):
940
+ """
941
+ Suppress whole columns of a 2-D array that contain masked values.
942
+
943
+ This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see
944
+ `compress_rowcols` for details.
945
+
946
+ See Also
947
+ --------
948
+ compress_rowcols
949
+
950
+ """
951
+ a = asarray(a)
952
+ if a.ndim != 2:
953
+ raise NotImplementedError("compress_cols works for 2D arrays only.")
954
+ return compress_rowcols(a, 1)
955
+
956
+
957
+ def mask_rowcols(a, axis=None):
958
+ """
959
+ Mask rows and/or columns of a 2D array that contain masked values.
960
+
961
+ Mask whole rows and/or columns of a 2D array that contain
962
+ masked values. The masking behavior is selected using the
963
+ `axis` parameter.
964
+
965
+ - If `axis` is None, rows *and* columns are masked.
966
+ - If `axis` is 0, only rows are masked.
967
+ - If `axis` is 1 or -1, only columns are masked.
968
+
969
+ Parameters
970
+ ----------
971
+ a : array_like, MaskedArray
972
+ The array to mask. If not a MaskedArray instance (or if no array
973
+ elements are masked), the result is a MaskedArray with `mask` set
974
+ to `nomask` (False). Must be a 2D array.
975
+ axis : int, optional
976
+ Axis along which to perform the operation. If None, applies to a
977
+ flattened version of the array.
978
+
979
+ Returns
980
+ -------
981
+ a : MaskedArray
982
+ A modified version of the input array, masked depending on the value
983
+ of the `axis` parameter.
984
+
985
+ Raises
986
+ ------
987
+ NotImplementedError
988
+ If input array `a` is not 2D.
989
+
990
+ See Also
991
+ --------
992
+ mask_rows : Mask rows of a 2D array that contain masked values.
993
+ mask_cols : Mask cols of a 2D array that contain masked values.
994
+ masked_where : Mask where a condition is met.
995
+
996
+ Notes
997
+ -----
998
+ The input array's mask is modified by this function.
999
+
1000
+ Examples
1001
+ --------
1002
+ >>> import numpy.ma as ma
1003
+ >>> a = np.zeros((3, 3), dtype=int)
1004
+ >>> a[1, 1] = 1
1005
+ >>> a
1006
+ array([[0, 0, 0],
1007
+ [0, 1, 0],
1008
+ [0, 0, 0]])
1009
+ >>> a = ma.masked_equal(a, 1)
1010
+ >>> a
1011
+ masked_array(
1012
+ data=[[0, 0, 0],
1013
+ [0, --, 0],
1014
+ [0, 0, 0]],
1015
+ mask=[[False, False, False],
1016
+ [False, True, False],
1017
+ [False, False, False]],
1018
+ fill_value=1)
1019
+ >>> ma.mask_rowcols(a)
1020
+ masked_array(
1021
+ data=[[0, --, 0],
1022
+ [--, --, --],
1023
+ [0, --, 0]],
1024
+ mask=[[False, True, False],
1025
+ [ True, True, True],
1026
+ [False, True, False]],
1027
+ fill_value=1)
1028
+
1029
+ """
1030
+ a = array(a, subok=False)
1031
+ if a.ndim != 2:
1032
+ raise NotImplementedError("mask_rowcols works for 2D arrays only.")
1033
+ m = getmask(a)
1034
+ # Nothing is masked: return a
1035
+ if m is nomask or not m.any():
1036
+ return a
1037
+ maskedval = m.nonzero()
1038
+ a._mask = a._mask.copy()
1039
+ if not axis:
1040
+ a[np.unique(maskedval[0])] = masked
1041
+ if axis in [None, 1, -1]:
1042
+ a[:, np.unique(maskedval[1])] = masked
1043
+ return a
1044
+
1045
+
1046
+ def mask_rows(a, axis=np._NoValue):
1047
+ """
1048
+ Mask rows of a 2D array that contain masked values.
1049
+
1050
+ This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0.
1051
+
1052
+ See Also
1053
+ --------
1054
+ mask_rowcols : Mask rows and/or columns of a 2D array.
1055
+ masked_where : Mask where a condition is met.
1056
+
1057
+ Examples
1058
+ --------
1059
+ >>> import numpy.ma as ma
1060
+ >>> a = np.zeros((3, 3), dtype=int)
1061
+ >>> a[1, 1] = 1
1062
+ >>> a
1063
+ array([[0, 0, 0],
1064
+ [0, 1, 0],
1065
+ [0, 0, 0]])
1066
+ >>> a = ma.masked_equal(a, 1)
1067
+ >>> a
1068
+ masked_array(
1069
+ data=[[0, 0, 0],
1070
+ [0, --, 0],
1071
+ [0, 0, 0]],
1072
+ mask=[[False, False, False],
1073
+ [False, True, False],
1074
+ [False, False, False]],
1075
+ fill_value=1)
1076
+
1077
+ >>> ma.mask_rows(a)
1078
+ masked_array(
1079
+ data=[[0, 0, 0],
1080
+ [--, --, --],
1081
+ [0, 0, 0]],
1082
+ mask=[[False, False, False],
1083
+ [ True, True, True],
1084
+ [False, False, False]],
1085
+ fill_value=1)
1086
+
1087
+ """
1088
+ if axis is not np._NoValue:
1089
+ # remove the axis argument when this deprecation expires
1090
+ # NumPy 1.18.0, 2019-11-28
1091
+ warnings.warn(
1092
+ "The axis argument has always been ignored, in future passing it "
1093
+ "will raise TypeError", DeprecationWarning, stacklevel=2)
1094
+ return mask_rowcols(a, 0)
1095
+
1096
+
1097
+ def mask_cols(a, axis=np._NoValue):
1098
+ """
1099
+ Mask columns of a 2D array that contain masked values.
1100
+
1101
+ This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1.
1102
+
1103
+ See Also
1104
+ --------
1105
+ mask_rowcols : Mask rows and/or columns of a 2D array.
1106
+ masked_where : Mask where a condition is met.
1107
+
1108
+ Examples
1109
+ --------
1110
+ >>> import numpy.ma as ma
1111
+ >>> a = np.zeros((3, 3), dtype=int)
1112
+ >>> a[1, 1] = 1
1113
+ >>> a
1114
+ array([[0, 0, 0],
1115
+ [0, 1, 0],
1116
+ [0, 0, 0]])
1117
+ >>> a = ma.masked_equal(a, 1)
1118
+ >>> a
1119
+ masked_array(
1120
+ data=[[0, 0, 0],
1121
+ [0, --, 0],
1122
+ [0, 0, 0]],
1123
+ mask=[[False, False, False],
1124
+ [False, True, False],
1125
+ [False, False, False]],
1126
+ fill_value=1)
1127
+ >>> ma.mask_cols(a)
1128
+ masked_array(
1129
+ data=[[0, --, 0],
1130
+ [0, --, 0],
1131
+ [0, --, 0]],
1132
+ mask=[[False, True, False],
1133
+ [False, True, False],
1134
+ [False, True, False]],
1135
+ fill_value=1)
1136
+
1137
+ """
1138
+ if axis is not np._NoValue:
1139
+ # remove the axis argument when this deprecation expires
1140
+ # NumPy 1.18.0, 2019-11-28
1141
+ warnings.warn(
1142
+ "The axis argument has always been ignored, in future passing it "
1143
+ "will raise TypeError", DeprecationWarning, stacklevel=2)
1144
+ return mask_rowcols(a, 1)
1145
+
1146
+
1147
+ #####--------------------------------------------------------------------------
1148
+ #---- --- arraysetops ---
1149
+ #####--------------------------------------------------------------------------
1150
+
1151
+ def ediff1d(arr, to_end=None, to_begin=None):
1152
+ """
1153
+ Compute the differences between consecutive elements of an array.
1154
+
1155
+ This function is the equivalent of `numpy.ediff1d` that takes masked
1156
+ values into account, see `numpy.ediff1d` for details.
1157
+
1158
+ See Also
1159
+ --------
1160
+ numpy.ediff1d : Equivalent function for ndarrays.
1161
+
1162
+ """
1163
+ arr = ma.asanyarray(arr).flat
1164
+ ed = arr[1:] - arr[:-1]
1165
+ arrays = [ed]
1166
+ #
1167
+ if to_begin is not None:
1168
+ arrays.insert(0, to_begin)
1169
+ if to_end is not None:
1170
+ arrays.append(to_end)
1171
+ #
1172
+ if len(arrays) != 1:
1173
+ # We'll save ourselves a copy of a potentially large array in the common
1174
+ # case where neither to_begin or to_end was given.
1175
+ ed = hstack(arrays)
1176
+ #
1177
+ return ed
1178
+
1179
+
1180
+ def unique(ar1, return_index=False, return_inverse=False):
1181
+ """
1182
+ Finds the unique elements of an array.
1183
+
1184
+ Masked values are considered the same element (masked). The output array
1185
+ is always a masked array. See `numpy.unique` for more details.
1186
+
1187
+ See Also
1188
+ --------
1189
+ numpy.unique : Equivalent function for ndarrays.
1190
+
1191
+ Examples
1192
+ --------
1193
+ >>> import numpy.ma as ma
1194
+ >>> a = [1, 2, 1000, 2, 3]
1195
+ >>> mask = [0, 0, 1, 0, 0]
1196
+ >>> masked_a = ma.masked_array(a, mask)
1197
+ >>> masked_a
1198
+ masked_array(data=[1, 2, --, 2, 3],
1199
+ mask=[False, False, True, False, False],
1200
+ fill_value=999999)
1201
+ >>> ma.unique(masked_a)
1202
+ masked_array(data=[1, 2, 3, --],
1203
+ mask=[False, False, False, True],
1204
+ fill_value=999999)
1205
+ >>> ma.unique(masked_a, return_index=True)
1206
+ (masked_array(data=[1, 2, 3, --],
1207
+ mask=[False, False, False, True],
1208
+ fill_value=999999), array([0, 1, 4, 2]))
1209
+ >>> ma.unique(masked_a, return_inverse=True)
1210
+ (masked_array(data=[1, 2, 3, --],
1211
+ mask=[False, False, False, True],
1212
+ fill_value=999999), array([0, 1, 3, 1, 2]))
1213
+ >>> ma.unique(masked_a, return_index=True, return_inverse=True)
1214
+ (masked_array(data=[1, 2, 3, --],
1215
+ mask=[False, False, False, True],
1216
+ fill_value=999999), array([0, 1, 4, 2]), array([0, 1, 3, 1, 2]))
1217
+ """
1218
+ output = np.unique(ar1,
1219
+ return_index=return_index,
1220
+ return_inverse=return_inverse)
1221
+ if isinstance(output, tuple):
1222
+ output = list(output)
1223
+ output[0] = output[0].view(MaskedArray)
1224
+ output = tuple(output)
1225
+ else:
1226
+ output = output.view(MaskedArray)
1227
+ return output
1228
+
1229
+
1230
+ def intersect1d(ar1, ar2, assume_unique=False):
1231
+ """
1232
+ Returns the unique elements common to both arrays.
1233
+
1234
+ Masked values are considered equal one to the other.
1235
+ The output is always a masked array.
1236
+
1237
+ See `numpy.intersect1d` for more details.
1238
+
1239
+ See Also
1240
+ --------
1241
+ numpy.intersect1d : Equivalent function for ndarrays.
1242
+
1243
+ Examples
1244
+ --------
1245
+ >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1])
1246
+ >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1])
1247
+ >>> np.ma.intersect1d(x, y)
1248
+ masked_array(data=[1, 3, --],
1249
+ mask=[False, False, True],
1250
+ fill_value=999999)
1251
+
1252
+ """
1253
+ if assume_unique:
1254
+ aux = ma.concatenate((ar1, ar2))
1255
+ else:
1256
+ # Might be faster than unique( intersect1d( ar1, ar2 ) )?
1257
+ aux = ma.concatenate((unique(ar1), unique(ar2)))
1258
+ aux.sort()
1259
+ return aux[:-1][aux[1:] == aux[:-1]]
1260
+
1261
+
1262
+ def setxor1d(ar1, ar2, assume_unique=False):
1263
+ """
1264
+ Set exclusive-or of 1-D arrays with unique elements.
1265
+
1266
+ The output is always a masked array. See `numpy.setxor1d` for more details.
1267
+
1268
+ See Also
1269
+ --------
1270
+ numpy.setxor1d : Equivalent function for ndarrays.
1271
+
1272
+ """
1273
+ if not assume_unique:
1274
+ ar1 = unique(ar1)
1275
+ ar2 = unique(ar2)
1276
+
1277
+ aux = ma.concatenate((ar1, ar2))
1278
+ if aux.size == 0:
1279
+ return aux
1280
+ aux.sort()
1281
+ auxf = aux.filled()
1282
+ # flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
1283
+ flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
1284
+ # flag2 = ediff1d( flag ) == 0
1285
+ flag2 = (flag[1:] == flag[:-1])
1286
+ return aux[flag2]
1287
+
1288
+
1289
+ def in1d(ar1, ar2, assume_unique=False, invert=False):
1290
+ """
1291
+ Test whether each element of an array is also present in a second
1292
+ array.
1293
+
1294
+ The output is always a masked array. See `numpy.in1d` for more details.
1295
+
1296
+ We recommend using :func:`isin` instead of `in1d` for new code.
1297
+
1298
+ See Also
1299
+ --------
1300
+ isin : Version of this function that preserves the shape of ar1.
1301
+ numpy.in1d : Equivalent function for ndarrays.
1302
+
1303
+ Notes
1304
+ -----
1305
+ .. versionadded:: 1.4.0
1306
+
1307
+ """
1308
+ if not assume_unique:
1309
+ ar1, rev_idx = unique(ar1, return_inverse=True)
1310
+ ar2 = unique(ar2)
1311
+
1312
+ ar = ma.concatenate((ar1, ar2))
1313
+ # We need this to be a stable sort, so always use 'mergesort'
1314
+ # here. The values from the first array should always come before
1315
+ # the values from the second array.
1316
+ order = ar.argsort(kind='mergesort')
1317
+ sar = ar[order]
1318
+ if invert:
1319
+ bool_ar = (sar[1:] != sar[:-1])
1320
+ else:
1321
+ bool_ar = (sar[1:] == sar[:-1])
1322
+ flag = ma.concatenate((bool_ar, [invert]))
1323
+ indx = order.argsort(kind='mergesort')[:len(ar1)]
1324
+
1325
+ if assume_unique:
1326
+ return flag[indx]
1327
+ else:
1328
+ return flag[indx][rev_idx]
1329
+
1330
+
1331
+ def isin(element, test_elements, assume_unique=False, invert=False):
1332
+ """
1333
+ Calculates `element in test_elements`, broadcasting over
1334
+ `element` only.
1335
+
1336
+ The output is always a masked array of the same shape as `element`.
1337
+ See `numpy.isin` for more details.
1338
+
1339
+ See Also
1340
+ --------
1341
+ in1d : Flattened version of this function.
1342
+ numpy.isin : Equivalent function for ndarrays.
1343
+
1344
+ Notes
1345
+ -----
1346
+ .. versionadded:: 1.13.0
1347
+
1348
+ """
1349
+ element = ma.asarray(element)
1350
+ return in1d(element, test_elements, assume_unique=assume_unique,
1351
+ invert=invert).reshape(element.shape)
1352
+
1353
+
1354
+ def union1d(ar1, ar2):
1355
+ """
1356
+ Union of two arrays.
1357
+
1358
+ The output is always a masked array. See `numpy.union1d` for more details.
1359
+
1360
+ See Also
1361
+ --------
1362
+ numpy.union1d : Equivalent function for ndarrays.
1363
+
1364
+ """
1365
+ return unique(ma.concatenate((ar1, ar2), axis=None))
1366
+
1367
+
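A quick illustrative call for `np.ma.union1d`, again with ad-hoc inputs:

    import numpy as np

    a = np.ma.array([1, 2, 2, 9], mask=[0, 0, 0, 1])
    b = np.ma.array([2, 3, 4])
    # Sorted unique values drawn from either input; masked entries are
    # treated as a single element and sort to the end.
    print(np.ma.union1d(a, b))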
1368
+ def setdiff1d(ar1, ar2, assume_unique=False):
1369
+ """
1370
+ Set difference of 1D arrays with unique elements.
1371
+
1372
+ The output is always a masked array. See `numpy.setdiff1d` for more
1373
+ details.
1374
+
1375
+ See Also
1376
+ --------
1377
+ numpy.setdiff1d : Equivalent function for ndarrays.
1378
+
1379
+ Examples
1380
+ --------
1381
+ >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
1382
+ >>> np.ma.setdiff1d(x, [1, 2])
1383
+ masked_array(data=[3, --],
1384
+ mask=[False, True],
1385
+ fill_value=999999)
1386
+
1387
+ """
1388
+ if assume_unique:
1389
+ ar1 = ma.asarray(ar1).ravel()
1390
+ else:
1391
+ ar1 = unique(ar1)
1392
+ ar2 = unique(ar2)
1393
+ return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
1394
+
1395
+
1396
+ ###############################################################################
1397
+ # Covariance #
1398
+ ###############################################################################
1399
+
1400
+
1401
+ def _covhelper(x, y=None, rowvar=True, allow_masked=True):
1402
+ """
1403
+ Private function for the computation of covariance and correlation
1404
+ coefficients.
1405
+
1406
+ """
1407
+ x = ma.array(x, ndmin=2, copy=True, dtype=float)
1408
+ xmask = ma.getmaskarray(x)
1409
+ # Quick exit if we can't process masked data
1410
+ if not allow_masked and xmask.any():
1411
+ raise ValueError("Cannot process masked data.")
1412
+ #
1413
+ if x.shape[0] == 1:
1414
+ rowvar = True
1415
+ # Make sure that rowvar is either 0 or 1
1416
+ rowvar = int(bool(rowvar))
1417
+ axis = 1 - rowvar
1418
+ if rowvar:
1419
+ tup = (slice(None), None)
1420
+ else:
1421
+ tup = (None, slice(None))
1422
+ #
1423
+ if y is None:
1424
+ xnotmask = np.logical_not(xmask).astype(int)
1425
+ else:
1426
+ y = array(y, copy=False, ndmin=2, dtype=float)
1427
+ ymask = ma.getmaskarray(y)
1428
+ if not allow_masked and ymask.any():
1429
+ raise ValueError("Cannot process masked data.")
1430
+ if xmask.any() or ymask.any():
1431
+ if y.shape == x.shape:
1432
+ # Define some common mask
1433
+ common_mask = np.logical_or(xmask, ymask)
1434
+ if common_mask is not nomask:
1435
+ xmask = x._mask = y._mask = ymask = common_mask
1436
+ x._sharedmask = False
1437
+ y._sharedmask = False
1438
+ x = ma.concatenate((x, y), axis)
1439
+ xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
1440
+ x -= x.mean(axis=rowvar)[tup]
1441
+ return (x, xnotmask, rowvar)
1442
+
1443
+
1444
+ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
1445
+ """
1446
+ Estimate the covariance matrix.
1447
+
1448
+ Except for the handling of missing data this function does the same as
1449
+ `numpy.cov`. For more details and examples, see `numpy.cov`.
1450
+
1451
+ By default, masked values are recognized as such. If `x` and `y` have the
1452
+ same shape, a common mask is allocated: if ``x[i,j]`` is masked, then
1453
+ ``y[i,j]`` will also be masked.
1454
+ Setting `allow_masked` to False will raise an exception if values are
1455
+ missing in either of the input arrays.
1456
+
1457
+ Parameters
1458
+ ----------
1459
+ x : array_like
1460
+ A 1-D or 2-D array containing multiple variables and observations.
1461
+ Each row of `x` represents a variable, and each column a single
1462
+ observation of all those variables. Also see `rowvar` below.
1463
+ y : array_like, optional
1464
+ An additional set of variables and observations. `y` has the same
1465
+ shape as `x`.
1466
+ rowvar : bool, optional
1467
+ If `rowvar` is True (default), then each row represents a
1468
+ variable, with observations in the columns. Otherwise, the relationship
1469
+ is transposed: each column represents a variable, while the rows
1470
+ contain observations.
1471
+ bias : bool, optional
1472
+ Default normalization (False) is by ``(N-1)``, where ``N`` is the
1473
+ number of observations given (unbiased estimate). If `bias` is True,
1474
+ then normalization is by ``N``. This keyword can be overridden by
1475
+ the keyword ``ddof`` in numpy versions >= 1.5.
1476
+ allow_masked : bool, optional
1477
+ If True, masked values are propagated pair-wise: if a value is masked
1478
+ in `x`, the corresponding value is masked in `y`.
1479
+ If False, raises a `ValueError` exception when some values are missing.
1480
+ ddof : {None, int}, optional
1481
+ If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
1482
+ the number of observations; this overrides the value implied by
1483
+ ``bias``. The default value is ``None``.
1484
+
1485
+ .. versionadded:: 1.5
1486
+
1487
+ Raises
1488
+ ------
1489
+ ValueError
1490
+ Raised if some values are missing and `allow_masked` is False.
1491
+
1492
+ See Also
1493
+ --------
1494
+ numpy.cov
1495
+
1496
+ """
1497
+ # Check inputs
1498
+ if ddof is not None and ddof != int(ddof):
1499
+ raise ValueError("ddof must be an integer")
1500
+ # Set up ddof
1501
+ if ddof is None:
1502
+ if bias:
1503
+ ddof = 0
1504
+ else:
1505
+ ddof = 1
1506
+
1507
+ (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
1508
+ if not rowvar:
1509
+ fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
1510
+ result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
1511
+ else:
1512
+ fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
1513
+ result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
1514
+ return result
1515
+
1516
+
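A brief sketch of the pair-wise mask propagation described in the docstring above; the inputs are invented for illustration:

    import numpy as np

    x = np.ma.array([1.0, 2.0, 3.0, 4.0], mask=[0, 0, 0, 1])
    y = np.ma.array([2.0, 4.0, 6.0, 8.0])
    # x and y have the same shape, so the mask of x[3] is copied onto y[3]
    # and only the first three observations enter the estimate.
    print(np.ma.cov(x, y))
    # With allow_masked=False the same call would raise ValueError,
    # because x contains masked data.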
1517
+ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
1518
+ ddof=np._NoValue):
1519
+ """
1520
+ Return Pearson product-moment correlation coefficients.
1521
+
1522
+ Except for the handling of missing data this function does the same as
1523
+ `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.
1524
+
1525
+ Parameters
1526
+ ----------
1527
+ x : array_like
1528
+ A 1-D or 2-D array containing multiple variables and observations.
1529
+ Each row of `x` represents a variable, and each column a single
1530
+ observation of all those variables. Also see `rowvar` below.
1531
+ y : array_like, optional
1532
+ An additional set of variables and observations. `y` has the same
1533
+ shape as `x`.
1534
+ rowvar : bool, optional
1535
+ If `rowvar` is True (default), then each row represents a
1536
+ variable, with observations in the columns. Otherwise, the relationship
1537
+ is transposed: each column represents a variable, while the rows
1538
+ contain observations.
1539
+ bias : _NoValue, optional
1540
+ Has no effect, do not use.
1541
+
1542
+ .. deprecated:: 1.10.0
1543
+ allow_masked : bool, optional
1544
+ If True, masked values are propagated pair-wise: if a value is masked
1545
+ in `x`, the corresponding value is masked in `y`.
1546
+ If False, raises an exception. Because `bias` is deprecated, this
1547
+ argument needs to be treated as keyword only to avoid a warning.
1548
+ ddof : _NoValue, optional
1549
+ Has no effect, do not use.
1550
+
1551
+ .. deprecated:: 1.10.0
1552
+
1553
+ See Also
1554
+ --------
1555
+ numpy.corrcoef : Equivalent function in top-level NumPy module.
1556
+ cov : Estimate the covariance matrix.
1557
+
1558
+ Notes
1559
+ -----
1560
+ This function accepts but discards arguments `bias` and `ddof`. This is
1561
+ for backwards compatibility with previous versions of this function. These
1562
+ arguments had no effect on the return values of the function and can be
1563
+ safely ignored in this and previous versions of numpy.
1564
+ """
1565
+ msg = 'bias and ddof have no effect and are deprecated'
1566
+ if bias is not np._NoValue or ddof is not np._NoValue:
1567
+ # 2015-03-15, 1.10
1568
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
1569
+ # Get the data
1570
+ (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
1571
+ # Compute the covariance matrix
1572
+ if not rowvar:
1573
+ fact = np.dot(xnotmask.T, xnotmask) * 1.
1574
+ c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
1575
+ else:
1576
+ fact = np.dot(xnotmask, xnotmask.T) * 1.
1577
+ c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
1578
+ # Check whether we have a scalar
1579
+ try:
1580
+ diag = ma.diagonal(c)
1581
+ except ValueError:
1582
+ return 1
1583
+ #
1584
+ if xnotmask.all():
1585
+ _denom = ma.sqrt(ma.multiply.outer(diag, diag))
1586
+ else:
1587
+ _denom = diagflat(diag)
1588
+ _denom._sharedmask = False # We know return is always a copy
1589
+ n = x.shape[1 - rowvar]
1590
+ if rowvar:
1591
+ for i in range(n - 1):
1592
+ for j in range(i + 1, n):
1593
+ _x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
1594
+ _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
1595
+ else:
1596
+ for i in range(n - 1):
1597
+ for j in range(i + 1, n):
1598
+ _x = mask_cols(
1599
+ vstack((x[:, i], x[:, j]))).var(axis=1)
1600
+ _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
1601
+ return c / _denom
1602
+
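An illustrative call to `np.ma.corrcoef` with one masked observation; the two rows below are arbitrary example variables:

    import numpy as np

    obs = np.ma.array([[1.0, 2.0, 3.0, 4.0],
                       [4.0, 3.0, 2.0, 1.0]],
                      mask=[[0, 0, 0, 1],
                            [0, 0, 0, 0]])
    # Rows are variables (rowvar=True by default). Masked observations are
    # handled pair-wise, and the result is a 2x2 masked correlation matrix.
    print(np.ma.corrcoef(obs))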
1603
+ #####--------------------------------------------------------------------------
1604
+ #---- --- Concatenation helpers ---
1605
+ #####--------------------------------------------------------------------------
1606
+
1607
+ class MAxisConcatenator(AxisConcatenator):
1608
+ """
1609
+ Translate slice objects to concatenation along an axis.
1610
+
1611
+ For documentation on usage, see `mr_class`.
1612
+
1613
+ See Also
1614
+ --------
1615
+ mr_class
1616
+
1617
+ """
1618
+ concatenate = staticmethod(concatenate)
1619
+
1620
+ @classmethod
1621
+ def makemat(cls, arr):
1622
+ # There used to be a view as np.matrix here, but we may eventually
1623
+ # deprecate that class. In preparation, we use the unmasked version
1624
+ # to construct the matrix (with copy=False for backwards compatibility
1625
+ # with the .view)
1626
+ data = super().makemat(arr.data, copy=False)
1627
+ return array(data, mask=arr.mask)
1628
+
1629
+ def __getitem__(self, key):
1630
+ # matrix builder syntax, like 'a, b; c, d'
1631
+ if isinstance(key, str):
1632
+ raise MAError("Unavailable for masked array.")
1633
+
1634
+ return super().__getitem__(key)
1635
+
1636
+
1637
+ class mr_class(MAxisConcatenator):
1638
+ """
1639
+ Translate slice objects to concatenation along the first axis.
1640
+
1641
+ This is the masked array version of `lib.index_tricks.RClass`.
1642
+
1643
+ See Also
1644
+ --------
1645
+ lib.index_tricks.RClass
1646
+
1647
+ Examples
1648
+ --------
1649
+ >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
1650
+ masked_array(data=[1, 2, 3, ..., 4, 5, 6],
1651
+ mask=False,
1652
+ fill_value=999999)
1653
+
1654
+ """
1655
+ def __init__(self):
1656
+ MAxisConcatenator.__init__(self, 0)
1657
+
1658
+ mr_ = mr_class()
1659
+
1660
+
1661
+ #####--------------------------------------------------------------------------
1662
+ #---- Find unmasked data ---
1663
+ #####--------------------------------------------------------------------------
1664
+
1665
+ def ndenumerate(a, compressed=True):
1666
+ """
1667
+ Multidimensional index iterator.
1668
+
1669
+ Return an iterator yielding pairs of array coordinates and values,
1670
+ skipping elements that are masked. With `compressed=False`,
1671
+ `ma.masked` is yielded as the value of masked elements. This
1672
+ behavior differs from that of `numpy.ndenumerate`, which yields the
1673
+ value of the underlying data array.
1674
+
1675
+ Notes
1676
+ -----
1677
+ .. versionadded:: 1.23.0
1678
+
1679
+ Parameters
1680
+ ----------
1681
+ a : array_like
1682
+ An array with (possibly) masked elements.
1683
+ compressed : bool, optional
1684
+ If True (default), masked elements are skipped.
1685
+
1686
+ See Also
1687
+ --------
1688
+ numpy.ndenumerate : Equivalent function ignoring any mask.
1689
+
1690
+ Examples
1691
+ --------
1692
+ >>> a = np.ma.arange(9).reshape((3, 3))
1693
+ >>> a[1, 0] = np.ma.masked
1694
+ >>> a[1, 2] = np.ma.masked
1695
+ >>> a[2, 1] = np.ma.masked
1696
+ >>> a
1697
+ masked_array(
1698
+ data=[[0, 1, 2],
1699
+ [--, 4, --],
1700
+ [6, --, 8]],
1701
+ mask=[[False, False, False],
1702
+ [ True, False, True],
1703
+ [False, True, False]],
1704
+ fill_value=999999)
1705
+ >>> for index, x in np.ma.ndenumerate(a):
1706
+ ... print(index, x)
1707
+ (0, 0) 0
1708
+ (0, 1) 1
1709
+ (0, 2) 2
1710
+ (1, 1) 4
1711
+ (2, 0) 6
1712
+ (2, 2) 8
1713
+
1714
+ >>> for index, x in np.ma.ndenumerate(a, compressed=False):
1715
+ ... print(index, x)
1716
+ (0, 0) 0
1717
+ (0, 1) 1
1718
+ (0, 2) 2
1719
+ (1, 0) --
1720
+ (1, 1) 4
1721
+ (1, 2) --
1722
+ (2, 0) 6
1723
+ (2, 1) --
1724
+ (2, 2) 8
1725
+ """
1726
+ for it, mask in zip(np.ndenumerate(a), getmaskarray(a).flat):
1727
+ if not mask:
1728
+ yield it
1729
+ elif not compressed:
1730
+ yield it[0], masked
1731
+
1732
+
1733
+ def flatnotmasked_edges(a):
1734
+ """
1735
+ Find the indices of the first and last unmasked values.
1736
+
1737
+ Expects a 1-D `MaskedArray`, returns None if all values are masked.
1738
+
1739
+ Parameters
1740
+ ----------
1741
+ a : array_like
1742
+ Input 1-D `MaskedArray`
1743
+
1744
+ Returns
1745
+ -------
1746
+ edges : ndarray or None
1747
+ The indices of first and last non-masked value in the array.
1748
+ Returns None if all values are masked.
1749
+
1750
+ See Also
1751
+ --------
1752
+ flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges
1753
+ clump_masked, clump_unmasked
1754
+
1755
+ Notes
1756
+ -----
1757
+ Only accepts 1-D arrays.
1758
+
1759
+ Examples
1760
+ --------
1761
+ >>> a = np.ma.arange(10)
1762
+ >>> np.ma.flatnotmasked_edges(a)
1763
+ array([0, 9])
1764
+
1765
+ >>> mask = (a < 3) | (a > 8) | (a == 5)
1766
+ >>> a[mask] = np.ma.masked
1767
+ >>> np.array(a[~a.mask])
1768
+ array([3, 4, 6, 7, 8])
1769
+
1770
+ >>> np.ma.flatnotmasked_edges(a)
1771
+ array([3, 8])
1772
+
1773
+ >>> a[:] = np.ma.masked
1774
+ >>> print(np.ma.flatnotmasked_edges(a))
1775
+ None
1776
+
1777
+ """
1778
+ m = getmask(a)
1779
+ if m is nomask or not np.any(m):
1780
+ return np.array([0, a.size - 1])
1781
+ unmasked = np.flatnonzero(~m)
1782
+ if len(unmasked) > 0:
1783
+ return unmasked[[0, -1]]
1784
+ else:
1785
+ return None
1786
+
1787
+
1788
+ def notmasked_edges(a, axis=None):
1789
+ """
1790
+ Find the indices of the first and last unmasked values along an axis.
1791
+
1792
+ If all values are masked, return None. Otherwise, return a list
1793
+ of two tuples, corresponding to the indices of the first and last
1794
+ unmasked values respectively.
1795
+
1796
+ Parameters
1797
+ ----------
1798
+ a : array_like
1799
+ The input array.
1800
+ axis : int, optional
1801
+ Axis along which to perform the operation.
1802
+ If None (default), applies to a flattened version of the array.
1803
+
1804
+ Returns
1805
+ -------
1806
+ edges : ndarray or list
1807
+ An array of start and end indexes if there are any masked data in
1808
+ the array. If there are no masked data in the array, `edges` is a
1809
+ list of the first and last index.
1810
+
1811
+ See Also
1812
+ --------
1813
+ flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous
1814
+ clump_masked, clump_unmasked
1815
+
1816
+ Examples
1817
+ --------
1818
+ >>> a = np.arange(9).reshape((3, 3))
1819
+ >>> m = np.zeros_like(a)
1820
+ >>> m[1:, 1:] = 1
1821
+
1822
+ >>> am = np.ma.array(a, mask=m)
1823
+ >>> np.array(am[~am.mask])
1824
+ array([0, 1, 2, 3, 6])
1825
+
1826
+ >>> np.ma.notmasked_edges(am)
1827
+ array([0, 6])
1828
+
1829
+ """
1830
+ a = asarray(a)
1831
+ if axis is None or a.ndim == 1:
1832
+ return flatnotmasked_edges(a)
1833
+ m = getmaskarray(a)
1834
+ idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
1835
+ return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
1836
+ tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
1837
+
1838
+
1839
+ def flatnotmasked_contiguous(a):
1840
+ """
1841
+ Find contiguous unmasked data in a masked array.
1842
+
1843
+ Parameters
1844
+ ----------
1845
+ a : array_like
1846
+ The input array.
1847
+
1848
+ Returns
1849
+ -------
1850
+ slice_list : list
1851
+ A sorted sequence of `slice` objects (start index, end index).
1852
+
1853
+ .. versionchanged:: 1.15.0
1854
+ Now returns an empty list instead of None for a fully masked array
1855
+
1856
+ See Also
1857
+ --------
1858
+ flatnotmasked_edges, notmasked_contiguous, notmasked_edges
1859
+ clump_masked, clump_unmasked
1860
+
1861
+ Notes
1862
+ -----
1863
+ Only accepts 2-D arrays at most.
1864
+
1865
+ Examples
1866
+ --------
1867
+ >>> a = np.ma.arange(10)
1868
+ >>> np.ma.flatnotmasked_contiguous(a)
1869
+ [slice(0, 10, None)]
1870
+
1871
+ >>> mask = (a < 3) | (a > 8) | (a == 5)
1872
+ >>> a[mask] = np.ma.masked
1873
+ >>> np.array(a[~a.mask])
1874
+ array([3, 4, 6, 7, 8])
1875
+
1876
+ >>> np.ma.flatnotmasked_contiguous(a)
1877
+ [slice(3, 5, None), slice(6, 9, None)]
1878
+ >>> a[:] = np.ma.masked
1879
+ >>> np.ma.flatnotmasked_contiguous(a)
1880
+ []
1881
+
1882
+ """
1883
+ m = getmask(a)
1884
+ if m is nomask:
1885
+ return [slice(0, a.size)]
1886
+ i = 0
1887
+ result = []
1888
+ for (k, g) in itertools.groupby(m.ravel()):
1889
+ n = len(list(g))
1890
+ if not k:
1891
+ result.append(slice(i, i + n))
1892
+ i += n
1893
+ return result
1894
+
1895
+
1896
+ def notmasked_contiguous(a, axis=None):
1897
+ """
1898
+ Find contiguous unmasked data in a masked array along the given axis.
1899
+
1900
+ Parameters
1901
+ ----------
1902
+ a : array_like
1903
+ The input array.
1904
+ axis : int, optional
1905
+ Axis along which to perform the operation.
1906
+ If None (default), applies to a flattened version of the array, and this
1907
+ is the same as `flatnotmasked_contiguous`.
1908
+
1909
+ Returns
1910
+ -------
1911
+ endpoints : list
1912
+ A list of slices (start and end indexes) of unmasked indexes
1913
+ in the array.
1914
+
1915
+ If the input is 2d and axis is specified, the result is a list of lists.
1916
+
1917
+ See Also
1918
+ --------
1919
+ flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
1920
+ clump_masked, clump_unmasked
1921
+
1922
+ Notes
1923
+ -----
1924
+ Only accepts 2-D arrays at most.
1925
+
1926
+ Examples
1927
+ --------
1928
+ >>> a = np.arange(12).reshape((3, 4))
1929
+ >>> mask = np.zeros_like(a)
1930
+ >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0
1931
+ >>> ma = np.ma.array(a, mask=mask)
1932
+ >>> ma
1933
+ masked_array(
1934
+ data=[[0, --, 2, 3],
1935
+ [--, --, --, 7],
1936
+ [8, --, --, 11]],
1937
+ mask=[[False, True, False, False],
1938
+ [ True, True, True, False],
1939
+ [False, True, True, False]],
1940
+ fill_value=999999)
1941
+ >>> np.array(ma[~ma.mask])
1942
+ array([ 0, 2, 3, 7, 8, 11])
1943
+
1944
+ >>> np.ma.notmasked_contiguous(ma)
1945
+ [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
1946
+
1947
+ >>> np.ma.notmasked_contiguous(ma, axis=0)
1948
+ [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]]
1949
+
1950
+ >>> np.ma.notmasked_contiguous(ma, axis=1)
1951
+ [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]]
1952
+
1953
+ """
1954
+ a = asarray(a)
1955
+ nd = a.ndim
1956
+ if nd > 2:
1957
+ raise NotImplementedError("Currently limited to at most 2D array.")
1958
+ if axis is None or nd == 1:
1959
+ return flatnotmasked_contiguous(a)
1960
+ #
1961
+ result = []
1962
+ #
1963
+ other = (axis + 1) % 2
1964
+ idx = [0, 0]
1965
+ idx[axis] = slice(None, None)
1966
+ #
1967
+ for i in range(a.shape[other]):
1968
+ idx[other] = i
1969
+ result.append(flatnotmasked_contiguous(a[tuple(idx)]))
1970
+ return result
1971
+
1972
+
1973
+ def _ezclump(mask):
1974
+ """
1975
+ Finds the clumps (groups of data with the same values) for a 1D bool array.
1976
+
1977
+ Returns a series of slices.
1978
+ """
1979
+ if mask.ndim > 1:
1980
+ mask = mask.ravel()
1981
+ idx = (mask[1:] ^ mask[:-1]).nonzero()
1982
+ idx = idx[0] + 1
1983
+
1984
+ if mask[0]:
1985
+ if len(idx) == 0:
1986
+ return [slice(0, mask.size)]
1987
+
1988
+ r = [slice(0, idx[0])]
1989
+ r.extend((slice(left, right)
1990
+ for left, right in zip(idx[1:-1:2], idx[2::2])))
1991
+ else:
1992
+ if len(idx) == 0:
1993
+ return []
1994
+
1995
+ r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]
1996
+
1997
+ if mask[-1]:
1998
+ r.append(slice(idx[-1], mask.size))
1999
+ return r
2000
+
2001
+
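The boundary detection used by `_ezclump` above can be reproduced in isolation; the mask below is an arbitrary example:

    import numpy as np

    mask = np.array([True, True, False, False, True, False])
    # XOR of neighbouring values marks positions where a run starts or ends,
    # exactly as in `_ezclump`; the +1 converts them to slice boundaries.
    transitions = np.flatnonzero(mask[1:] ^ mask[:-1]) + 1
    print(transitions)   # -> [2 4 5]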
2002
+ def clump_unmasked(a):
2003
+ """
2004
+ Return list of slices corresponding to the unmasked clumps of a 1-D array.
2005
+ (A "clump" is defined as a contiguous region of the array).
2006
+
2007
+ Parameters
2008
+ ----------
2009
+ a : ndarray
2010
+ A one-dimensional masked array.
2011
+
2012
+ Returns
2013
+ -------
2014
+ slices : list of slice
2015
+ The list of slices, one for each continuous region of unmasked
2016
+ elements in `a`.
2017
+
2018
+ Notes
2019
+ -----
2020
+ .. versionadded:: 1.4.0
2021
+
2022
+ See Also
2023
+ --------
2024
+ flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
2025
+ notmasked_contiguous, clump_masked
2026
+
2027
+ Examples
2028
+ --------
2029
+ >>> a = np.ma.masked_array(np.arange(10))
2030
+ >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
2031
+ >>> np.ma.clump_unmasked(a)
2032
+ [slice(3, 6, None), slice(7, 8, None)]
2033
+
2034
+ """
2035
+ mask = getattr(a, '_mask', nomask)
2036
+ if mask is nomask:
2037
+ return [slice(0, a.size)]
2038
+ return _ezclump(~mask)
2039
+
2040
+
2041
+ def clump_masked(a):
2042
+ """
2043
+ Returns a list of slices corresponding to the masked clumps of a 1-D array.
2044
+ (A "clump" is defined as a contiguous region of the array).
2045
+
2046
+ Parameters
2047
+ ----------
2048
+ a : ndarray
2049
+ A one-dimensional masked array.
2050
+
2051
+ Returns
2052
+ -------
2053
+ slices : list of slice
2054
+ The list of slices, one for each continuous region of masked elements
2055
+ in `a`.
2056
+
2057
+ Notes
2058
+ -----
2059
+ .. versionadded:: 1.4.0
2060
+
2061
+ See Also
2062
+ --------
2063
+ flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
2064
+ notmasked_contiguous, clump_unmasked
2065
+
2066
+ Examples
2067
+ --------
2068
+ >>> a = np.ma.masked_array(np.arange(10))
2069
+ >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
2070
+ >>> np.ma.clump_masked(a)
2071
+ [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
2072
+
2073
+ """
2074
+ mask = ma.getmask(a)
2075
+ if mask is nomask:
2076
+ return []
2077
+ return _ezclump(mask)
2078
+
2079
+
2080
+ ###############################################################################
2081
+ # Polynomial fit #
2082
+ ###############################################################################
2083
+
2084
+
2085
+ def vander(x, n=None):
2086
+ """
2087
+ Masked values in the input array result in rows of zeros.
2088
+
2089
+ """
2090
+ _vander = np.vander(x, n)
2091
+ m = getmask(x)
2092
+ if m is not nomask:
2093
+ _vander[m] = 0
2094
+ return _vander
2095
+
2096
+ vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
2097
+
2098
+
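A small illustration of the masked `vander` wrapper; the input values are arbitrary:

    import numpy as np

    x = np.ma.array([1.0, 2.0, 3.0], mask=[0, 1, 0])
    # The row built from the masked entry x[1] is replaced by zeros,
    # as described in the docstring above.
    print(np.ma.vander(x, 3))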
2099
+ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
2100
+ """
2101
+ Any masked values in x are propagated to y, and vice-versa.
2102
+
2103
+ """
2104
+ x = asarray(x)
2105
+ y = asarray(y)
2106
+
2107
+ m = getmask(x)
2108
+ if y.ndim == 1:
2109
+ m = mask_or(m, getmask(y))
2110
+ elif y.ndim == 2:
2111
+ my = getmask(mask_rows(y))
2112
+ if my is not nomask:
2113
+ m = mask_or(m, my[:, 0])
2114
+ else:
2115
+ raise TypeError("Expected a 1D or 2D array for y!")
2116
+
2117
+ if w is not None:
2118
+ w = asarray(w)
2119
+ if w.ndim != 1:
2120
+ raise TypeError("expected a 1-d array for weights")
2121
+ if w.shape[0] != y.shape[0]:
2122
+ raise TypeError("expected w and y to have the same length")
2123
+ m = mask_or(m, getmask(w))
2124
+
2125
+ if m is not nomask:
2126
+ not_m = ~m
2127
+ if w is not None:
2128
+ w = w[not_m]
2129
+ return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
2130
+ else:
2131
+ return np.polyfit(x, y, deg, rcond, full, w, cov)
2132
+
2133
+ polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
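A short sketch of the masked `polyfit` wrapper; the data are contrived so that the masked point is an obvious outlier:

    import numpy as np

    x = np.ma.array([0.0, 1.0, 2.0, 3.0, 4.0], mask=[0, 0, 0, 0, 1])
    y = np.array([1.0, 3.0, 5.0, 7.0, 100.0])
    # The mask on x[4] is merged into y, so the degree-1 fit only sees the
    # first four points and the coefficients come out close to [2, 1].
    print(np.ma.polyfit(x, y, 1))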
venv/lib/python3.10/site-packages/numpy/ma/mrecords.py ADDED
@@ -0,0 +1,783 @@
1
+ """:mod:`numpy.ma.mrecords`
2
+
3
+ Defines the equivalent of :class:`numpy.recarrays` for masked arrays,
4
+ where fields can be accessed as attributes.
5
+ Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
6
+ and the masking of individual fields.
7
+
8
+ .. moduleauthor:: Pierre Gerard-Marchant
9
+
10
+ """
11
+ # We should make sure that no field is called '_mask','mask','_fieldmask',
12
+ # or other reserved keywords. An idea would be to not bother in the
13
+ # first place, and then rename the invalid fields with a trailing
14
+ # underscore. Maybe we could just overload the parser function ?
15
+
16
+ from numpy.ma import (
17
+ MAError, MaskedArray, masked, nomask, masked_array, getdata,
18
+ getmaskarray, filled
19
+ )
20
+ import numpy.ma as ma
21
+ import warnings
22
+
23
+ import numpy as np
24
+ from numpy import (
25
+ bool_, dtype, ndarray, recarray, array as narray
26
+ )
27
+ from numpy.core.records import (
28
+ fromarrays as recfromarrays, fromrecords as recfromrecords
29
+ )
30
+
31
+ _byteorderconv = np.core.records._byteorderconv
32
+
33
+
34
+ _check_fill_value = ma.core._check_fill_value
35
+
36
+
37
+ __all__ = [
38
+ 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords',
39
+ 'fromtextfile', 'addfield',
40
+ ]
41
+
42
+ reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
43
+
44
+
45
+ def _checknames(descr, names=None):
46
+ """
47
+ Checks that field names ``descr`` are not reserved keywords.
48
+
49
+ If this is the case, a default 'f%i' is substituted. If the argument
50
+ `names` is not None, updates the field names to valid names.
51
+
52
+ """
53
+ ndescr = len(descr)
54
+ default_names = ['f%i' % i for i in range(ndescr)]
55
+ if names is None:
56
+ new_names = default_names
57
+ else:
58
+ if isinstance(names, (tuple, list)):
59
+ new_names = names
60
+ elif isinstance(names, str):
61
+ new_names = names.split(',')
62
+ else:
63
+ raise NameError(f'illegal input names {names!r}')
64
+ nnames = len(new_names)
65
+ if nnames < ndescr:
66
+ new_names += default_names[nnames:]
67
+ ndescr = []
68
+ for (n, d, t) in zip(new_names, default_names, descr.descr):
69
+ if n in reserved_fields:
70
+ if t[0] in reserved_fields:
71
+ ndescr.append((d, t[1]))
72
+ else:
73
+ ndescr.append(t)
74
+ else:
75
+ ndescr.append((n, t[1]))
76
+ return np.dtype(ndescr)
77
+
78
+
79
+ def _get_fieldmask(self):
80
+ mdescr = [(n, '|b1') for n in self.dtype.names]
81
+ fdmask = np.empty(self.shape, dtype=mdescr)
82
+ fdmask.flat = tuple([False] * len(mdescr))
83
+ return fdmask
84
+
85
+
86
+ class MaskedRecords(MaskedArray):
87
+ """
88
+
89
+ Attributes
90
+ ----------
91
+ _data : recarray
92
+ Underlying data, as a record array.
93
+ _mask : boolean array
94
+ Mask of the records. A record is masked when all its fields are
95
+ masked.
96
+ _fieldmask : boolean recarray
97
+ Record array of booleans, setting the mask of each individual field
98
+ of each record.
99
+ _fill_value : record
100
+ Filling values for each field.
101
+
102
+ """
103
+
104
+ def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
105
+ formats=None, names=None, titles=None,
106
+ byteorder=None, aligned=False,
107
+ mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
108
+ copy=False,
109
+ **options):
110
+
111
+ self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
112
+ strides=strides, formats=formats, names=names,
113
+ titles=titles, byteorder=byteorder,
114
+ aligned=aligned,)
115
+
116
+ mdtype = ma.make_mask_descr(self.dtype)
117
+ if mask is nomask or not np.size(mask):
118
+ if not keep_mask:
119
+ self._mask = tuple([False] * len(mdtype))
120
+ else:
121
+ mask = np.array(mask, copy=copy)
122
+ if mask.shape != self.shape:
123
+ (nd, nm) = (self.size, mask.size)
124
+ if nm == 1:
125
+ mask = np.resize(mask, self.shape)
126
+ elif nm == nd:
127
+ mask = np.reshape(mask, self.shape)
128
+ else:
129
+ msg = "Mask and data not compatible: data size is %i, " + \
130
+ "mask size is %i."
131
+ raise MAError(msg % (nd, nm))
132
+ if not keep_mask:
133
+ self.__setmask__(mask)
134
+ self._sharedmask = True
135
+ else:
136
+ if mask.dtype == mdtype:
137
+ _mask = mask
138
+ else:
139
+ _mask = np.array([tuple([m] * len(mdtype)) for m in mask],
140
+ dtype=mdtype)
141
+ self._mask = _mask
142
+ return self
143
+
144
+ def __array_finalize__(self, obj):
145
+ # Make sure we have a _fieldmask by default
146
+ _mask = getattr(obj, '_mask', None)
147
+ if _mask is None:
148
+ objmask = getattr(obj, '_mask', nomask)
149
+ _dtype = ndarray.__getattribute__(self, 'dtype')
150
+ if objmask is nomask:
151
+ _mask = ma.make_mask_none(self.shape, dtype=_dtype)
152
+ else:
153
+ mdescr = ma.make_mask_descr(_dtype)
154
+ _mask = narray([tuple([m] * len(mdescr)) for m in objmask],
155
+ dtype=mdescr).view(recarray)
156
+ # Update some of the attributes
157
+ _dict = self.__dict__
158
+ _dict.update(_mask=_mask)
159
+ self._update_from(obj)
160
+ if _dict['_baseclass'] == ndarray:
161
+ _dict['_baseclass'] = recarray
162
+ return
163
+
164
+ @property
165
+ def _data(self):
166
+ """
167
+ Returns the data as a recarray.
168
+
169
+ """
170
+ return ndarray.view(self, recarray)
171
+
172
+ @property
173
+ def _fieldmask(self):
174
+ """
175
+ Alias to mask.
176
+
177
+ """
178
+ return self._mask
179
+
180
+ def __len__(self):
181
+ """
182
+ Returns the length
183
+
184
+ """
185
+ # We have more than one record
186
+ if self.ndim:
187
+ return len(self._data)
188
+ # We have only one record: return the nb of fields
189
+ return len(self.dtype)
190
+
191
+ def __getattribute__(self, attr):
192
+ try:
193
+ return object.__getattribute__(self, attr)
194
+ except AttributeError:
195
+ # attr must be a fieldname
196
+ pass
197
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields
198
+ try:
199
+ res = fielddict[attr][:2]
200
+ except (TypeError, KeyError) as e:
201
+ raise AttributeError(
202
+ f'record array has no attribute {attr}') from e
203
+ # So far, so good
204
+ _localdict = ndarray.__getattribute__(self, '__dict__')
205
+ _data = ndarray.view(self, _localdict['_baseclass'])
206
+ obj = _data.getfield(*res)
207
+ if obj.dtype.names is not None:
208
+ raise NotImplementedError("MaskedRecords is currently limited to "
209
+ "simple records.")
210
+ # Get some special attributes
211
+ # Reset the object's mask
212
+ hasmasked = False
213
+ _mask = _localdict.get('_mask', None)
214
+ if _mask is not None:
215
+ try:
216
+ _mask = _mask[attr]
217
+ except IndexError:
218
+ # Couldn't find a mask: use the default (nomask)
219
+ pass
220
+ tp_len = len(_mask.dtype)
221
+ hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any()
222
+ if (obj.shape or hasmasked):
223
+ obj = obj.view(MaskedArray)
224
+ obj._baseclass = ndarray
225
+ obj._isfield = True
226
+ obj._mask = _mask
227
+ # Reset the field values
228
+ _fill_value = _localdict.get('_fill_value', None)
229
+ if _fill_value is not None:
230
+ try:
231
+ obj._fill_value = _fill_value[attr]
232
+ except ValueError:
233
+ obj._fill_value = None
234
+ else:
235
+ obj = obj.item()
236
+ return obj
237
+
238
+ def __setattr__(self, attr, val):
239
+ """
240
+ Sets the attribute attr to the value val.
241
+
242
+ """
243
+ # Should we call __setmask__ first ?
244
+ if attr in ['mask', 'fieldmask']:
245
+ self.__setmask__(val)
246
+ return
247
+ # Create a shortcut (so that we don't have to call getattr all the time)
248
+ _localdict = object.__getattribute__(self, '__dict__')
249
+ # Check whether we're creating a new field
250
+ newattr = attr not in _localdict
251
+ try:
252
+ # Is attr a generic attribute ?
253
+ ret = object.__setattr__(self, attr, val)
254
+ except Exception:
255
+ # Not a generic attribute: exit if it's not a valid field
256
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
257
+ optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
258
+ if not (attr in fielddict or attr in optinfo):
259
+ raise
260
+ else:
261
+ # Get the list of names
262
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
263
+ # Check the attribute
264
+ if attr not in fielddict:
265
+ return ret
266
+ if newattr:
267
+ # We just added this one or this setattr worked on an
268
+ # internal attribute.
269
+ try:
270
+ object.__delattr__(self, attr)
271
+ except Exception:
272
+ return ret
273
+ # Let's try to set the field
274
+ try:
275
+ res = fielddict[attr][:2]
276
+ except (TypeError, KeyError) as e:
277
+ raise AttributeError(
278
+ f'record array has no attribute {attr}') from e
279
+
280
+ if val is masked:
281
+ _fill_value = _localdict['_fill_value']
282
+ if _fill_value is not None:
283
+ dval = _localdict['_fill_value'][attr]
284
+ else:
285
+ dval = val
286
+ mval = True
287
+ else:
288
+ dval = filled(val)
289
+ mval = getmaskarray(val)
290
+ obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
291
+ _localdict['_mask'].__setitem__(attr, mval)
292
+ return obj
293
+
294
+ def __getitem__(self, indx):
295
+ """
296
+ Returns all the fields sharing the same fieldname base.
297
+
298
+ The fieldname base is either `_data` or `_mask`.
299
+
300
+ """
301
+ _localdict = self.__dict__
302
+ _mask = ndarray.__getattribute__(self, '_mask')
303
+ _data = ndarray.view(self, _localdict['_baseclass'])
304
+ # We want a field
305
+ if isinstance(indx, str):
306
+ # Make sure _sharedmask is True to propagate back to _fieldmask
307
+ # Don't use _set_mask, there are some copies being made that
308
+ # break propagation Don't force the mask to nomask, that wreaks
309
+ # easy masking
310
+ obj = _data[indx].view(MaskedArray)
311
+ obj._mask = _mask[indx]
312
+ obj._sharedmask = True
313
+ fval = _localdict['_fill_value']
314
+ if fval is not None:
315
+ obj._fill_value = fval[indx]
316
+ # Force to masked if the mask is True
317
+ if not obj.ndim and obj._mask:
318
+ return masked
319
+ return obj
320
+ # We want some elements.
321
+ # First, the data.
322
+ obj = np.array(_data[indx], copy=False).view(mrecarray)
323
+ obj._mask = np.array(_mask[indx], copy=False).view(recarray)
324
+ return obj
325
+
326
+ def __setitem__(self, indx, value):
327
+ """
328
+ Sets the given record to value.
329
+
330
+ """
331
+ MaskedArray.__setitem__(self, indx, value)
332
+ if isinstance(indx, str):
333
+ self._mask[indx] = ma.getmaskarray(value)
334
+
335
+ def __str__(self):
336
+ """
337
+ Calculates the string representation.
338
+
339
+ """
340
+ if self.size > 1:
341
+ mstr = [f"({','.join([str(i) for i in s])})"
342
+ for s in zip(*[getattr(self, f) for f in self.dtype.names])]
343
+ return f"[{', '.join(mstr)}]"
344
+ else:
345
+ mstr = [f"{','.join([str(i) for i in s])}"
346
+ for s in zip([getattr(self, f) for f in self.dtype.names])]
347
+ return f"({', '.join(mstr)})"
348
+
349
+ def __repr__(self):
350
+ """
351
+ Calculates the repr representation.
352
+
353
+ """
354
+ _names = self.dtype.names
355
+ fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
356
+ reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
357
+ reprstr.insert(0, 'masked_records(')
358
+ reprstr.extend([fmt % (' fill_value', self.fill_value),
359
+ ' )'])
360
+ return str("\n".join(reprstr))
361
+
362
+ def view(self, dtype=None, type=None):
363
+ """
364
+ Returns a view of the mrecarray.
365
+
366
+ """
367
+ # OK, basic copy-paste from MaskedArray.view.
368
+ if dtype is None:
369
+ if type is None:
370
+ output = ndarray.view(self)
371
+ else:
372
+ output = ndarray.view(self, type)
373
+ # Here again.
374
+ elif type is None:
375
+ try:
376
+ if issubclass(dtype, ndarray):
377
+ output = ndarray.view(self, dtype)
378
+ else:
379
+ output = ndarray.view(self, dtype)
380
+ # OK, there's the change
381
+ except TypeError:
382
+ dtype = np.dtype(dtype)
383
+ # we need to revert to MaskedArray, but keeping the possibility
384
+ # of subclasses (eg, TimeSeriesRecords), so we'll force a type
385
+ # set to the first parent
386
+ if dtype.fields is None:
387
+ basetype = self.__class__.__bases__[0]
388
+ output = self.__array__().view(dtype, basetype)
389
+ output._update_from(self)
390
+ else:
391
+ output = ndarray.view(self, dtype)
392
+ output._fill_value = None
393
+ else:
394
+ output = ndarray.view(self, dtype, type)
395
+ # Update the mask, just like in MaskedArray.view
396
+ if (getattr(output, '_mask', nomask) is not nomask):
397
+ mdtype = ma.make_mask_descr(output.dtype)
398
+ output._mask = self._mask.view(mdtype, ndarray)
399
+ output._mask.shape = output.shape
400
+ return output
401
+
402
+ def harden_mask(self):
403
+ """
404
+ Forces the mask to hard.
405
+
406
+ """
407
+ self._hardmask = True
408
+
409
+ def soften_mask(self):
410
+ """
411
+ Forces the mask to soft
412
+
413
+ """
414
+ self._hardmask = False
415
+
416
+ def copy(self):
417
+ """
418
+ Returns a copy of the masked record.
419
+
420
+ """
421
+ copied = self._data.copy().view(type(self))
422
+ copied._mask = self._mask.copy()
423
+ return copied
424
+
425
+ def tolist(self, fill_value=None):
426
+ """
427
+ Return the data portion of the array as a list.
428
+
429
+ Data items are converted to the nearest compatible Python type.
430
+ Masked values are converted to fill_value. If fill_value is None,
431
+ the corresponding entries in the output list will be ``None``.
432
+
433
+ """
434
+ if fill_value is not None:
435
+ return self.filled(fill_value).tolist()
436
+ result = narray(self.filled().tolist(), dtype=object)
437
+ mask = narray(self._mask.tolist())
438
+ result[mask] = None
439
+ return result.tolist()
440
+
441
+ def __getstate__(self):
442
+ """Return the internal state of the masked array.
443
+
444
+ This is for pickling.
445
+
446
+ """
447
+ state = (1,
448
+ self.shape,
449
+ self.dtype,
450
+ self.flags.fnc,
451
+ self._data.tobytes(),
452
+ self._mask.tobytes(),
453
+ self._fill_value,
454
+ )
455
+ return state
456
+
457
+ def __setstate__(self, state):
458
+ """
459
+ Restore the internal state of the masked array.
460
+
461
+ This is for pickling. ``state`` is typically the output of
+ ``__getstate__`` and is a 7-tuple containing:
+
+ - the version number of the pickled state (currently 1)
+ - a tuple giving the shape of the data
+ - a typecode for the data
+ - the Fortran-contiguity flag (``flags.fnc``)
+ - a binary string for the data
+ - a binary string for the mask
+ - the fill_value.
+
470
+ """
471
+ (ver, shp, typ, isf, raw, msk, flv) = state
472
+ ndarray.__setstate__(self, (shp, typ, isf, raw))
473
+ mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
474
+ self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
475
+ self.fill_value = flv
476
+
477
+ def __reduce__(self):
478
+ """
479
+ Return a 3-tuple for pickling a MaskedArray.
480
+
481
+ """
482
+ return (_mrreconstruct,
483
+ (self.__class__, self._baseclass, (0,), 'b',),
484
+ self.__getstate__())
485
+
486
+
487
+ def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
488
+ """
489
+ Build a new MaskedArray from the information stored in a pickle.
490
+
491
+ """
492
+ _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
493
+ _mask = ndarray.__new__(ndarray, baseshape, 'b1')
494
+ return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
495
+
496
+ mrecarray = MaskedRecords
497
+
498
+
499
+ ###############################################################################
500
+ # Constructors #
501
+ ###############################################################################
502
+
503
+
504
+ def fromarrays(arraylist, dtype=None, shape=None, formats=None,
505
+ names=None, titles=None, aligned=False, byteorder=None,
506
+ fill_value=None):
507
+ """
508
+ Creates a mrecarray from a (flat) list of masked arrays.
509
+
510
+ Parameters
511
+ ----------
512
+ arraylist : sequence
513
+ A list of (masked) arrays. Each element of the sequence is first converted
514
+ to a masked array if needed. If a 2D array is passed as argument, it is
515
+ processed line by line
516
+ dtype : {None, dtype}, optional
517
+ Data type descriptor.
518
+ shape : {None, integer}, optional
519
+ Number of records. If None, shape is defined from the shape of the
520
+ first array in the list.
521
+ formats : {None, sequence}, optional
522
+ Sequence of formats for each individual field. If None, the formats will
523
+ be autodetected by inspecting the fields and selecting the highest dtype
524
+ possible.
525
+ names : {None, sequence}, optional
526
+ Sequence of the names of each field.
527
+ fill_value : {None, sequence}, optional
528
+ Sequence of data to be used as filling values.
529
+
530
+ Notes
531
+ -----
532
+ Lists of tuples should be preferred over lists of lists for faster processing.
533
+
534
+ """
535
+ datalist = [getdata(x) for x in arraylist]
536
+ masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
537
+ _array = recfromarrays(datalist,
538
+ dtype=dtype, shape=shape, formats=formats,
539
+ names=names, titles=titles, aligned=aligned,
540
+ byteorder=byteorder).view(mrecarray)
541
+ _array._mask.flat = list(zip(*masklist))
542
+ if fill_value is not None:
543
+ _array.fill_value = fill_value
544
+ return _array
545
+
546
+
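A small sketch of building a masked record array with `fromarrays`; field names and values are invented for illustration:

    import numpy as np
    from numpy.ma import mrecords

    age = np.ma.array([25, 30, 35], mask=[0, 1, 0])
    height = np.ma.array([1.7, 1.8, 1.9])
    rec = mrecords.fromarrays([age, height], names='age,height')
    # Fields are reachable as attributes and each one keeps the mask of
    # the array it came from.
    print(rec.age)
    print(rec)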
547
+ def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
548
+ titles=None, aligned=False, byteorder=None,
549
+ fill_value=None, mask=nomask):
550
+ """
551
+ Creates a MaskedRecords from a list of records.
552
+
553
+ Parameters
554
+ ----------
555
+ reclist : sequence
556
+ A list of records. Each element of the sequence is first converted
557
+ to a masked array if needed. If a 2D array is passed as argument, it is
558
+ processed line by line
559
+ dtype : {None, dtype}, optional
560
+ Data type descriptor.
561
+ shape : {None,int}, optional
562
+ Number of records. If None, ``shape`` is defined from the shape of the
563
+ first array in the list.
564
+ formats : {None, sequence}, optional
565
+ Sequence of formats for each individual field. If None, the formats will
566
+ be autodetected by inspecting the fields and selecting the highest dtype
567
+ possible.
568
+ names : {None, sequence}, optional
569
+ Sequence of the names of each field.
570
+ fill_value : {None, sequence}, optional
571
+ Sequence of data to be used as filling values.
572
+ mask : {nomask, sequence}, optional.
573
+ External mask to apply on the data.
574
+
575
+ Notes
576
+ -----
577
+ Lists of tuples should be preferred over lists of lists for faster processing.
578
+
579
+ """
580
+ # Grab the initial _fieldmask, if needed:
581
+ _mask = getattr(reclist, '_mask', None)
582
+ # Get the list of records.
583
+ if isinstance(reclist, ndarray):
584
+ # Make sure we don't have some hidden mask
585
+ if isinstance(reclist, MaskedArray):
586
+ reclist = reclist.filled().view(ndarray)
587
+ # Grab the initial dtype, just in case
588
+ if dtype is None:
589
+ dtype = reclist.dtype
590
+ reclist = reclist.tolist()
591
+ mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
592
+ names=names, titles=titles,
593
+ aligned=aligned, byteorder=byteorder).view(mrecarray)
594
+ # Set the fill_value if needed
595
+ if fill_value is not None:
596
+ mrec.fill_value = fill_value
597
+ # Now, let's deal w/ the mask
598
+ if mask is not nomask:
599
+ mask = np.array(mask, copy=False)
600
+ maskrecordlength = len(mask.dtype)
601
+ if maskrecordlength:
602
+ mrec._mask.flat = mask
603
+ elif mask.ndim == 2:
604
+ mrec._mask.flat = [tuple(m) for m in mask]
605
+ else:
606
+ mrec.__setmask__(mask)
607
+ if _mask is not None:
608
+ mrec._mask[:] = _mask
609
+ return mrec
610
+
611
+
612
+ def _guessvartypes(arr):
613
+ """
614
+ Tries to guess the dtypes of the str_ ndarray `arr`.
615
+
616
+ Guesses by testing element-wise conversion. Returns a list of dtypes.
617
+ The array is first converted to ndarray. If the array is 2D, the test
618
+ is performed on the first line. An exception is raised if the file is
619
+ 3D or more.
620
+
621
+ """
622
+ vartypes = []
623
+ arr = np.asarray(arr)
624
+ if arr.ndim == 2:
625
+ arr = arr[0]
626
+ elif arr.ndim > 2:
627
+ raise ValueError("The array should be 2D at most!")
628
+ # Start the conversion loop.
629
+ for f in arr:
630
+ try:
631
+ int(f)
632
+ except (ValueError, TypeError):
633
+ try:
634
+ float(f)
635
+ except (ValueError, TypeError):
636
+ try:
637
+ complex(f)
638
+ except (ValueError, TypeError):
639
+ vartypes.append(arr.dtype)
640
+ else:
641
+ vartypes.append(np.dtype(complex))
642
+ else:
643
+ vartypes.append(np.dtype(float))
644
+ else:
645
+ vartypes.append(np.dtype(int))
646
+ return vartypes
647
+
648
+
649
+ def openfile(fname):
650
+ """
651
+ Opens the file handle of file `fname`.
652
+
653
+ """
654
+ # A file handle
655
+ if hasattr(fname, 'readline'):
656
+ return fname
657
+ # Try to open the file and guess its type
658
+ try:
659
+ f = open(fname)
660
+ except FileNotFoundError as e:
661
+ raise FileNotFoundError(f"No such file: '{fname}'") from e
662
+ if f.readline()[:2] != "\\x":
663
+ f.seek(0, 0)
664
+ return f
665
+ f.close()
666
+ raise NotImplementedError("Wow, binary file")
667
+
668
+
669
+ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='',
670
+ varnames=None, vartypes=None,
671
+ *, delimitor=np._NoValue): # backwards compatibility
672
+ """
673
+ Creates a mrecarray from data stored in the file `filename`.
674
+
675
+ Parameters
676
+ ----------
677
+ fname : {file name/handle}
678
+ Handle of an opened file.
679
+ delimiter : {None, string}, optional
680
+ Alphanumeric character used to separate columns in the file.
681
+ If None, any (group of) white spacestring(s) will be used.
682
+ commentchar : {'#', string}, optional
683
+ Alphanumeric character used to mark the start of a comment.
684
+ missingchar : {'', string}, optional
685
+ String indicating missing data, and used to create the masks.
686
+ varnames : {None, sequence}, optional
687
+ Sequence of the variable names. If None, a list will be created from
688
+ the first non empty line of the file.
689
+ vartypes : {None, sequence}, optional
690
+ Sequence of the variables dtypes. If None, it will be estimated from
691
+ the first non-commented line.
692
+
693
+
694
+ Ultra simple: the varnames are in the header, one line"""
695
+ if delimitor is not np._NoValue:
696
+ if delimiter is not None:
697
+ raise TypeError("fromtextfile() got multiple values for argument "
698
+ "'delimiter'")
699
+ # NumPy 1.22.0, 2021-09-23
700
+ warnings.warn("The 'delimitor' keyword argument of "
701
+ "numpy.ma.mrecords.fromtextfile() is deprecated "
702
+ "since NumPy 1.22.0, use 'delimiter' instead.",
703
+ DeprecationWarning, stacklevel=2)
704
+ delimiter = delimitor
705
+
706
+ # Try to open the file.
707
+ ftext = openfile(fname)
708
+
709
+ # Get the first non-empty line as the varnames
710
+ while True:
711
+ line = ftext.readline()
712
+ firstline = line[:line.find(commentchar)].strip()
713
+ _varnames = firstline.split(delimiter)
714
+ if len(_varnames) > 1:
715
+ break
716
+ if varnames is None:
717
+ varnames = _varnames
718
+
719
+ # Get the data.
720
+ _variables = masked_array([line.strip().split(delimiter) for line in ftext
721
+ if line[0] != commentchar and len(line) > 1])
722
+ (_, nfields) = _variables.shape
723
+ ftext.close()
724
+
725
+ # Try to guess the dtype.
726
+ if vartypes is None:
727
+ vartypes = _guessvartypes(_variables[0])
728
+ else:
729
+ vartypes = [np.dtype(v) for v in vartypes]
730
+ if len(vartypes) != nfields:
731
+ msg = "Attempting to set %i dtypes for %i fields!"
732
+ msg += " Reverting to default."
733
+ warnings.warn(msg % (len(vartypes), nfields), stacklevel=2)
734
+ vartypes = _guessvartypes(_variables[0])
735
+
736
+ # Construct the descriptor.
737
+ mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
738
+ mfillv = [ma.default_fill_value(f) for f in vartypes]
739
+
740
+ # Get the data and the mask.
741
+ # We just need a list of masked_arrays. It's easier to create it like that:
742
+ _mask = (_variables.T == missingchar)
743
+ _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
744
+ for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
745
+
746
+ return fromarrays(_datalist, dtype=mdescr)
747
+
748
+
749
+ def addfield(mrecord, newfield, newfieldname=None):
750
+ """Adds a new field to the masked record array
751
+
752
+ Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
753
+ is None, the new field name is set to 'fi', where `i` is the number of
754
+ existing fields.
755
+
756
+ """
757
+ _data = mrecord._data
758
+ _mask = mrecord._mask
759
+ if newfieldname is None or newfieldname in reserved_fields:
760
+ newfieldname = 'f%i' % len(_data.dtype)
761
+ newfield = ma.array(newfield)
762
+ # Get the new data.
763
+ # Create a new empty recarray
764
+ newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
765
+ newdata = recarray(_data.shape, newdtype)
766
+ # Add the existing field
767
+ [newdata.setfield(_data.getfield(*f), *f)
768
+ for f in _data.dtype.fields.values()]
769
+ # Add the new field
770
+ newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
771
+ newdata = newdata.view(MaskedRecords)
772
+ # Get the new mask
773
+ # Create a new empty recarray
774
+ newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
775
+ newmask = recarray(_data.shape, newmdtype)
776
+ # Add the old masks
777
+ [newmask.setfield(_mask.getfield(*f), *f)
778
+ for f in _mask.dtype.fields.values()]
779
+ # Add the mask of the new field
780
+ newmask.setfield(getmaskarray(newfield),
781
+ *newmask.dtype.fields[newfieldname])
782
+ newdata._mask = newmask
783
+ return newdata
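An illustrative use of `addfield`, continuing with made-up data; the automatic field name follows the 'f%i' rule described in the docstring:

    import numpy as np
    from numpy.ma import mrecords

    base = mrecords.fromarrays([np.ma.array([1, 2, 3], mask=[0, 1, 0])],
                               names='a')
    wider = mrecords.addfield(base, np.ma.array([10.0, 20.0, 30.0],
                                                mask=[1, 0, 0]))
    # The unnamed new field is called 'f1'; both fields keep their masks.
    print(wider.dtype.names)
    print(wider.f1)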
venv/lib/python3.10/site-packages/numpy/ma/setup.py ADDED
@@ -0,0 +1,12 @@
1
+ #!/usr/bin/env python3
2
+ def configuration(parent_package='',top_path=None):
3
+ from numpy.distutils.misc_util import Configuration
4
+ config = Configuration('ma', parent_package, top_path)
5
+ config.add_subpackage('tests')
6
+ config.add_data_files('*.pyi')
7
+ return config
8
+
9
+ if __name__ == "__main__":
10
+ from numpy.distutils.core import setup
11
+ config = configuration(top_path='').todict()
12
+ setup(**config)
venv/lib/python3.10/site-packages/numpy/random/LICENSE.md ADDED
@@ -0,0 +1,71 @@
1
+ **This software is dual-licensed under the The University of Illinois/NCSA
2
+ Open Source License (NCSA) and The 3-Clause BSD License**
3
+
4
+ # NCSA Open Source License
5
+ **Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
6
+
7
+ Developed by: Kevin Sheppard (<[email protected]>,
8
9
+ [http://www.kevinsheppard.com](http://www.kevinsheppard.com)
10
+
11
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
12
+ this software and associated documentation files (the "Software"), to deal with
13
+ the Software without restriction, including without limitation the rights to
14
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
15
+ of the Software, and to permit persons to whom the Software is furnished to do
16
+ so, subject to the following conditions:
17
+
18
+ Redistributions of source code must retain the above copyright notice, this
19
+ list of conditions and the following disclaimers.
20
+
21
+ Redistributions in binary form must reproduce the above copyright notice, this
22
+ list of conditions and the following disclaimers in the documentation and/or
23
+ other materials provided with the distribution.
24
+
25
+ Neither the names of Kevin Sheppard, nor the names of any contributors may be
26
+ used to endorse or promote products derived from this Software without specific
27
+ prior written permission.
28
+
29
+ **THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32
+ CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
34
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
35
+ THE SOFTWARE.**
36
+
37
+
38
+ # 3-Clause BSD License
39
+ **Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
40
+
41
+ Redistribution and use in source and binary forms, with or without
42
+ modification, are permitted provided that the following conditions are met:
43
+
44
+ 1. Redistributions of source code must retain the above copyright notice,
45
+ this list of conditions and the following disclaimer.
46
+
47
+ 2. Redistributions in binary form must reproduce the above copyright notice,
48
+ this list of conditions and the following disclaimer in the documentation
49
+ and/or other materials provided with the distribution.
50
+
51
+ 3. Neither the name of the copyright holder nor the names of its contributors
52
+ may be used to endorse or promote products derived from this software
53
+ without specific prior written permission.
54
+
55
+ **THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
56
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
59
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
65
+ THE POSSIBILITY OF SUCH DAMAGE.**
66
+
67
+ # Components
68
+
69
+ Many parts of this module have been derived from original sources,
70
+ often the algorithm's designer. Component licenses are located with
71
+ the component code.
venv/lib/python3.10/site-packages/numpy/random/__init__.pxd ADDED
@@ -0,0 +1,14 @@
1
+ cimport numpy as np
2
+ from libc.stdint cimport uint32_t, uint64_t
3
+
4
+ cdef extern from "numpy/random/bitgen.h":
5
+ struct bitgen:
6
+ void *state
7
+ uint64_t (*next_uint64)(void *st) nogil
8
+ uint32_t (*next_uint32)(void *st) nogil
9
+ double (*next_double)(void *st) nogil
10
+ uint64_t (*next_raw)(void *st) nogil
11
+
12
+ ctypedef bitgen bitgen_t
13
+
14
+ from numpy.random.bit_generator cimport BitGenerator, SeedSequence
venv/lib/python3.10/site-packages/numpy/random/__init__.py ADDED
@@ -0,0 +1,215 @@
1
+ """
2
+ ========================
3
+ Random Number Generation
4
+ ========================
5
+
6
+ Use ``default_rng()`` to create a `Generator` and call its methods.
7
+
8
+ =============== =========================================================
9
+ Generator
10
+ --------------- ---------------------------------------------------------
11
+ Generator Class implementing all of the random number distributions
12
+ default_rng Default constructor for ``Generator``
13
+ =============== =========================================================
14
+
15
+ ============================================= ===
16
+ BitGenerator Streams that work with Generator
17
+ --------------------------------------------- ---
18
+ MT19937
19
+ PCG64
20
+ PCG64DXSM
21
+ Philox
22
+ SFC64
23
+ ============================================= ===
24
+
25
+ ============================================= ===
26
+ Getting entropy to initialize a BitGenerator
27
+ --------------------------------------------- ---
28
+ SeedSequence
29
+ ============================================= ===
30
+
31
+
32
+ Legacy
33
+ ------
34
+
35
+ For backwards compatibility with previous versions of numpy before 1.17, the
36
+ various aliases to the global `RandomState` methods are left alone and do not
37
+ use the new `Generator` API.
38
+
39
+ ==================== =========================================================
40
+ Utility functions
41
+ -------------------- ---------------------------------------------------------
42
+ random Uniformly distributed floats over ``[0, 1)``
43
+ bytes Uniformly distributed random bytes.
44
+ permutation Randomly permute a sequence / generate a random sequence.
45
+ shuffle Randomly permute a sequence in place.
46
+ choice Random sample from 1-D array.
47
+ ==================== =========================================================
48
+
49
+ ==================== =========================================================
50
+ Compatibility
51
+ functions - removed
52
+ in the new API
53
+ -------------------- ---------------------------------------------------------
54
+ rand Uniformly distributed values.
55
+ randn Normally distributed values.
56
+ ranf Uniformly distributed floating point numbers.
57
+ random_integers Uniformly distributed integers in a given range.
58
+ (deprecated, use ``integers(..., endpoint=True)`` instead)
59
+ random_sample Alias for `random_sample`
60
+ randint Uniformly distributed integers in a given range
61
+ seed Seed the legacy random number generator.
62
+ ==================== =========================================================
63
+
64
+ ==================== =========================================================
65
+ Univariate
66
+ distributions
67
+ -------------------- ---------------------------------------------------------
68
+ beta Beta distribution over ``[0, 1]``.
69
+ binomial Binomial distribution.
70
+ chisquare :math:`\\chi^2` distribution.
71
+ exponential Exponential distribution.
72
+ f F (Fisher-Snedecor) distribution.
73
+ gamma Gamma distribution.
74
+ geometric Geometric distribution.
75
+ gumbel Gumbel distribution.
76
+ hypergeometric Hypergeometric distribution.
77
+ laplace Laplace distribution.
78
+ logistic Logistic distribution.
79
+ lognormal Log-normal distribution.
80
+ logseries Logarithmic series distribution.
81
+ negative_binomial Negative binomial distribution.
82
+ noncentral_chisquare Non-central chi-square distribution.
83
+ noncentral_f Non-central F distribution.
84
+ normal Normal / Gaussian distribution.
85
+ pareto Pareto distribution.
86
+ poisson Poisson distribution.
87
+ power Power distribution.
88
+ rayleigh Rayleigh distribution.
89
+ triangular Triangular distribution.
90
+ uniform Uniform distribution.
91
+ vonmises Von Mises circular distribution.
92
+ wald Wald (inverse Gaussian) distribution.
93
+ weibull Weibull distribution.
94
+ zipf Zipf's distribution over ranked data.
95
+ ==================== =========================================================
96
+
97
+ ==================== ==========================================================
98
+ Multivariate
99
+ distributions
100
+ -------------------- ----------------------------------------------------------
101
+ dirichlet Multivariate generalization of Beta distribution.
102
+ multinomial Multivariate generalization of the binomial distribution.
103
+ multivariate_normal Multivariate generalization of the normal distribution.
104
+ ==================== ==========================================================
105
+
106
+ ==================== =========================================================
107
+ Standard
108
+ distributions
109
+ -------------------- ---------------------------------------------------------
110
+ standard_cauchy Standard Cauchy-Lorentz distribution.
111
+ standard_exponential Standard exponential distribution.
112
+ standard_gamma Standard Gamma distribution.
113
+ standard_normal Standard normal distribution.
114
+ standard_t Standard Student's t-distribution.
115
+ ==================== =========================================================
116
+
117
+ ==================== =========================================================
118
+ Internal functions
119
+ -------------------- ---------------------------------------------------------
120
+ get_state Get tuple representing internal state of generator.
121
+ set_state Set state of generator.
122
+ ==================== =========================================================
123
+
124
+
125
+ """
126
+ __all__ = [
127
+ 'beta',
128
+ 'binomial',
129
+ 'bytes',
130
+ 'chisquare',
131
+ 'choice',
132
+ 'dirichlet',
133
+ 'exponential',
134
+ 'f',
135
+ 'gamma',
136
+ 'geometric',
137
+ 'get_state',
138
+ 'gumbel',
139
+ 'hypergeometric',
140
+ 'laplace',
141
+ 'logistic',
142
+ 'lognormal',
143
+ 'logseries',
144
+ 'multinomial',
145
+ 'multivariate_normal',
146
+ 'negative_binomial',
147
+ 'noncentral_chisquare',
148
+ 'noncentral_f',
149
+ 'normal',
150
+ 'pareto',
151
+ 'permutation',
152
+ 'poisson',
153
+ 'power',
154
+ 'rand',
155
+ 'randint',
156
+ 'randn',
157
+ 'random',
158
+ 'random_integers',
159
+ 'random_sample',
160
+ 'ranf',
161
+ 'rayleigh',
162
+ 'sample',
163
+ 'seed',
164
+ 'set_state',
165
+ 'shuffle',
166
+ 'standard_cauchy',
167
+ 'standard_exponential',
168
+ 'standard_gamma',
169
+ 'standard_normal',
170
+ 'standard_t',
171
+ 'triangular',
172
+ 'uniform',
173
+ 'vonmises',
174
+ 'wald',
175
+ 'weibull',
176
+ 'zipf',
177
+ ]
178
+
179
+ # add these for module-freeze analysis (like PyInstaller)
180
+ from . import _pickle
181
+ from . import _common
182
+ from . import _bounded_integers
183
+
184
+ from ._generator import Generator, default_rng
185
+ from .bit_generator import SeedSequence, BitGenerator
186
+ from ._mt19937 import MT19937
187
+ from ._pcg64 import PCG64, PCG64DXSM
188
+ from ._philox import Philox
189
+ from ._sfc64 import SFC64
190
+ from .mtrand import *
191
+
192
+ __all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
193
+ 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng',
194
+ 'BitGenerator']
195
+
196
+
197
+ def __RandomState_ctor():
198
+ """Return a RandomState instance.
199
+
200
+ This function exists solely to assist (un)pickling.
201
+
202
+ Note that the state of the RandomState returned here is irrelevant, as this
203
+ function's entire purpose is to return a newly allocated RandomState whose
204
+ state pickle can set. Consequently the RandomState returned by this function
205
+ is a freshly allocated copy with a seed=0.
206
+
207
+ See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
208
+
209
+ """
210
+ return RandomState(seed=0)
211
+
212
+
213
+ from numpy._pytesttester import PytestTester
214
+ test = PytestTester(__name__)
215
+ del PytestTester
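
The module docstring above points users at ``default_rng()`` as the entry point to the new API. A minimal usage sketch of that call (the seed value below is arbitrary and only there to make the output repeatable):

    import numpy as np

    # default_rng() returns a Generator backed by the default BitGenerator (PCG64).
    rng = np.random.default_rng(12345)

    print(rng.random(3))                  # three uniform floats in [0, 1)
    print(rng.integers(0, 10, size=5))    # five integers drawn from [0, 10)
    print(rng.standard_normal(2))         # two standard-normal draws
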
venv/lib/python3.10/site-packages/numpy/random/__init__.pyi ADDED
@@ -0,0 +1,72 @@
1
+ from numpy._pytesttester import PytestTester
2
+
3
+ from numpy.random._generator import Generator as Generator
4
+ from numpy.random._generator import default_rng as default_rng
5
+ from numpy.random._mt19937 import MT19937 as MT19937
6
+ from numpy.random._pcg64 import (
7
+ PCG64 as PCG64,
8
+ PCG64DXSM as PCG64DXSM,
9
+ )
10
+ from numpy.random._philox import Philox as Philox
11
+ from numpy.random._sfc64 import SFC64 as SFC64
12
+ from numpy.random.bit_generator import BitGenerator as BitGenerator
13
+ from numpy.random.bit_generator import SeedSequence as SeedSequence
14
+ from numpy.random.mtrand import (
15
+ RandomState as RandomState,
16
+ beta as beta,
17
+ binomial as binomial,
18
+ bytes as bytes,
19
+ chisquare as chisquare,
20
+ choice as choice,
21
+ dirichlet as dirichlet,
22
+ exponential as exponential,
23
+ f as f,
24
+ gamma as gamma,
25
+ geometric as geometric,
26
+ get_bit_generator as get_bit_generator,
27
+ get_state as get_state,
28
+ gumbel as gumbel,
29
+ hypergeometric as hypergeometric,
30
+ laplace as laplace,
31
+ logistic as logistic,
32
+ lognormal as lognormal,
33
+ logseries as logseries,
34
+ multinomial as multinomial,
35
+ multivariate_normal as multivariate_normal,
36
+ negative_binomial as negative_binomial,
37
+ noncentral_chisquare as noncentral_chisquare,
38
+ noncentral_f as noncentral_f,
39
+ normal as normal,
40
+ pareto as pareto,
41
+ permutation as permutation,
42
+ poisson as poisson,
43
+ power as power,
44
+ rand as rand,
45
+ randint as randint,
46
+ randn as randn,
47
+ random as random,
48
+ random_integers as random_integers,
49
+ random_sample as random_sample,
50
+ ranf as ranf,
51
+ rayleigh as rayleigh,
52
+ sample as sample,
53
+ seed as seed,
54
+ set_bit_generator as set_bit_generator,
55
+ set_state as set_state,
56
+ shuffle as shuffle,
57
+ standard_cauchy as standard_cauchy,
58
+ standard_exponential as standard_exponential,
59
+ standard_gamma as standard_gamma,
60
+ standard_normal as standard_normal,
61
+ standard_t as standard_t,
62
+ triangular as triangular,
63
+ uniform as uniform,
64
+ vonmises as vonmises,
65
+ wald as wald,
66
+ weibull as weibull,
67
+ zipf as zipf,
68
+ )
69
+
70
+ __all__: list[str]
71
+ __path__: list[str]
72
+ test: PytestTester
venv/lib/python3.10/site-packages/numpy/random/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (7.43 kB).
 
venv/lib/python3.10/site-packages/numpy/random/__pycache__/_pickle.cpython-310.pyc ADDED
Binary file (2.23 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_bounded_integers.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (379 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_bounded_integers.pxd ADDED
@@ -0,0 +1,29 @@
1
+ from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
2
+ int8_t, int16_t, int32_t, int64_t, intptr_t)
3
+ import numpy as np
4
+ cimport numpy as np
5
+ ctypedef np.npy_bool bool_t
6
+
7
+ from numpy.random cimport bitgen_t
8
+
9
+ cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
10
+ """Mask generator for use in bounded random numbers"""
11
+ # Smallest bit mask >= max
12
+ cdef uint64_t mask = max_val
13
+ mask |= mask >> 1
14
+ mask |= mask >> 2
15
+ mask |= mask >> 4
16
+ mask |= mask >> 8
17
+ mask |= mask >> 16
18
+ mask |= mask >> 32
19
+ return mask
20
+
21
+ cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
22
+ cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
23
+ cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
24
+ cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
25
+ cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
26
+ cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
27
+ cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
28
+ cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
29
+ cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
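
The ``_gen_mask`` helper declared above smears the high bit of ``max_val`` rightward to build the smallest all-ones mask covering it, which the ``_rand_*`` helpers then use for masked rejection sampling. A pure-Python sketch of the same bit trick (the function name here is illustrative, not part of the file):

    def gen_mask(max_val: int) -> int:
        # Smallest mask of the form 2**k - 1 that is >= max_val.
        mask = max_val
        for shift in (1, 2, 4, 8, 16, 32):
            mask |= mask >> shift
        return mask

    assert gen_mask(5) == 7
    assert gen_mask(1000) == 1023
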
venv/lib/python3.10/site-packages/numpy/random/_common.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (276 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_common.pxd ADDED
@@ -0,0 +1,106 @@
1
+ #cython: language_level=3
2
+
3
+ from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t
4
+
5
+ import numpy as np
6
+ cimport numpy as np
7
+
8
+ from numpy.random cimport bitgen_t
9
+
10
+ cdef double POISSON_LAM_MAX
11
+ cdef double LEGACY_POISSON_LAM_MAX
12
+ cdef uint64_t MAXSIZE
13
+
14
+ cdef enum ConstraintType:
15
+ CONS_NONE
16
+ CONS_NON_NEGATIVE
17
+ CONS_POSITIVE
18
+ CONS_POSITIVE_NOT_NAN
19
+ CONS_BOUNDED_0_1
20
+ CONS_BOUNDED_GT_0_1
21
+ CONS_BOUNDED_LT_0_1
22
+ CONS_GT_1
23
+ CONS_GTE_1
24
+ CONS_POISSON
25
+ LEGACY_CONS_POISSON
26
+
27
+ ctypedef ConstraintType constraint_type
28
+
29
+ cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method)
30
+ cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output)
31
+ cdef object prepare_cffi(bitgen_t *bitgen)
32
+ cdef object prepare_ctypes(bitgen_t *bitgen)
33
+ cdef int check_constraint(double val, object name, constraint_type cons) except -1
34
+ cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
35
+
36
+ cdef extern from "include/aligned_malloc.h":
37
+ cdef void *PyArray_realloc_aligned(void *p, size_t n)
38
+ cdef void *PyArray_malloc_aligned(size_t n)
39
+ cdef void *PyArray_calloc_aligned(size_t n, size_t s)
40
+ cdef void PyArray_free_aligned(void *p)
41
+
42
+ ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil
43
+ ctypedef double (*random_double_0)(void *state) noexcept nogil
44
+ ctypedef double (*random_double_1)(void *state, double a) noexcept nogil
45
+ ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil
46
+ ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil
47
+
48
+ ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil
49
+ ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil
50
+ ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil
51
+
52
+ ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil
53
+ ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil
54
+ ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil
55
+ ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil
56
+ ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil
57
+ ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil
58
+
59
+ ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil
60
+ ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil
61
+
62
+ ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil
63
+ ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil
64
+
65
+ cdef double kahan_sum(double *darr, np.npy_intp n) noexcept
66
+
67
+ cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil:
68
+ return (rnd >> 11) * (1.0 / 9007199254740992.0)
69
+
70
+ cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
71
+
72
+ cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out)
73
+
74
+ cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out)
75
+
76
+ cdef object wrap_int(object val, object bits)
77
+
78
+ cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size)
79
+
80
+ cdef validate_output_shape(iter_shape, np.ndarray output)
81
+
82
+ cdef object cont(void *func, void *state, object size, object lock, int narg,
83
+ object a, object a_name, constraint_type a_constraint,
84
+ object b, object b_name, constraint_type b_constraint,
85
+ object c, object c_name, constraint_type c_constraint,
86
+ object out)
87
+
88
+ cdef object disc(void *func, void *state, object size, object lock,
89
+ int narg_double, int narg_int64,
90
+ object a, object a_name, constraint_type a_constraint,
91
+ object b, object b_name, constraint_type b_constraint,
92
+ object c, object c_name, constraint_type c_constraint)
93
+
94
+ cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
95
+ object a, object a_name, constraint_type a_constraint,
96
+ object out)
97
+
98
+ cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
99
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
100
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
101
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
102
+
103
+ cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
104
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
105
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
106
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
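
Among the declarations above, ``uint64_to_double`` shows how raw 64-bit bit-generator output becomes a double in ``[0, 1)``: discard 11 bits so only the 53 bits a double's significand can hold remain, then scale by 2**-53. The same computation in plain Python:

    def uint64_to_double(rnd: int) -> float:
        # 9007199254740992 == 2**53; the shift keeps the top 53 of the 64 random bits,
        # giving 2**53 equally spaced values in [0, 1).
        return (rnd >> 11) * (1.0 / 9007199254740992.0)

    assert uint64_to_double(0) == 0.0
    assert uint64_to_double(2**64 - 1) < 1.0
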
venv/lib/python3.10/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-310.pyc ADDED
Binary file (943 Bytes).
 
venv/lib/python3.10/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-310.pyc ADDED
Binary file (1.2 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_examples/cffi/extending.py ADDED
@@ -0,0 +1,40 @@
1
+ """
2
+ Use cffi to access any of the underlying C functions from distributions.h
3
+ """
4
+ import os
5
+ import numpy as np
6
+ import cffi
7
+ from .parse import parse_distributions_h
8
+ ffi = cffi.FFI()
9
+
10
+ inc_dir = os.path.join(np.get_include(), 'numpy')
11
+
12
+ # Basic numpy types
13
+ ffi.cdef('''
14
+ typedef intptr_t npy_intp;
15
+ typedef unsigned char npy_bool;
16
+
17
+ ''')
18
+
19
+ parse_distributions_h(ffi, inc_dir)
20
+
21
+ lib = ffi.dlopen(np.random._generator.__file__)
22
+
23
+ # Compare the distributions.h random_standard_normal_fill to
24
+ # Generator.standard_normal
25
+ bit_gen = np.random.PCG64()
26
+ rng = np.random.Generator(bit_gen)
27
+ state = bit_gen.state
28
+
29
+ interface = rng.bit_generator.cffi
30
+ n = 100
31
+ vals_cffi = ffi.new('double[%d]' % n)
32
+ lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi)
33
+
34
+ # reset the state
35
+ bit_gen.state = state
36
+
37
+ vals = rng.standard_normal(n)
38
+
39
+ for i in range(n):
40
+ assert vals[i] == vals_cffi[i]
venv/lib/python3.10/site-packages/numpy/random/_examples/cffi/parse.py ADDED
@@ -0,0 +1,54 @@
1
+ import os
2
+
3
+
4
+ def parse_distributions_h(ffi, inc_dir):
5
+ """
6
+ Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef
7
+
8
+ Read the function declarations without the "#define ..." macros that will
9
+ be filled in when loading the library.
10
+ """
11
+
12
+ with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid:
13
+ s = []
14
+ for line in fid:
15
+ # massage the include file
16
+ if line.strip().startswith('#'):
17
+ continue
18
+ s.append(line)
19
+ ffi.cdef('\n'.join(s))
20
+
21
+ with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid:
22
+ s = []
23
+ in_skip = 0
24
+ ignoring = False
25
+ for line in fid:
26
+ # check for and remove extern "C" guards
27
+ if ignoring:
28
+ if line.strip().startswith('#endif'):
29
+ ignoring = False
30
+ continue
31
+ if line.strip().startswith('#ifdef __cplusplus'):
32
+ ignoring = True
33
+
34
+ # massage the include file
35
+ if line.strip().startswith('#'):
36
+ continue
37
+
38
+ # skip any inlined function definition
39
+ # which starts with 'static inline xxx(...) {'
40
+ # and ends with a closing '}'
41
+ if line.strip().startswith('static inline'):
42
+ in_skip += line.count('{')
43
+ continue
44
+ elif in_skip > 0:
45
+ in_skip += line.count('{')
46
+ in_skip -= line.count('}')
47
+ continue
48
+
49
+ # replace defines with their value or remove them
50
+ line = line.replace('DECLDIR', '')
51
+ line = line.replace('RAND_INT_TYPE', 'int64_t')
52
+ s.append(line)
53
+ ffi.cdef('\n'.join(s))
54
+
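
The filtering above drops preprocessor lines and skips over ``static inline`` bodies so that only plain declarations reach ``ffi.cdef``. A toy run of the same logic on a made-up header string (the declarations are illustrative, not the real contents of distributions.h):

    header = """\
    #ifndef EXAMPLE_H
    double random_standard_normal(void *bitgen_state);
    static inline double helper(double x) {
        return x * 2.0;
    }
    int64_t example_discrete(void *bitgen_state, double a);
    #endif
    """

    kept, in_skip = [], 0
    for line in header.splitlines():
        if line.strip().startswith('#'):            # drop preprocessor lines
            continue
        if line.strip().startswith('static inline'):
            in_skip += line.count('{')              # start skipping an inline body
            continue
        elif in_skip > 0:
            in_skip += line.count('{') - line.count('}')
            continue
        kept.append(line)

    print('\n'.join(kept))   # only the two plain declarations survive
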
venv/lib/python3.10/site-packages/numpy/random/_examples/cython/extending.pyx ADDED
@@ -0,0 +1,78 @@
1
+ #!/usr/bin/env python3
2
+ #cython: language_level=3
3
+
4
+ from libc.stdint cimport uint32_t
5
+ from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
6
+
7
+ import numpy as np
8
+ cimport numpy as np
9
+ cimport cython
10
+
11
+ from numpy.random cimport bitgen_t
12
+ from numpy.random import PCG64
13
+
14
+ np.import_array()
15
+
16
+
17
+ @cython.boundscheck(False)
18
+ @cython.wraparound(False)
19
+ def uniform_mean(Py_ssize_t n):
20
+ cdef Py_ssize_t i
21
+ cdef bitgen_t *rng
22
+ cdef const char *capsule_name = "BitGenerator"
23
+ cdef double[::1] random_values
24
+ cdef np.ndarray randoms
25
+
26
+ x = PCG64()
27
+ capsule = x.capsule
28
+ if not PyCapsule_IsValid(capsule, capsule_name):
29
+ raise ValueError("Invalid pointer to anon_func_state")
30
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
31
+ random_values = np.empty(n)
32
+ # Best practice is to acquire the lock whenever generating random values.
33
+ # This prevents other threads from modifying the state. Acquiring the lock
34
+ # is only necessary if the GIL is also released, as in this example.
35
+ with x.lock, nogil:
36
+ for i in range(n):
37
+ random_values[i] = rng.next_double(rng.state)
38
+ randoms = np.asarray(random_values)
39
+ return randoms.mean()
40
+
41
+
42
+ # This function is declared nogil so it can be used without the GIL below
43
+ cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
44
+ cdef uint32_t mask, delta, val
45
+ mask = delta = ub - lb
46
+ mask |= mask >> 1
47
+ mask |= mask >> 2
48
+ mask |= mask >> 4
49
+ mask |= mask >> 8
50
+ mask |= mask >> 16
51
+
52
+ val = rng.next_uint32(rng.state) & mask
53
+ while val > delta:
54
+ val = rng.next_uint32(rng.state) & mask
55
+
56
+ return lb + val
57
+
58
+
59
+ @cython.boundscheck(False)
60
+ @cython.wraparound(False)
61
+ def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n):
62
+ cdef Py_ssize_t i
63
+ cdef bitgen_t *rng
64
+ cdef uint32_t[::1] out
65
+ cdef const char *capsule_name = "BitGenerator"
66
+
67
+ x = PCG64()
68
+ out = np.empty(n, dtype=np.uint32)
69
+ capsule = x.capsule
70
+
71
+ if not PyCapsule_IsValid(capsule, capsule_name):
72
+ raise ValueError("Invalid pointer to anon_func_state")
73
+ rng = <bitgen_t *>PyCapsule_GetPointer(capsule, capsule_name)
74
+
75
+ with x.lock, nogil:
76
+ for i in range(n):
77
+ out[i] = bounded_uint(lb, ub, rng)
78
+ return np.asarray(out)
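
The ``bounded_uint`` helper above draws masked 32-bit words and rejects anything greater than ``ub - lb``, which keeps the result unbiased (no modulo bias). The same idea in plain Python, using the standard-library ``random`` module in place of a BitGenerator, purely as an illustration:

    import random

    def bounded_uint(lb: int, ub: int) -> int:
        delta = ub - lb
        mask = delta
        for shift in (1, 2, 4, 8, 16):      # smear bits to get a covering mask
            mask |= mask >> shift
        val = random.getrandbits(32) & mask
        while val > delta:                  # reject out-of-range draws
            val = random.getrandbits(32) & mask
        return lb + val

    draws = [bounded_uint(10, 20) for _ in range(1000)]
    assert min(draws) >= 10 and max(draws) <= 20
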
venv/lib/python3.10/site-packages/numpy/random/_examples/cython/extending_distributions.pyx ADDED
@@ -0,0 +1,117 @@
1
+ #!/usr/bin/env python3
2
+ #cython: language_level=3
3
+ """
4
+ This file shows how to use a BitGenerator to create a distribution.
5
+ """
6
+ import numpy as np
7
+ cimport numpy as np
8
+ cimport cython
9
+ from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
10
+ from libc.stdint cimport uint16_t, uint64_t
11
+ from numpy.random cimport bitgen_t
12
+ from numpy.random import PCG64
13
+ from numpy.random.c_distributions cimport (
14
+ random_standard_uniform_fill, random_standard_uniform_fill_f)
15
+
16
+
17
+ @cython.boundscheck(False)
18
+ @cython.wraparound(False)
19
+ def uniforms(Py_ssize_t n):
20
+ """
21
+ Create an array of `n` uniformly distributed doubles.
22
+ A 'real' distribution would want to process the values into
23
+ some non-uniform distribution
24
+ """
25
+ cdef Py_ssize_t i
26
+ cdef bitgen_t *rng
27
+ cdef const char *capsule_name = "BitGenerator"
28
+ cdef double[::1] random_values
29
+
30
+ x = PCG64()
31
+ capsule = x.capsule
32
+ # Optional check that the capsule is from a BitGenerator
33
+ if not PyCapsule_IsValid(capsule, capsule_name):
34
+ raise ValueError("Invalid pointer to anon_func_state")
35
+ # Cast the pointer
36
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
37
+ random_values = np.empty(n, dtype='float64')
38
+ with x.lock, nogil:
39
+ for i in range(n):
40
+ # Call the function
41
+ random_values[i] = rng.next_double(rng.state)
42
+ randoms = np.asarray(random_values)
43
+
44
+ return randoms
45
+
46
+ # cython example 2
47
+ @cython.boundscheck(False)
48
+ @cython.wraparound(False)
49
+ def uint10_uniforms(Py_ssize_t n):
50
+ """Uniform 10 bit integers stored as 16-bit unsigned integers"""
51
+ cdef Py_ssize_t i
52
+ cdef bitgen_t *rng
53
+ cdef const char *capsule_name = "BitGenerator"
54
+ cdef uint16_t[::1] random_values
55
+ cdef int bits_remaining
56
+ cdef int width = 10
57
+ cdef uint64_t buff, mask = 0x3FF
58
+
59
+ x = PCG64()
60
+ capsule = x.capsule
61
+ if not PyCapsule_IsValid(capsule, capsule_name):
62
+ raise ValueError("Invalid pointer to anon_func_state")
63
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
64
+ random_values = np.empty(n, dtype='uint16')
65
+ # Best practice is to release GIL and acquire the lock
66
+ bits_remaining = 0
67
+ with x.lock, nogil:
68
+ for i in range(n):
69
+ if bits_remaining < width:
70
+ buff = rng.next_uint64(rng.state)
71
+ random_values[i] = buff & mask
72
+ buff >>= width
73
+
74
+ randoms = np.asarray(random_values)
75
+ return randoms
76
+
77
+ # cython example 3
78
+ def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64):
79
+ """
80
+ Create an array of `n` uniformly distributed doubles via a "fill" function.
81
+
82
+ A 'real' distribution would want to process the values into
83
+ some non-uniform distribution
84
+
85
+ Parameters
86
+ ----------
87
+ bit_generator: BitGenerator instance
88
+ n: int
89
+ Output vector length
90
+ dtype: {str, dtype}, optional
91
+ Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The
92
+ default dtype value is 'd'
93
+ """
94
+ cdef Py_ssize_t i
95
+ cdef bitgen_t *rng
96
+ cdef const char *capsule_name = "BitGenerator"
97
+ cdef np.ndarray randoms
98
+
99
+ capsule = bit_generator.capsule
100
+ # Optional check that the capsule is from a BitGenerator
101
+ if not PyCapsule_IsValid(capsule, capsule_name):
102
+ raise ValueError("Invalid pointer to anon_func_state")
103
+ # Cast the pointer
104
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
105
+
106
+ _dtype = np.dtype(dtype)
107
+ randoms = np.empty(n, dtype=_dtype)
108
+ if _dtype == np.float32:
109
+ with bit_generator.lock:
110
+ random_standard_uniform_fill_f(rng, n, <float*>np.PyArray_DATA(randoms))
111
+ elif _dtype == np.float64:
112
+ with bit_generator.lock:
113
+ random_standard_uniform_fill(rng, n, <double*>np.PyArray_DATA(randoms))
114
+ else:
115
+ raise TypeError('Unsupported dtype %r for random' % _dtype)
116
+ return randoms
117
+
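
Once the extension above has been compiled (the meson.build in the next file builds it under the module name ``extending_distributions``; any other build setup producing an importable module works the same way), ``uniforms_ex`` can be driven from Python roughly like this:

    # Assumes the Cython file above has been built into an importable module
    # named ``extending_distributions``, as the meson.build below does.
    import numpy as np
    from numpy.random import PCG64
    import extending_distributions

    vals64 = extending_distributions.uniforms_ex(PCG64(), 1000)             # float64 path
    vals32 = extending_distributions.uniforms_ex(PCG64(), 1000, dtype='f')  # float32 path
    print(vals64.dtype, vals32.dtype)   # float64 float32
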
venv/lib/python3.10/site-packages/numpy/random/_examples/cython/meson.build ADDED
@@ -0,0 +1,45 @@
1
+ project('random-build-examples', 'c', 'cpp', 'cython')
2
+
3
+ py_mod = import('python')
4
+ py3 = py_mod.find_installation(pure: false)
5
+
6
+ cc = meson.get_compiler('c')
7
+ cy = meson.get_compiler('cython')
8
+
9
+ if not cy.version().version_compare('>=0.29.35')
10
+ error('tests requires Cython >= 0.29.35')
11
+ endif
12
+
13
+ _numpy_abs = run_command(py3, ['-c',
14
+ 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'],
15
+ check: true).stdout().strip()
16
+
17
+ npymath_path = _numpy_abs / 'core' / 'lib'
18
+ npy_include_path = _numpy_abs / 'core' / 'include'
19
+ npyrandom_path = _numpy_abs / 'random' / 'lib'
20
+ npymath_lib = cc.find_library('npymath', dirs: npymath_path)
21
+ npyrandom_lib = cc.find_library('npyrandom', dirs: npyrandom_path)
22
+
23
+ py3.extension_module(
24
+ 'extending_distributions',
25
+ 'extending_distributions.pyx',
26
+ install: false,
27
+ include_directories: [npy_include_path],
28
+ dependencies: [npyrandom_lib, npymath_lib],
29
+ )
30
+ py3.extension_module(
31
+ 'extending',
32
+ 'extending.pyx',
33
+ install: false,
34
+ include_directories: [npy_include_path],
35
+ dependencies: [npyrandom_lib, npymath_lib],
36
+ )
37
+ py3.extension_module(
38
+ 'extending_cpp',
39
+ 'extending_distributions.pyx',
40
+ install: false,
41
+ override_options : ['cython_language=cpp'],
42
+ cython_args: ['--module-name', 'extending_cpp'],
43
+ include_directories: [npy_include_path],
44
+ dependencies: [npyrandom_lib, npymath_lib],
45
+ )
venv/lib/python3.10/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-310.pyc ADDED
Binary file (2.18 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-310.pyc ADDED
Binary file (2.11 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_examples/numba/extending.py ADDED
@@ -0,0 +1,84 @@
1
+ import numpy as np
2
+ import numba as nb
3
+
4
+ from numpy.random import PCG64
5
+ from timeit import timeit
6
+
7
+ bit_gen = PCG64()
8
+ next_d = bit_gen.cffi.next_double
9
+ state_addr = bit_gen.cffi.state_address
10
+
11
+ def normals(n, state):
12
+ out = np.empty(n)
13
+ for i in range((n + 1) // 2):
14
+ x1 = 2.0 * next_d(state) - 1.0
15
+ x2 = 2.0 * next_d(state) - 1.0
16
+ r2 = x1 * x1 + x2 * x2
17
+ while r2 >= 1.0 or r2 == 0.0:
18
+ x1 = 2.0 * next_d(state) - 1.0
19
+ x2 = 2.0 * next_d(state) - 1.0
20
+ r2 = x1 * x1 + x2 * x2
21
+ f = np.sqrt(-2.0 * np.log(r2) / r2)
22
+ out[2 * i] = f * x1
23
+ if 2 * i + 1 < n:
24
+ out[2 * i + 1] = f * x2
25
+ return out
26
+
27
+ # Compile using Numba
28
+ normalsj = nb.jit(normals, nopython=True)
29
+ # Must use state address not state with numba
30
+ n = 10000
31
+
32
+ def numbacall():
33
+ return normalsj(n, state_addr)
34
+
35
+ rg = np.random.Generator(PCG64())
36
+
37
+ def numpycall():
38
+ return rg.normal(size=n)
39
+
40
+ # Check that the functions work
41
+ r1 = numbacall()
42
+ r2 = numpycall()
43
+ assert r1.shape == (n,)
44
+ assert r1.shape == r2.shape
45
+
46
+ t1 = timeit(numbacall, number=1000)
47
+ print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms')
48
+ t2 = timeit(numpycall, number=1000)
49
+ print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms')
50
+
51
+ # example 2
52
+
53
+ next_u32 = bit_gen.ctypes.next_uint32
54
+ ctypes_state = bit_gen.ctypes.state
55
+
56
+ @nb.jit(nopython=True)
57
+ def bounded_uint(lb, ub, state):
58
+ mask = delta = ub - lb
59
+ mask |= mask >> 1
60
+ mask |= mask >> 2
61
+ mask |= mask >> 4
62
+ mask |= mask >> 8
63
+ mask |= mask >> 16
64
+
65
+ val = next_u32(state) & mask
66
+ while val > delta:
67
+ val = next_u32(state) & mask
68
+
69
+ return lb + val
70
+
71
+
72
+ print(bounded_uint(323, 2394691, ctypes_state.value))
73
+
74
+
75
+ @nb.jit(nopython=True)
76
+ def bounded_uints(lb, ub, n, state):
77
+ out = np.empty(n, dtype=np.uint32)
78
+ for i in range(n):
79
+ out[i] = bounded_uint(lb, ub, state)
80
+
81
+
82
+ bounded_uints(323, 2394691, 10000000, ctypes_state.value)
83
+
84
+
venv/lib/python3.10/site-packages/numpy/random/_examples/numba/extending_distributions.py ADDED
@@ -0,0 +1,67 @@
1
+ r"""
2
+ Building the required library in this example requires a source distribution
3
+ of NumPy or a clone of the NumPy git repository, since distributions.c is not
4
+ included in binary distributions.
5
+
6
+ On *nix, execute in numpy/random/src/distributions
7
+
8
+ export PYTHON_VERSION=3.8 # Python version
9
+ export PYTHON_INCLUDE=#path to Python's include folder, usually \
10
+ ${PYTHON_HOME}/include/python${PYTHON_VERSION}m
11
+ export NUMPY_INCLUDE=#path to numpy's include folder, usually \
12
+ ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
13
+ gcc -shared -o libdistributions.so -fPIC distributions.c \
14
+ -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
15
+ mv libdistributions.so ../../_examples/numba/
16
+
17
+ On Windows
18
+
19
+ rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example
20
+ set PYTHON_HOME=c:\Anaconda
21
+ set PYTHON_VERSION=38
22
+ cl.exe /LD .\distributions.c -DDLL_EXPORT \
23
+ -I%PYTHON_HOME%\lib\site-packages\numpy\core\include \
24
+ -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib
25
+ move distributions.dll ../../_examples/numba/
26
+ """
27
+ import os
28
+
29
+ import numba as nb
30
+ import numpy as np
31
+ from cffi import FFI
32
+
33
+ from numpy.random import PCG64
34
+
35
+ ffi = FFI()
36
+ if os.path.exists('./distributions.dll'):
37
+ lib = ffi.dlopen('./distributions.dll')
38
+ elif os.path.exists('./libdistributions.so'):
39
+ lib = ffi.dlopen('./libdistributions.so')
40
+ else:
41
+ raise RuntimeError('Required DLL/so file was not found.')
42
+
43
+ ffi.cdef("""
44
+ double random_standard_normal(void *bitgen_state);
45
+ """)
46
+ x = PCG64()
47
+ xffi = x.cffi
48
+ bit_generator = xffi.bit_generator
49
+
50
+ random_standard_normal = lib.random_standard_normal
51
+
52
+
53
+ def normals(n, bit_generator):
54
+ out = np.empty(n)
55
+ for i in range(n):
56
+ out[i] = random_standard_normal(bit_generator)
57
+ return out
58
+
59
+
60
+ normalsj = nb.jit(normals, nopython=True)
61
+
62
+ # Numba requires a memory address for void *
63
+ # Can also get address from x.ctypes.bit_generator.value
64
+ bit_generator_address = int(ffi.cast('uintptr_t', bit_generator))
65
+
66
+ norm = normalsj(1000, bit_generator_address)
67
+ print(norm[:12])
venv/lib/python3.10/site-packages/numpy/random/_generator.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (976 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_generator.pyi ADDED
@@ -0,0 +1,681 @@
1
+ from collections.abc import Callable
2
+ from typing import Any, Union, overload, TypeVar, Literal
3
+
4
+ from numpy import (
5
+ bool_,
6
+ dtype,
7
+ float32,
8
+ float64,
9
+ int8,
10
+ int16,
11
+ int32,
12
+ int64,
13
+ int_,
14
+ ndarray,
15
+ uint,
16
+ uint8,
17
+ uint16,
18
+ uint32,
19
+ uint64,
20
+ )
21
+ from numpy.random import BitGenerator, SeedSequence
22
+ from numpy._typing import (
23
+ ArrayLike,
24
+ _ArrayLikeFloat_co,
25
+ _ArrayLikeInt_co,
26
+ _DoubleCodes,
27
+ _DTypeLikeBool,
28
+ _DTypeLikeInt,
29
+ _DTypeLikeUInt,
30
+ _Float32Codes,
31
+ _Float64Codes,
32
+ _FloatLike_co,
33
+ _Int8Codes,
34
+ _Int16Codes,
35
+ _Int32Codes,
36
+ _Int64Codes,
37
+ _IntCodes,
38
+ _ShapeLike,
39
+ _SingleCodes,
40
+ _SupportsDType,
41
+ _UInt8Codes,
42
+ _UInt16Codes,
43
+ _UInt32Codes,
44
+ _UInt64Codes,
45
+ _UIntCodes,
46
+ )
47
+
48
+ _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
49
+
50
+ _DTypeLikeFloat32 = Union[
51
+ dtype[float32],
52
+ _SupportsDType[dtype[float32]],
53
+ type[float32],
54
+ _Float32Codes,
55
+ _SingleCodes,
56
+ ]
57
+
58
+ _DTypeLikeFloat64 = Union[
59
+ dtype[float64],
60
+ _SupportsDType[dtype[float64]],
61
+ type[float],
62
+ type[float64],
63
+ _Float64Codes,
64
+ _DoubleCodes,
65
+ ]
66
+
67
+ class Generator:
68
+ def __init__(self, bit_generator: BitGenerator) -> None: ...
69
+ def __repr__(self) -> str: ...
70
+ def __str__(self) -> str: ...
71
+ def __getstate__(self) -> dict[str, Any]: ...
72
+ def __setstate__(self, state: dict[str, Any]) -> None: ...
73
+ def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ...
74
+ @property
75
+ def bit_generator(self) -> BitGenerator: ...
76
+ def spawn(self, n_children: int) -> list[Generator]: ...
77
+ def bytes(self, length: int) -> bytes: ...
78
+ @overload
79
+ def standard_normal( # type: ignore[misc]
80
+ self,
81
+ size: None = ...,
82
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
83
+ out: None = ...,
84
+ ) -> float: ...
85
+ @overload
86
+ def standard_normal( # type: ignore[misc]
87
+ self,
88
+ size: _ShapeLike = ...,
89
+ ) -> ndarray[Any, dtype[float64]]: ...
90
+ @overload
91
+ def standard_normal( # type: ignore[misc]
92
+ self,
93
+ *,
94
+ out: ndarray[Any, dtype[float64]] = ...,
95
+ ) -> ndarray[Any, dtype[float64]]: ...
96
+ @overload
97
+ def standard_normal( # type: ignore[misc]
98
+ self,
99
+ size: _ShapeLike = ...,
100
+ dtype: _DTypeLikeFloat32 = ...,
101
+ out: None | ndarray[Any, dtype[float32]] = ...,
102
+ ) -> ndarray[Any, dtype[float32]]: ...
103
+ @overload
104
+ def standard_normal( # type: ignore[misc]
105
+ self,
106
+ size: _ShapeLike = ...,
107
+ dtype: _DTypeLikeFloat64 = ...,
108
+ out: None | ndarray[Any, dtype[float64]] = ...,
109
+ ) -> ndarray[Any, dtype[float64]]: ...
110
+ @overload
111
+ def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ...
112
+ @overload
113
+ def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]: ...
114
+ @overload
115
+ def standard_exponential( # type: ignore[misc]
116
+ self,
117
+ size: None = ...,
118
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
119
+ method: Literal["zig", "inv"] = ...,
120
+ out: None = ...,
121
+ ) -> float: ...
122
+ @overload
123
+ def standard_exponential(
124
+ self,
125
+ size: _ShapeLike = ...,
126
+ ) -> ndarray[Any, dtype[float64]]: ...
127
+ @overload
128
+ def standard_exponential(
129
+ self,
130
+ *,
131
+ out: ndarray[Any, dtype[float64]] = ...,
132
+ ) -> ndarray[Any, dtype[float64]]: ...
133
+ @overload
134
+ def standard_exponential(
135
+ self,
136
+ size: _ShapeLike = ...,
137
+ *,
138
+ method: Literal["zig", "inv"] = ...,
139
+ out: None | ndarray[Any, dtype[float64]] = ...,
140
+ ) -> ndarray[Any, dtype[float64]]: ...
141
+ @overload
142
+ def standard_exponential(
143
+ self,
144
+ size: _ShapeLike = ...,
145
+ dtype: _DTypeLikeFloat32 = ...,
146
+ method: Literal["zig", "inv"] = ...,
147
+ out: None | ndarray[Any, dtype[float32]] = ...,
148
+ ) -> ndarray[Any, dtype[float32]]: ...
149
+ @overload
150
+ def standard_exponential(
151
+ self,
152
+ size: _ShapeLike = ...,
153
+ dtype: _DTypeLikeFloat64 = ...,
154
+ method: Literal["zig", "inv"] = ...,
155
+ out: None | ndarray[Any, dtype[float64]] = ...,
156
+ ) -> ndarray[Any, dtype[float64]]: ...
157
+ @overload
158
+ def random( # type: ignore[misc]
159
+ self,
160
+ size: None = ...,
161
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
162
+ out: None = ...,
163
+ ) -> float: ...
164
+ @overload
165
+ def random(
166
+ self,
167
+ *,
168
+ out: ndarray[Any, dtype[float64]] = ...,
169
+ ) -> ndarray[Any, dtype[float64]]: ...
170
+ @overload
171
+ def random(
172
+ self,
173
+ size: _ShapeLike = ...,
174
+ *,
175
+ out: None | ndarray[Any, dtype[float64]] = ...,
176
+ ) -> ndarray[Any, dtype[float64]]: ...
177
+ @overload
178
+ def random(
179
+ self,
180
+ size: _ShapeLike = ...,
181
+ dtype: _DTypeLikeFloat32 = ...,
182
+ out: None | ndarray[Any, dtype[float32]] = ...,
183
+ ) -> ndarray[Any, dtype[float32]]: ...
184
+ @overload
185
+ def random(
186
+ self,
187
+ size: _ShapeLike = ...,
188
+ dtype: _DTypeLikeFloat64 = ...,
189
+ out: None | ndarray[Any, dtype[float64]] = ...,
190
+ ) -> ndarray[Any, dtype[float64]]: ...
191
+ @overload
192
+ def beta(
193
+ self,
194
+ a: _FloatLike_co,
195
+ b: _FloatLike_co,
196
+ size: None = ...,
197
+ ) -> float: ... # type: ignore[misc]
198
+ @overload
199
+ def beta(
200
+ self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
201
+ ) -> ndarray[Any, dtype[float64]]: ...
202
+ @overload
203
+ def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
204
+ @overload
205
+ def exponential(
206
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
207
+ ) -> ndarray[Any, dtype[float64]]: ...
208
+ @overload
209
+ def integers( # type: ignore[misc]
210
+ self,
211
+ low: int,
212
+ high: None | int = ...,
213
+ ) -> int: ...
214
+ @overload
215
+ def integers( # type: ignore[misc]
216
+ self,
217
+ low: int,
218
+ high: None | int = ...,
219
+ size: None = ...,
220
+ dtype: _DTypeLikeBool = ...,
221
+ endpoint: bool = ...,
222
+ ) -> bool: ...
223
+ @overload
224
+ def integers( # type: ignore[misc]
225
+ self,
226
+ low: int,
227
+ high: None | int = ...,
228
+ size: None = ...,
229
+ dtype: _DTypeLikeInt | _DTypeLikeUInt = ...,
230
+ endpoint: bool = ...,
231
+ ) -> int: ...
232
+ @overload
233
+ def integers( # type: ignore[misc]
234
+ self,
235
+ low: _ArrayLikeInt_co,
236
+ high: None | _ArrayLikeInt_co = ...,
237
+ size: None | _ShapeLike = ...,
238
+ ) -> ndarray[Any, dtype[int64]]: ...
239
+ @overload
240
+ def integers( # type: ignore[misc]
241
+ self,
242
+ low: _ArrayLikeInt_co,
243
+ high: None | _ArrayLikeInt_co = ...,
244
+ size: None | _ShapeLike = ...,
245
+ dtype: _DTypeLikeBool = ...,
246
+ endpoint: bool = ...,
247
+ ) -> ndarray[Any, dtype[bool_]]: ...
248
+ @overload
249
+ def integers( # type: ignore[misc]
250
+ self,
251
+ low: _ArrayLikeInt_co,
252
+ high: None | _ArrayLikeInt_co = ...,
253
+ size: None | _ShapeLike = ...,
254
+ dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
255
+ endpoint: bool = ...,
256
+ ) -> ndarray[Any, dtype[int8]]: ...
257
+ @overload
258
+ def integers( # type: ignore[misc]
259
+ self,
260
+ low: _ArrayLikeInt_co,
261
+ high: None | _ArrayLikeInt_co = ...,
262
+ size: None | _ShapeLike = ...,
263
+ dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
264
+ endpoint: bool = ...,
265
+ ) -> ndarray[Any, dtype[int16]]: ...
266
+ @overload
267
+ def integers( # type: ignore[misc]
268
+ self,
269
+ low: _ArrayLikeInt_co,
270
+ high: None | _ArrayLikeInt_co = ...,
271
+ size: None | _ShapeLike = ...,
272
+ dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
273
+ endpoint: bool = ...,
274
+ ) -> ndarray[Any, dtype[int32]]: ...
275
+ @overload
276
+ def integers( # type: ignore[misc]
277
+ self,
278
+ low: _ArrayLikeInt_co,
279
+ high: None | _ArrayLikeInt_co = ...,
280
+ size: None | _ShapeLike = ...,
281
+ dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
282
+ endpoint: bool = ...,
283
+ ) -> ndarray[Any, dtype[int64]]: ...
284
+ @overload
285
+ def integers( # type: ignore[misc]
286
+ self,
287
+ low: _ArrayLikeInt_co,
288
+ high: None | _ArrayLikeInt_co = ...,
289
+ size: None | _ShapeLike = ...,
290
+ dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
291
+ endpoint: bool = ...,
292
+ ) -> ndarray[Any, dtype[uint8]]: ...
293
+ @overload
294
+ def integers( # type: ignore[misc]
295
+ self,
296
+ low: _ArrayLikeInt_co,
297
+ high: None | _ArrayLikeInt_co = ...,
298
+ size: None | _ShapeLike = ...,
299
+ dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
300
+ endpoint: bool = ...,
301
+ ) -> ndarray[Any, dtype[uint16]]: ...
302
+ @overload
303
+ def integers( # type: ignore[misc]
304
+ self,
305
+ low: _ArrayLikeInt_co,
306
+ high: None | _ArrayLikeInt_co = ...,
307
+ size: None | _ShapeLike = ...,
308
+ dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
309
+ endpoint: bool = ...,
310
+ ) -> ndarray[Any, dtype[uint32]]: ...
311
+ @overload
312
+ def integers( # type: ignore[misc]
313
+ self,
314
+ low: _ArrayLikeInt_co,
315
+ high: None | _ArrayLikeInt_co = ...,
316
+ size: None | _ShapeLike = ...,
317
+ dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
318
+ endpoint: bool = ...,
319
+ ) -> ndarray[Any, dtype[uint64]]: ...
320
+ @overload
321
+ def integers( # type: ignore[misc]
322
+ self,
323
+ low: _ArrayLikeInt_co,
324
+ high: None | _ArrayLikeInt_co = ...,
325
+ size: None | _ShapeLike = ...,
326
+ dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
327
+ endpoint: bool = ...,
328
+ ) -> ndarray[Any, dtype[int_]]: ...
329
+ @overload
330
+ def integers( # type: ignore[misc]
331
+ self,
332
+ low: _ArrayLikeInt_co,
333
+ high: None | _ArrayLikeInt_co = ...,
334
+ size: None | _ShapeLike = ...,
335
+ dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
336
+ endpoint: bool = ...,
337
+ ) -> ndarray[Any, dtype[uint]]: ...
338
+ # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> _T | ndarray[Any,Any]
339
+ @overload
340
+ def choice(
341
+ self,
342
+ a: int,
343
+ size: None = ...,
344
+ replace: bool = ...,
345
+ p: None | _ArrayLikeFloat_co = ...,
346
+ axis: int = ...,
347
+ shuffle: bool = ...,
348
+ ) -> int: ...
349
+ @overload
350
+ def choice(
351
+ self,
352
+ a: int,
353
+ size: _ShapeLike = ...,
354
+ replace: bool = ...,
355
+ p: None | _ArrayLikeFloat_co = ...,
356
+ axis: int = ...,
357
+ shuffle: bool = ...,
358
+ ) -> ndarray[Any, dtype[int64]]: ...
359
+ @overload
360
+ def choice(
361
+ self,
362
+ a: ArrayLike,
363
+ size: None = ...,
364
+ replace: bool = ...,
365
+ p: None | _ArrayLikeFloat_co = ...,
366
+ axis: int = ...,
367
+ shuffle: bool = ...,
368
+ ) -> Any: ...
369
+ @overload
370
+ def choice(
371
+ self,
372
+ a: ArrayLike,
373
+ size: _ShapeLike = ...,
374
+ replace: bool = ...,
375
+ p: None | _ArrayLikeFloat_co = ...,
376
+ axis: int = ...,
377
+ shuffle: bool = ...,
378
+ ) -> ndarray[Any, Any]: ...
379
+ @overload
380
+ def uniform(
381
+ self,
382
+ low: _FloatLike_co = ...,
383
+ high: _FloatLike_co = ...,
384
+ size: None = ...,
385
+ ) -> float: ... # type: ignore[misc]
386
+ @overload
387
+ def uniform(
388
+ self,
389
+ low: _ArrayLikeFloat_co = ...,
390
+ high: _ArrayLikeFloat_co = ...,
391
+ size: None | _ShapeLike = ...,
392
+ ) -> ndarray[Any, dtype[float64]]: ...
393
+ @overload
394
+ def normal(
395
+ self,
396
+ loc: _FloatLike_co = ...,
397
+ scale: _FloatLike_co = ...,
398
+ size: None = ...,
399
+ ) -> float: ... # type: ignore[misc]
400
+ @overload
401
+ def normal(
402
+ self,
403
+ loc: _ArrayLikeFloat_co = ...,
404
+ scale: _ArrayLikeFloat_co = ...,
405
+ size: None | _ShapeLike = ...,
406
+ ) -> ndarray[Any, dtype[float64]]: ...
407
+ @overload
408
+ def standard_gamma( # type: ignore[misc]
409
+ self,
410
+ shape: _FloatLike_co,
411
+ size: None = ...,
412
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
413
+ out: None = ...,
414
+ ) -> float: ...
415
+ @overload
416
+ def standard_gamma(
417
+ self,
418
+ shape: _ArrayLikeFloat_co,
419
+ size: None | _ShapeLike = ...,
420
+ ) -> ndarray[Any, dtype[float64]]: ...
421
+ @overload
422
+ def standard_gamma(
423
+ self,
424
+ shape: _ArrayLikeFloat_co,
425
+ *,
426
+ out: ndarray[Any, dtype[float64]] = ...,
427
+ ) -> ndarray[Any, dtype[float64]]: ...
428
+ @overload
429
+ def standard_gamma(
430
+ self,
431
+ shape: _ArrayLikeFloat_co,
432
+ size: None | _ShapeLike = ...,
433
+ dtype: _DTypeLikeFloat32 = ...,
434
+ out: None | ndarray[Any, dtype[float32]] = ...,
435
+ ) -> ndarray[Any, dtype[float32]]: ...
436
+ @overload
437
+ def standard_gamma(
438
+ self,
439
+ shape: _ArrayLikeFloat_co,
440
+ size: None | _ShapeLike = ...,
441
+ dtype: _DTypeLikeFloat64 = ...,
442
+ out: None | ndarray[Any, dtype[float64]] = ...,
443
+ ) -> ndarray[Any, dtype[float64]]: ...
444
+ @overload
445
+ def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
446
+ @overload
447
+ def gamma(
448
+ self,
449
+ shape: _ArrayLikeFloat_co,
450
+ scale: _ArrayLikeFloat_co = ...,
451
+ size: None | _ShapeLike = ...,
452
+ ) -> ndarray[Any, dtype[float64]]: ...
453
+ @overload
454
+ def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
455
+ @overload
456
+ def f(
457
+ self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
458
+ ) -> ndarray[Any, dtype[float64]]: ...
459
+ @overload
460
+ def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
461
+ @overload
462
+ def noncentral_f(
463
+ self,
464
+ dfnum: _ArrayLikeFloat_co,
465
+ dfden: _ArrayLikeFloat_co,
466
+ nonc: _ArrayLikeFloat_co,
467
+ size: None | _ShapeLike = ...,
468
+ ) -> ndarray[Any, dtype[float64]]: ...
469
+ @overload
470
+ def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
471
+ @overload
472
+ def chisquare(
473
+ self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
474
+ ) -> ndarray[Any, dtype[float64]]: ...
475
+ @overload
476
+ def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
477
+ @overload
478
+ def noncentral_chisquare(
479
+ self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
480
+ ) -> ndarray[Any, dtype[float64]]: ...
481
+ @overload
482
+ def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
483
+ @overload
484
+ def standard_t(
485
+ self, df: _ArrayLikeFloat_co, size: None = ...
486
+ ) -> ndarray[Any, dtype[float64]]: ...
487
+ @overload
488
+ def standard_t(
489
+ self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
490
+ ) -> ndarray[Any, dtype[float64]]: ...
491
+ @overload
492
+ def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
493
+ @overload
494
+ def vonmises(
495
+ self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
496
+ ) -> ndarray[Any, dtype[float64]]: ...
497
+ @overload
498
+ def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
499
+ @overload
500
+ def pareto(
501
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
502
+ ) -> ndarray[Any, dtype[float64]]: ...
503
+ @overload
504
+ def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
505
+ @overload
506
+ def weibull(
507
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
508
+ ) -> ndarray[Any, dtype[float64]]: ...
509
+ @overload
510
+ def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
511
+ @overload
512
+ def power(
513
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
514
+ ) -> ndarray[Any, dtype[float64]]: ...
515
+ @overload
516
+ def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
517
+ @overload
518
+ def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
519
+ @overload
520
+ def laplace(
521
+ self,
522
+ loc: _FloatLike_co = ...,
523
+ scale: _FloatLike_co = ...,
524
+ size: None = ...,
525
+ ) -> float: ... # type: ignore[misc]
526
+ @overload
527
+ def laplace(
528
+ self,
529
+ loc: _ArrayLikeFloat_co = ...,
530
+ scale: _ArrayLikeFloat_co = ...,
531
+ size: None | _ShapeLike = ...,
532
+ ) -> ndarray[Any, dtype[float64]]: ...
533
+ @overload
534
+ def gumbel(
535
+ self,
536
+ loc: _FloatLike_co = ...,
537
+ scale: _FloatLike_co = ...,
538
+ size: None = ...,
539
+ ) -> float: ... # type: ignore[misc]
540
+ @overload
541
+ def gumbel(
542
+ self,
543
+ loc: _ArrayLikeFloat_co = ...,
544
+ scale: _ArrayLikeFloat_co = ...,
545
+ size: None | _ShapeLike = ...,
546
+ ) -> ndarray[Any, dtype[float64]]: ...
547
+ @overload
548
+ def logistic(
549
+ self,
550
+ loc: _FloatLike_co = ...,
551
+ scale: _FloatLike_co = ...,
552
+ size: None = ...,
553
+ ) -> float: ... # type: ignore[misc]
554
+ @overload
555
+ def logistic(
556
+ self,
557
+ loc: _ArrayLikeFloat_co = ...,
558
+ scale: _ArrayLikeFloat_co = ...,
559
+ size: None | _ShapeLike = ...,
560
+ ) -> ndarray[Any, dtype[float64]]: ...
561
+ @overload
562
+ def lognormal(
563
+ self,
564
+ mean: _FloatLike_co = ...,
565
+ sigma: _FloatLike_co = ...,
566
+ size: None = ...,
567
+ ) -> float: ... # type: ignore[misc]
568
+ @overload
569
+ def lognormal(
570
+ self,
571
+ mean: _ArrayLikeFloat_co = ...,
572
+ sigma: _ArrayLikeFloat_co = ...,
573
+ size: None | _ShapeLike = ...,
574
+ ) -> ndarray[Any, dtype[float64]]: ...
575
+ @overload
576
+ def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
577
+ @overload
578
+ def rayleigh(
579
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
580
+ ) -> ndarray[Any, dtype[float64]]: ...
581
+ @overload
582
+ def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
583
+ @overload
584
+ def wald(
585
+ self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
586
+ ) -> ndarray[Any, dtype[float64]]: ...
587
+ @overload
588
+ def triangular(
589
+ self,
590
+ left: _FloatLike_co,
591
+ mode: _FloatLike_co,
592
+ right: _FloatLike_co,
593
+ size: None = ...,
594
+ ) -> float: ... # type: ignore[misc]
595
+ @overload
596
+ def triangular(
597
+ self,
598
+ left: _ArrayLikeFloat_co,
599
+ mode: _ArrayLikeFloat_co,
600
+ right: _ArrayLikeFloat_co,
601
+ size: None | _ShapeLike = ...,
602
+ ) -> ndarray[Any, dtype[float64]]: ...
603
+ @overload
604
+ def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
605
+ @overload
606
+ def binomial(
607
+ self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
608
+ ) -> ndarray[Any, dtype[int64]]: ...
609
+ @overload
610
+ def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
611
+ @overload
612
+ def negative_binomial(
613
+ self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
614
+ ) -> ndarray[Any, dtype[int64]]: ...
615
+ @overload
616
+ def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc]
617
+ @overload
618
+ def poisson(
619
+ self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
620
+ ) -> ndarray[Any, dtype[int64]]: ...
621
+ @overload
622
+ def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
623
+ @overload
624
+ def zipf(
625
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
626
+ ) -> ndarray[Any, dtype[int64]]: ...
627
+ @overload
628
+ def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
629
+ @overload
630
+ def geometric(
631
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
632
+ ) -> ndarray[Any, dtype[int64]]: ...
633
+ @overload
634
+ def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
635
+ @overload
636
+ def hypergeometric(
637
+ self,
638
+ ngood: _ArrayLikeInt_co,
639
+ nbad: _ArrayLikeInt_co,
640
+ nsample: _ArrayLikeInt_co,
641
+ size: None | _ShapeLike = ...,
642
+ ) -> ndarray[Any, dtype[int64]]: ...
643
+ @overload
644
+ def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
645
+ @overload
646
+ def logseries(
647
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
648
+ ) -> ndarray[Any, dtype[int64]]: ...
649
+ def multivariate_normal(
650
+ self,
651
+ mean: _ArrayLikeFloat_co,
652
+ cov: _ArrayLikeFloat_co,
653
+ size: None | _ShapeLike = ...,
654
+ check_valid: Literal["warn", "raise", "ignore"] = ...,
655
+ tol: float = ...,
656
+ *,
657
+ method: Literal["svd", "eigh", "cholesky"] = ...,
658
+ ) -> ndarray[Any, dtype[float64]]: ...
659
+ def multinomial(
660
+ self, n: _ArrayLikeInt_co,
661
+ pvals: _ArrayLikeFloat_co,
662
+ size: None | _ShapeLike = ...
663
+ ) -> ndarray[Any, dtype[int64]]: ...
664
+ def multivariate_hypergeometric(
665
+ self,
666
+ colors: _ArrayLikeInt_co,
667
+ nsample: int,
668
+ size: None | _ShapeLike = ...,
669
+ method: Literal["marginals", "count"] = ...,
670
+ ) -> ndarray[Any, dtype[int64]]: ...
671
+ def dirichlet(
672
+ self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
673
+ ) -> ndarray[Any, dtype[float64]]: ...
674
+ def permuted(
675
+ self, x: ArrayLike, *, axis: None | int = ..., out: None | ndarray[Any, Any] = ...
676
+ ) -> ndarray[Any, Any]: ...
677
+ def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ...
678
+
679
+ def default_rng(
680
+ seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ...
681
+ ) -> Generator: ...
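Usage sketch for the Generator API annotated above (seed, shapes and parameter values are illustrative): the size=None overloads return plain Python scalars, while the array overloads return float64/int64 ndarrays.

import numpy as np

rng = np.random.default_rng(12345)                  # Generator over the default bit generator
x = rng.standard_cauchy()                           # size=None -> Python float
a = rng.laplace(loc=0.0, scale=2.0, size=(3, 4))    # -> ndarray of float64, shape (3, 4)
k = rng.binomial(n=10, p=0.25, size=5)              # -> ndarray of int64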
venv/lib/python3.10/site-packages/numpy/random/_mt19937.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (120 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_mt19937.pyi ADDED
@@ -0,0 +1,22 @@
1
+ from typing import Any, TypedDict
2
+
3
+ from numpy import dtype, ndarray, uint32
4
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
5
+ from numpy._typing import _ArrayLikeInt_co
6
+
7
+ class _MT19937Internal(TypedDict):
8
+ key: ndarray[Any, dtype[uint32]]
9
+ pos: int
10
+
11
+ class _MT19937State(TypedDict):
12
+ bit_generator: str
13
+ state: _MT19937Internal
14
+
15
+ class MT19937(BitGenerator):
16
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
17
+ def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ...
18
+ def jumped(self, jumps: int = ...) -> MT19937: ...
19
+ @property
20
+ def state(self) -> _MT19937State: ...
21
+ @state.setter
22
+ def state(self, value: _MT19937State) -> None: ...
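Usage sketch (seed value illustrative): MT19937 is normally wrapped in a Generator, and jumped() returns a new MT19937 whose state has been advanced far ahead, giving an independent stream.

from numpy.random import Generator, MT19937, SeedSequence

bg = MT19937(SeedSequence(42))   # seed through a SeedSequence
rng = Generator(bg)
rng.random(3)
bg2 = bg.jumped()                # state advanced far ahead of bg
rng2 = Generator(bg2)            # stream independent of rng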
venv/lib/python3.10/site-packages/numpy/random/_pcg64.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (126 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_pcg64.pyi ADDED
@@ -0,0 +1,42 @@
1
+ from typing import TypedDict
2
+
3
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
4
+ from numpy._typing import _ArrayLikeInt_co
5
+
6
+ class _PCG64Internal(TypedDict):
7
+ state: int
8
+ inc: int
9
+
10
+ class _PCG64State(TypedDict):
11
+ bit_generator: str
12
+ state: _PCG64Internal
13
+ has_uint32: int
14
+ uinteger: int
15
+
16
+ class PCG64(BitGenerator):
17
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
18
+ def jumped(self, jumps: int = ...) -> PCG64: ...
19
+ @property
20
+ def state(
21
+ self,
22
+ ) -> _PCG64State: ...
23
+ @state.setter
24
+ def state(
25
+ self,
26
+ value: _PCG64State,
27
+ ) -> None: ...
28
+ def advance(self, delta: int) -> PCG64: ...
29
+
30
+ class PCG64DXSM(BitGenerator):
31
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
32
+ def jumped(self, jumps: int = ...) -> PCG64DXSM: ...
33
+ @property
34
+ def state(
35
+ self,
36
+ ) -> _PCG64State: ...
37
+ @state.setter
38
+ def state(
39
+ self,
40
+ value: _PCG64State,
41
+ ) -> None: ...
42
+ def advance(self, delta: int) -> PCG64DXSM: ...
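Usage sketch (seed illustrative) of the two skip-ahead methods typed above: advance() moves the state by an exact number of steps, while jumped() returns a copy moved a fixed long distance, the usual way to build non-overlapping parallel streams.

from numpy.random import Generator, PCG64

bg = PCG64(1234)
bg.advance(1_000_000)            # skip the state forward by exactly 1,000,000 steps
streams = [Generator(PCG64(1234).jumped(i)) for i in range(1, 5)]   # parallel streams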
venv/lib/python3.10/site-packages/numpy/random/_philox.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (107 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_philox.pyi ADDED
@@ -0,0 +1,36 @@
1
+ from typing import Any, TypedDict
2
+
3
+ from numpy import dtype, ndarray, uint64
4
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
5
+ from numpy._typing import _ArrayLikeInt_co
6
+
7
+ class _PhiloxInternal(TypedDict):
8
+ counter: ndarray[Any, dtype[uint64]]
9
+ key: ndarray[Any, dtype[uint64]]
10
+
11
+ class _PhiloxState(TypedDict):
12
+ bit_generator: str
13
+ state: _PhiloxInternal
14
+ buffer: ndarray[Any, dtype[uint64]]
15
+ buffer_pos: int
16
+ has_uint32: int
17
+ uinteger: int
18
+
19
+ class Philox(BitGenerator):
20
+ def __init__(
21
+ self,
22
+ seed: None | _ArrayLikeInt_co | SeedSequence = ...,
23
+ counter: None | _ArrayLikeInt_co = ...,
24
+ key: None | _ArrayLikeInt_co = ...,
25
+ ) -> None: ...
26
+ @property
27
+ def state(
28
+ self,
29
+ ) -> _PhiloxState: ...
30
+ @state.setter
31
+ def state(
32
+ self,
33
+ value: _PhiloxState,
34
+ ) -> None: ...
35
+ def jumped(self, jumps: int = ...) -> Philox: ...
36
+ def advance(self, delta: int) -> Philox: ...
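Usage sketch (key value illustrative): Philox is counter-based, so it can be keyed directly instead of seeded, and advance() steps its counter.

from numpy.random import Generator, Philox

bg = Philox(key=2**96 + 2**33)   # explicit key instead of a seed
rng = Generator(bg)
rng.standard_normal(2)
bg.advance(10)                   # move the 256-bit counter forward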
venv/lib/python3.10/site-packages/numpy/random/_pickle.py ADDED
@@ -0,0 +1,80 @@
1
+ from .mtrand import RandomState
2
+ from ._philox import Philox
3
+ from ._pcg64 import PCG64, PCG64DXSM
4
+ from ._sfc64 import SFC64
5
+
6
+ from ._generator import Generator
7
+ from ._mt19937 import MT19937
8
+
9
+ BitGenerators = {'MT19937': MT19937,
10
+ 'PCG64': PCG64,
11
+ 'PCG64DXSM': PCG64DXSM,
12
+ 'Philox': Philox,
13
+ 'SFC64': SFC64,
14
+ }
15
+
16
+
17
+ def __bit_generator_ctor(bit_generator_name='MT19937'):
18
+ """
19
+ Pickling helper function that returns a bit generator object
20
+
21
+ Parameters
22
+ ----------
23
+ bit_generator_name : str
24
+ String containing the name of the BitGenerator
25
+
26
+ Returns
27
+ -------
28
+ bit_generator : BitGenerator
29
+ BitGenerator instance
30
+ """
31
+ if bit_generator_name in BitGenerators:
32
+ bit_generator = BitGenerators[bit_generator_name]
33
+ else:
34
+ raise ValueError(str(bit_generator_name) + ' is not a known '
35
+ 'BitGenerator module.')
36
+
37
+ return bit_generator()
38
+
39
+
40
+ def __generator_ctor(bit_generator_name="MT19937",
41
+ bit_generator_ctor=__bit_generator_ctor):
42
+ """
43
+ Pickling helper function that returns a Generator object
44
+
45
+ Parameters
46
+ ----------
47
+ bit_generator_name : str
48
+ String containing the core BitGenerator's name
49
+ bit_generator_ctor : callable, optional
50
+ Callable function that takes bit_generator_name as its only argument
51
+ and returns an instantized bit generator.
52
+
53
+ Returns
54
+ -------
55
+ rg : Generator
56
+ Generator using the named core BitGenerator
57
+ """
58
+ return Generator(bit_generator_ctor(bit_generator_name))
59
+
60
+
61
+ def __randomstate_ctor(bit_generator_name="MT19937",
62
+ bit_generator_ctor=__bit_generator_ctor):
63
+ """
64
+ Pickling helper function that returns a legacy RandomState-like object
65
+
66
+ Parameters
67
+ ----------
68
+ bit_generator_name : str
69
+ String containing the core BitGenerator's name
70
+ bit_generator_ctor : callable, optional
71
+ Callable function that takes bit_generator_name as its only argument
72
+ and returns an instantized bit generator.
73
+
74
+ Returns
75
+ -------
76
+ rs : RandomState
77
+ Legacy RandomState using the named core BitGenerator
78
+ """
79
+
80
+ return RandomState(bit_generator_ctor(bit_generator_name))
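Round-trip sketch of what these helpers enable (seed illustrative): pickling a Generator preserves its bit generator and state, so the unpickled copy continues the identical stream.

import pickle
import numpy as np

rng = np.random.default_rng(7)
rng.standard_normal(3)                    # advance the state a little
clone = pickle.loads(pickle.dumps(rng))   # reconstructed via the ctor helpers above
assert np.array_equal(clone.standard_normal(3), rng.standard_normal(3))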
venv/lib/python3.10/site-packages/numpy/random/_sfc64.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (76.7 kB).
 
venv/lib/python3.10/site-packages/numpy/random/_sfc64.pyi ADDED
@@ -0,0 +1,28 @@
1
+ from typing import Any, TypedDict
2
+
3
+ from numpy import dtype as dtype
4
+ from numpy import ndarray as ndarray
5
+ from numpy import uint64
6
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
7
+ from numpy._typing import _ArrayLikeInt_co
8
+
9
+ class _SFC64Internal(TypedDict):
10
+ state: ndarray[Any, dtype[uint64]]
11
+
12
+ class _SFC64State(TypedDict):
13
+ bit_generator: str
14
+ state: _SFC64Internal
15
+ has_uint32: int
16
+ uinteger: int
17
+
18
+ class SFC64(BitGenerator):
19
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
20
+ @property
21
+ def state(
22
+ self,
23
+ ) -> _SFC64State: ...
24
+ @state.setter
25
+ def state(
26
+ self,
27
+ value: _SFC64State,
28
+ ) -> None: ...
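State round-trip sketch (seed illustrative): the state property returns a plain dict snapshot that can be assigned back to rewind the stream.

from numpy.random import Generator, SFC64

bg = SFC64(98765)
saved = bg.state                  # {'bit_generator': 'SFC64', 'state': {...}, ...}
first = Generator(bg).random(4)
bg.state = saved                  # restore the snapshot
again = Generator(bg).random(4)   # identical to first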
venv/lib/python3.10/site-packages/numpy/random/bit_generator.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (247 kB).
 
venv/lib/python3.10/site-packages/numpy/random/bit_generator.pxd ADDED
@@ -0,0 +1,35 @@
1
+ cimport numpy as np
2
+ from libc.stdint cimport uint32_t, uint64_t
3
+
4
+ cdef extern from "numpy/random/bitgen.h":
5
+ struct bitgen:
6
+ void *state
7
+ uint64_t (*next_uint64)(void *st) nogil
8
+ uint32_t (*next_uint32)(void *st) nogil
9
+ double (*next_double)(void *st) nogil
10
+ uint64_t (*next_raw)(void *st) nogil
11
+
12
+ ctypedef bitgen bitgen_t
13
+
14
+ cdef class BitGenerator():
15
+ cdef readonly object _seed_seq
16
+ cdef readonly object lock
17
+ cdef bitgen_t _bitgen
18
+ cdef readonly object _ctypes
19
+ cdef readonly object _cffi
20
+ cdef readonly object capsule
21
+
22
+
23
+ cdef class SeedSequence():
24
+ cdef readonly object entropy
25
+ cdef readonly tuple spawn_key
26
+ cdef readonly Py_ssize_t pool_size
27
+ cdef readonly object pool
28
+ cdef readonly uint32_t n_children_spawned
29
+
30
+ cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
31
+ np.ndarray[np.npy_uint32, ndim=1] entropy_array)
32
+ cdef get_assembled_entropy(self)
33
+
34
+ cdef class SeedlessSequence():
35
+ pass
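The bitgen struct above is what every bit generator exposes through its capsule; from Python the same machinery is reachable via random_raw() and the ctypes/cffi views. A small illustrative check (seed illustrative):

from numpy.random import PCG64

bg = PCG64(1234)
raw = bg.random_raw(3)   # three raw uint64 outputs (next_raw)
print(type(bg.capsule))  # PyCapsule wrapping a bitgen_t pointer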
venv/lib/python3.10/site-packages/numpy/random/bit_generator.pyi ADDED
@@ -0,0 +1,112 @@
1
+ import abc
2
+ from threading import Lock
3
+ from collections.abc import Callable, Mapping, Sequence
4
+ from typing import (
5
+ Any,
6
+ NamedTuple,
7
+ TypedDict,
8
+ TypeVar,
9
+ Union,
10
+ overload,
11
+ Literal,
12
+ )
13
+
14
+ from numpy import dtype, ndarray, uint32, uint64
15
+ from numpy._typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt32Codes, _UInt64Codes
16
+
17
+ _T = TypeVar("_T")
18
+
19
+ _DTypeLikeUint32 = Union[
20
+ dtype[uint32],
21
+ _SupportsDType[dtype[uint32]],
22
+ type[uint32],
23
+ _UInt32Codes,
24
+ ]
25
+ _DTypeLikeUint64 = Union[
26
+ dtype[uint64],
27
+ _SupportsDType[dtype[uint64]],
28
+ type[uint64],
29
+ _UInt64Codes,
30
+ ]
31
+
32
+ class _SeedSeqState(TypedDict):
33
+ entropy: None | int | Sequence[int]
34
+ spawn_key: tuple[int, ...]
35
+ pool_size: int
36
+ n_children_spawned: int
37
+
38
+ class _Interface(NamedTuple):
39
+ state_address: Any
40
+ state: Any
41
+ next_uint64: Any
42
+ next_uint32: Any
43
+ next_double: Any
44
+ bit_generator: Any
45
+
46
+ class ISeedSequence(abc.ABC):
47
+ @abc.abstractmethod
48
+ def generate_state(
49
+ self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
50
+ ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
51
+
52
+ class ISpawnableSeedSequence(ISeedSequence):
53
+ @abc.abstractmethod
54
+ def spawn(self: _T, n_children: int) -> list[_T]: ...
55
+
56
+ class SeedlessSeedSequence(ISpawnableSeedSequence):
57
+ def generate_state(
58
+ self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
59
+ ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
60
+ def spawn(self: _T, n_children: int) -> list[_T]: ...
61
+
62
+ class SeedSequence(ISpawnableSeedSequence):
63
+ entropy: None | int | Sequence[int]
64
+ spawn_key: tuple[int, ...]
65
+ pool_size: int
66
+ n_children_spawned: int
67
+ pool: ndarray[Any, dtype[uint32]]
68
+ def __init__(
69
+ self,
70
+ entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ...,
71
+ *,
72
+ spawn_key: Sequence[int] = ...,
73
+ pool_size: int = ...,
74
+ n_children_spawned: int = ...,
75
+ ) -> None: ...
76
+ def __repr__(self) -> str: ...
77
+ @property
78
+ def state(
79
+ self,
80
+ ) -> _SeedSeqState: ...
81
+ def generate_state(
82
+ self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
83
+ ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
84
+ def spawn(self, n_children: int) -> list[SeedSequence]: ...
85
+
86
+ class BitGenerator(abc.ABC):
87
+ lock: Lock
88
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
89
+ def __getstate__(self) -> dict[str, Any]: ...
90
+ def __setstate__(self, state: dict[str, Any]) -> None: ...
91
+ def __reduce__(
92
+ self,
93
+ ) -> tuple[Callable[[str], BitGenerator], tuple[str], tuple[dict[str, Any]]]: ...
94
+ @abc.abstractmethod
95
+ @property
96
+ def state(self) -> Mapping[str, Any]: ...
97
+ @state.setter
98
+ def state(self, value: Mapping[str, Any]) -> None: ...
99
+ @property
100
+ def seed_seq(self) -> ISeedSequence: ...
101
+ def spawn(self, n_children: int) -> list[BitGenerator]: ...
102
+ @overload
103
+ def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc]
104
+ @overload
105
+ def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> ndarray[Any, dtype[uint64]]: ... # type: ignore[misc]
106
+ @overload
107
+ def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None: ... # type: ignore[misc]
108
+ def _benchmark(self, cnt: int, method: str = ...) -> None: ...
109
+ @property
110
+ def ctypes(self) -> _Interface: ...
111
+ @property
112
+ def cffi(self) -> _Interface: ...
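Usage sketch of the SeedSequence spawning API annotated above (entropy value illustrative), the standard way to derive independent child seeds for parallel workers:

from numpy.random import SeedSequence, default_rng

ss = SeedSequence(20240101)
children = ss.spawn(3)                    # independent child sequences
rngs = [default_rng(c) for c in children]
words = ss.generate_state(4)              # four uint32 words of seed material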
venv/lib/python3.10/site-packages/numpy/random/c_distributions.pxd ADDED
@@ -0,0 +1,120 @@
1
+ #!python
2
+ #cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
3
+ from numpy cimport npy_intp
4
+
5
+ from libc.stdint cimport (uint64_t, int32_t, int64_t)
6
+ from numpy.random cimport bitgen_t
7
+
8
+ cdef extern from "numpy/random/distributions.h":
9
+
10
+ struct s_binomial_t:
11
+ int has_binomial
12
+ double psave
13
+ int64_t nsave
14
+ double r
15
+ double q
16
+ double fm
17
+ int64_t m
18
+ double p1
19
+ double xm
20
+ double xl
21
+ double xr
22
+ double c
23
+ double laml
24
+ double lamr
25
+ double p2
26
+ double p3
27
+ double p4
28
+
29
+ ctypedef s_binomial_t binomial_t
30
+
31
+ float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
32
+ double random_standard_uniform(bitgen_t *bitgen_state) nogil
33
+ void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil
34
+ void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
35
+
36
+ double random_standard_exponential(bitgen_t *bitgen_state) nogil
37
+ float random_standard_exponential_f(bitgen_t *bitgen_state) nogil
38
+ void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
39
+ void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
40
+ void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
41
+ void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
42
+
43
+ double random_standard_normal(bitgen_t* bitgen_state) nogil
44
+ float random_standard_normal_f(bitgen_t *bitgen_state) nogil
45
+ void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil
46
+ void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil
47
+ double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil
48
+ float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
49
+
50
+ float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
51
+ void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil
52
+ float random_standard_normal_f(bitgen_t* bitgen_state) nogil
53
+ float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
54
+
55
+ int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
56
+ int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
57
+ int64_t random_positive_int(bitgen_t *bitgen_state) nogil
58
+ uint64_t random_uint(bitgen_t *bitgen_state) nogil
59
+
60
+ double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil
61
+
62
+ double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
63
+ float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil
64
+
65
+ double random_exponential(bitgen_t *bitgen_state, double scale) nogil
66
+ double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
67
+ double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
68
+ double random_chisquare(bitgen_t *bitgen_state, double df) nogil
69
+ double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
70
+ double random_standard_cauchy(bitgen_t *bitgen_state) nogil
71
+ double random_pareto(bitgen_t *bitgen_state, double a) nogil
72
+ double random_weibull(bitgen_t *bitgen_state, double a) nogil
73
+ double random_power(bitgen_t *bitgen_state, double a) nogil
74
+ double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
75
+ double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
76
+ double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
77
+ double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
78
+ double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
79
+ double random_standard_t(bitgen_t *bitgen_state, double df) nogil
80
+ double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
81
+ double nonc) nogil
82
+ double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
83
+ double dfden, double nonc) nogil
84
+ double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
85
+ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
86
+ double random_triangular(bitgen_t *bitgen_state, double left, double mode,
87
+ double right) nogil
88
+
89
+ int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
90
+ int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
91
+ int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
92
+ int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
93
+ int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
94
+ int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
95
+ int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
96
+ int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
97
+ int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
98
+ int64_t sample) nogil
99
+
100
+ uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
101
+
102
+ # Generate random uint64 numbers in closed interval [off, off + rng].
103
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
104
+ uint64_t off, uint64_t rng,
105
+ uint64_t mask, bint use_masked) nogil
106
+
107
+ void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix,
108
+ double *pix, npy_intp d, binomial_t *binomial) nogil
109
+
110
+ int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
111
+ int64_t total,
112
+ size_t num_colors, int64_t *colors,
113
+ int64_t nsample,
114
+ size_t num_variates, int64_t *variates) nogil
115
+ void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
116
+ int64_t total,
117
+ size_t num_colors, int64_t *colors,
118
+ int64_t nsample,
119
+ size_t num_variates, int64_t *variates) nogil
120
+
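These declarations are the Cython-level face of the sampling routines; from pure Python the corresponding distributions are reached through Generator methods, roughly as sketched below (seed and parameters illustrative).

import numpy as np

rng = np.random.default_rng(0)
rng.standard_normal()              # backed by random_standard_normal
rng.standard_gamma(3.0, size=2)    # backed by random_standard_gamma
rng.binomial(10, 0.3, size=5)      # backed by random_binomial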
venv/lib/python3.10/site-packages/numpy/random/lib/libnpyrandom.a ADDED
Binary file (71.9 kB).
 
venv/lib/python3.10/site-packages/numpy/random/mtrand.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (784 kB).
 
venv/lib/python3.10/site-packages/numpy/random/mtrand.pyi ADDED
@@ -0,0 +1,571 @@
1
+ import builtins
2
+ from collections.abc import Callable
3
+ from typing import Any, Union, overload, Literal
4
+
5
+ from numpy import (
6
+ bool_,
7
+ dtype,
8
+ float32,
9
+ float64,
10
+ int8,
11
+ int16,
12
+ int32,
13
+ int64,
14
+ int_,
15
+ ndarray,
16
+ uint,
17
+ uint8,
18
+ uint16,
19
+ uint32,
20
+ uint64,
21
+ )
22
+ from numpy.random.bit_generator import BitGenerator
23
+ from numpy._typing import (
24
+ ArrayLike,
25
+ _ArrayLikeFloat_co,
26
+ _ArrayLikeInt_co,
27
+ _DoubleCodes,
28
+ _DTypeLikeBool,
29
+ _DTypeLikeInt,
30
+ _DTypeLikeUInt,
31
+ _Float32Codes,
32
+ _Float64Codes,
33
+ _Int8Codes,
34
+ _Int16Codes,
35
+ _Int32Codes,
36
+ _Int64Codes,
37
+ _IntCodes,
38
+ _ShapeLike,
39
+ _SingleCodes,
40
+ _SupportsDType,
41
+ _UInt8Codes,
42
+ _UInt16Codes,
43
+ _UInt32Codes,
44
+ _UInt64Codes,
45
+ _UIntCodes,
46
+ )
47
+
48
+ _DTypeLikeFloat32 = Union[
49
+ dtype[float32],
50
+ _SupportsDType[dtype[float32]],
51
+ type[float32],
52
+ _Float32Codes,
53
+ _SingleCodes,
54
+ ]
55
+
56
+ _DTypeLikeFloat64 = Union[
57
+ dtype[float64],
58
+ _SupportsDType[dtype[float64]],
59
+ type[float],
60
+ type[float64],
61
+ _Float64Codes,
62
+ _DoubleCodes,
63
+ ]
64
+
65
+ class RandomState:
66
+ _bit_generator: BitGenerator
67
+ def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ...
68
+ def __repr__(self) -> str: ...
69
+ def __str__(self) -> str: ...
70
+ def __getstate__(self) -> dict[str, Any]: ...
71
+ def __setstate__(self, state: dict[str, Any]) -> None: ...
72
+ def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ...
73
+ def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ...
74
+ @overload
75
+ def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ...
76
+ @overload
77
+ def get_state(
78
+ self, legacy: Literal[True] = ...
79
+ ) -> dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]: ...
80
+ def set_state(
81
+ self, state: dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]
82
+ ) -> None: ...
83
+ @overload
84
+ def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc]
85
+ @overload
86
+ def random_sample(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
87
+ @overload
88
+ def random(self, size: None = ...) -> float: ... # type: ignore[misc]
89
+ @overload
90
+ def random(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
91
+ @overload
92
+ def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
93
+ @overload
94
+ def beta(
95
+ self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
96
+ ) -> ndarray[Any, dtype[float64]]: ...
97
+ @overload
98
+ def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
99
+ @overload
100
+ def exponential(
101
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
102
+ ) -> ndarray[Any, dtype[float64]]: ...
103
+ @overload
104
+ def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc]
105
+ @overload
106
+ def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
107
+ @overload
108
+ def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc]
109
+ @overload
110
+ def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ...
111
+ @overload
112
+ def randint( # type: ignore[misc]
113
+ self,
114
+ low: int,
115
+ high: None | int = ...,
116
+ ) -> int: ...
117
+ @overload
118
+ def randint( # type: ignore[misc]
119
+ self,
120
+ low: int,
121
+ high: None | int = ...,
122
+ size: None = ...,
123
+ dtype: _DTypeLikeBool = ...,
124
+ ) -> bool: ...
125
+ @overload
126
+ def randint( # type: ignore[misc]
127
+ self,
128
+ low: int,
129
+ high: None | int = ...,
130
+ size: None = ...,
131
+ dtype: _DTypeLikeInt | _DTypeLikeUInt = ...,
132
+ ) -> int: ...
133
+ @overload
134
+ def randint( # type: ignore[misc]
135
+ self,
136
+ low: _ArrayLikeInt_co,
137
+ high: None | _ArrayLikeInt_co = ...,
138
+ size: None | _ShapeLike = ...,
139
+ ) -> ndarray[Any, dtype[int_]]: ...
140
+ @overload
141
+ def randint( # type: ignore[misc]
142
+ self,
143
+ low: _ArrayLikeInt_co,
144
+ high: None | _ArrayLikeInt_co = ...,
145
+ size: None | _ShapeLike = ...,
146
+ dtype: _DTypeLikeBool = ...,
147
+ ) -> ndarray[Any, dtype[bool_]]: ...
148
+ @overload
149
+ def randint( # type: ignore[misc]
150
+ self,
151
+ low: _ArrayLikeInt_co,
152
+ high: None | _ArrayLikeInt_co = ...,
153
+ size: None | _ShapeLike = ...,
154
+ dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
155
+ ) -> ndarray[Any, dtype[int8]]: ...
156
+ @overload
157
+ def randint( # type: ignore[misc]
158
+ self,
159
+ low: _ArrayLikeInt_co,
160
+ high: None | _ArrayLikeInt_co = ...,
161
+ size: None | _ShapeLike = ...,
162
+ dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
163
+ ) -> ndarray[Any, dtype[int16]]: ...
164
+ @overload
165
+ def randint( # type: ignore[misc]
166
+ self,
167
+ low: _ArrayLikeInt_co,
168
+ high: None | _ArrayLikeInt_co = ...,
169
+ size: None | _ShapeLike = ...,
170
+ dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
171
+ ) -> ndarray[Any, dtype[int32]]: ...
172
+ @overload
173
+ def randint( # type: ignore[misc]
174
+ self,
175
+ low: _ArrayLikeInt_co,
176
+ high: None | _ArrayLikeInt_co = ...,
177
+ size: None | _ShapeLike = ...,
178
+ dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
179
+ ) -> ndarray[Any, dtype[int64]]: ...
180
+ @overload
181
+ def randint( # type: ignore[misc]
182
+ self,
183
+ low: _ArrayLikeInt_co,
184
+ high: None | _ArrayLikeInt_co = ...,
185
+ size: None | _ShapeLike = ...,
186
+ dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
187
+ ) -> ndarray[Any, dtype[uint8]]: ...
188
+ @overload
189
+ def randint( # type: ignore[misc]
190
+ self,
191
+ low: _ArrayLikeInt_co,
192
+ high: None | _ArrayLikeInt_co = ...,
193
+ size: None | _ShapeLike = ...,
194
+ dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
195
+ ) -> ndarray[Any, dtype[uint16]]: ...
196
+ @overload
197
+ def randint( # type: ignore[misc]
198
+ self,
199
+ low: _ArrayLikeInt_co,
200
+ high: None | _ArrayLikeInt_co = ...,
201
+ size: None | _ShapeLike = ...,
202
+ dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
203
+ ) -> ndarray[Any, dtype[uint32]]: ...
204
+ @overload
205
+ def randint( # type: ignore[misc]
206
+ self,
207
+ low: _ArrayLikeInt_co,
208
+ high: None | _ArrayLikeInt_co = ...,
209
+ size: None | _ShapeLike = ...,
210
+ dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
211
+ ) -> ndarray[Any, dtype[uint64]]: ...
212
+ @overload
213
+ def randint( # type: ignore[misc]
214
+ self,
215
+ low: _ArrayLikeInt_co,
216
+ high: None | _ArrayLikeInt_co = ...,
217
+ size: None | _ShapeLike = ...,
218
+ dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
219
+ ) -> ndarray[Any, dtype[int_]]: ...
220
+ @overload
221
+ def randint( # type: ignore[misc]
222
+ self,
223
+ low: _ArrayLikeInt_co,
224
+ high: None | _ArrayLikeInt_co = ...,
225
+ size: None | _ShapeLike = ...,
226
+ dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
227
+ ) -> ndarray[Any, dtype[uint]]: ...
228
+ def bytes(self, length: int) -> builtins.bytes: ...
229
+ @overload
230
+ def choice(
231
+ self,
232
+ a: int,
233
+ size: None = ...,
234
+ replace: bool = ...,
235
+ p: None | _ArrayLikeFloat_co = ...,
236
+ ) -> int: ...
237
+ @overload
238
+ def choice(
239
+ self,
240
+ a: int,
241
+ size: _ShapeLike = ...,
242
+ replace: bool = ...,
243
+ p: None | _ArrayLikeFloat_co = ...,
244
+ ) -> ndarray[Any, dtype[int_]]: ...
245
+ @overload
246
+ def choice(
247
+ self,
248
+ a: ArrayLike,
249
+ size: None = ...,
250
+ replace: bool = ...,
251
+ p: None | _ArrayLikeFloat_co = ...,
252
+ ) -> Any: ...
253
+ @overload
254
+ def choice(
255
+ self,
256
+ a: ArrayLike,
257
+ size: _ShapeLike = ...,
258
+ replace: bool = ...,
259
+ p: None | _ArrayLikeFloat_co = ...,
260
+ ) -> ndarray[Any, Any]: ...
261
+ @overload
262
+ def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
263
+ @overload
264
+ def uniform(
265
+ self,
266
+ low: _ArrayLikeFloat_co = ...,
267
+ high: _ArrayLikeFloat_co = ...,
268
+ size: None | _ShapeLike = ...,
269
+ ) -> ndarray[Any, dtype[float64]]: ...
270
+ @overload
271
+ def rand(self) -> float: ...
272
+ @overload
273
+ def rand(self, *args: int) -> ndarray[Any, dtype[float64]]: ...
274
+ @overload
275
+ def randn(self) -> float: ...
276
+ @overload
277
+ def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ...
278
+ @overload
279
+ def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ... # type: ignore[misc]
280
+ @overload
281
+ def random_integers(
282
+ self,
283
+ low: _ArrayLikeInt_co,
284
+ high: None | _ArrayLikeInt_co = ...,
285
+ size: None | _ShapeLike = ...,
286
+ ) -> ndarray[Any, dtype[int_]]: ...
287
+ @overload
288
+ def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc]
289
+ @overload
290
+ def standard_normal( # type: ignore[misc]
291
+ self, size: _ShapeLike = ...
292
+ ) -> ndarray[Any, dtype[float64]]: ...
293
+ @overload
294
+ def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
295
+ @overload
296
+ def normal(
297
+ self,
298
+ loc: _ArrayLikeFloat_co = ...,
299
+ scale: _ArrayLikeFloat_co = ...,
300
+ size: None | _ShapeLike = ...,
301
+ ) -> ndarray[Any, dtype[float64]]: ...
302
+ @overload
303
+ def standard_gamma( # type: ignore[misc]
304
+ self,
305
+ shape: float,
306
+ size: None = ...,
307
+ ) -> float: ...
308
+ @overload
309
+ def standard_gamma(
310
+ self,
311
+ shape: _ArrayLikeFloat_co,
312
+ size: None | _ShapeLike = ...,
313
+ ) -> ndarray[Any, dtype[float64]]: ...
314
+ @overload
315
+ def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
316
+ @overload
317
+ def gamma(
318
+ self,
319
+ shape: _ArrayLikeFloat_co,
320
+ scale: _ArrayLikeFloat_co = ...,
321
+ size: None | _ShapeLike = ...,
322
+ ) -> ndarray[Any, dtype[float64]]: ...
323
+ @overload
324
+ def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
325
+ @overload
326
+ def f(
327
+ self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
328
+ ) -> ndarray[Any, dtype[float64]]: ...
329
+ @overload
330
+ def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
331
+ @overload
332
+ def noncentral_f(
333
+ self,
334
+ dfnum: _ArrayLikeFloat_co,
335
+ dfden: _ArrayLikeFloat_co,
336
+ nonc: _ArrayLikeFloat_co,
337
+ size: None | _ShapeLike = ...,
338
+ ) -> ndarray[Any, dtype[float64]]: ...
339
+ @overload
340
+ def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
341
+ @overload
342
+ def chisquare(
343
+ self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
344
+ ) -> ndarray[Any, dtype[float64]]: ...
345
+ @overload
346
+ def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
347
+ @overload
348
+ def noncentral_chisquare(
349
+ self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
350
+ ) -> ndarray[Any, dtype[float64]]: ...
351
+ @overload
352
+ def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
353
+ @overload
354
+ def standard_t(
355
+ self, df: _ArrayLikeFloat_co, size: None = ...
356
+ ) -> ndarray[Any, dtype[float64]]: ...
357
+ @overload
358
+ def standard_t(
359
+ self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
360
+ ) -> ndarray[Any, dtype[float64]]: ...
361
+ @overload
362
+ def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
363
+ @overload
364
+ def vonmises(
365
+ self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
366
+ ) -> ndarray[Any, dtype[float64]]: ...
367
+ @overload
368
+ def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
369
+ @overload
370
+ def pareto(
371
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
372
+ ) -> ndarray[Any, dtype[float64]]: ...
373
+ @overload
374
+ def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
375
+ @overload
376
+ def weibull(
377
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
378
+ ) -> ndarray[Any, dtype[float64]]: ...
379
+ @overload
380
+ def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
381
+ @overload
382
+ def power(
383
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
384
+ ) -> ndarray[Any, dtype[float64]]: ...
385
+ @overload
386
+ def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
387
+ @overload
388
+ def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
389
+ @overload
390
+ def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
391
+ @overload
392
+ def laplace(
393
+ self,
394
+ loc: _ArrayLikeFloat_co = ...,
395
+ scale: _ArrayLikeFloat_co = ...,
396
+ size: None | _ShapeLike = ...,
397
+ ) -> ndarray[Any, dtype[float64]]: ...
398
+ @overload
399
+ def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
400
+ @overload
401
+ def gumbel(
402
+ self,
403
+ loc: _ArrayLikeFloat_co = ...,
404
+ scale: _ArrayLikeFloat_co = ...,
405
+ size: None | _ShapeLike = ...,
406
+ ) -> ndarray[Any, dtype[float64]]: ...
407
+ @overload
408
+ def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
409
+ @overload
410
+ def logistic(
411
+ self,
412
+ loc: _ArrayLikeFloat_co = ...,
413
+ scale: _ArrayLikeFloat_co = ...,
414
+ size: None | _ShapeLike = ...,
415
+ ) -> ndarray[Any, dtype[float64]]: ...
416
+ @overload
417
+ def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
418
+ @overload
419
+ def lognormal(
420
+ self,
421
+ mean: _ArrayLikeFloat_co = ...,
422
+ sigma: _ArrayLikeFloat_co = ...,
423
+ size: None | _ShapeLike = ...,
424
+ ) -> ndarray[Any, dtype[float64]]: ...
425
+ @overload
426
+ def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
427
+ @overload
428
+ def rayleigh(
429
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
430
+ ) -> ndarray[Any, dtype[float64]]: ...
431
+ @overload
432
+ def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
433
+ @overload
434
+ def wald(
435
+ self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
436
+ ) -> ndarray[Any, dtype[float64]]: ...
437
+ @overload
438
+ def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc]
439
+ @overload
440
+ def triangular(
441
+ self,
442
+ left: _ArrayLikeFloat_co,
443
+ mode: _ArrayLikeFloat_co,
444
+ right: _ArrayLikeFloat_co,
445
+ size: None | _ShapeLike = ...,
446
+ ) -> ndarray[Any, dtype[float64]]: ...
447
+ @overload
448
+ def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc]
449
+ @overload
450
+ def binomial(
451
+ self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
452
+ ) -> ndarray[Any, dtype[int_]]: ...
453
+ @overload
454
+ def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc]
455
+ @overload
456
+ def negative_binomial(
457
+ self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
458
+ ) -> ndarray[Any, dtype[int_]]: ...
459
+ @overload
460
+ def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc]
461
+ @overload
462
+ def poisson(
463
+ self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
464
+ ) -> ndarray[Any, dtype[int_]]: ...
465
+ @overload
466
+ def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
467
+ @overload
468
+ def zipf(
469
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
470
+ ) -> ndarray[Any, dtype[int_]]: ...
471
+ @overload
472
+ def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
473
+ @overload
474
+ def geometric(
475
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
476
+ ) -> ndarray[Any, dtype[int_]]: ...
477
+ @overload
478
+ def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
479
+ @overload
480
+ def hypergeometric(
481
+ self,
482
+ ngood: _ArrayLikeInt_co,
483
+ nbad: _ArrayLikeInt_co,
484
+ nsample: _ArrayLikeInt_co,
485
+ size: None | _ShapeLike = ...,
486
+ ) -> ndarray[Any, dtype[int_]]: ...
487
+ @overload
488
+ def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
489
+ @overload
490
+ def logseries(
491
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
492
+ ) -> ndarray[Any, dtype[int_]]: ...
493
+ def multivariate_normal(
494
+ self,
495
+ mean: _ArrayLikeFloat_co,
496
+ cov: _ArrayLikeFloat_co,
497
+ size: None | _ShapeLike = ...,
498
+ check_valid: Literal["warn", "raise", "ignore"] = ...,
499
+ tol: float = ...,
500
+ ) -> ndarray[Any, dtype[float64]]: ...
501
+ def multinomial(
502
+ self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
503
+ ) -> ndarray[Any, dtype[int_]]: ...
504
+ def dirichlet(
505
+ self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
506
+ ) -> ndarray[Any, dtype[float64]]: ...
507
+ def shuffle(self, x: ArrayLike) -> None: ...
508
+ @overload
509
+ def permutation(self, x: int) -> ndarray[Any, dtype[int_]]: ...
510
+ @overload
511
+ def permutation(self, x: ArrayLike) -> ndarray[Any, Any]: ...
512
+
513
+ _rand: RandomState
514
+
515
+ beta = _rand.beta
516
+ binomial = _rand.binomial
517
+ bytes = _rand.bytes
518
+ chisquare = _rand.chisquare
519
+ choice = _rand.choice
520
+ dirichlet = _rand.dirichlet
521
+ exponential = _rand.exponential
522
+ f = _rand.f
523
+ gamma = _rand.gamma
524
+ get_state = _rand.get_state
525
+ geometric = _rand.geometric
526
+ gumbel = _rand.gumbel
527
+ hypergeometric = _rand.hypergeometric
528
+ laplace = _rand.laplace
529
+ logistic = _rand.logistic
530
+ lognormal = _rand.lognormal
531
+ logseries = _rand.logseries
532
+ multinomial = _rand.multinomial
533
+ multivariate_normal = _rand.multivariate_normal
534
+ negative_binomial = _rand.negative_binomial
535
+ noncentral_chisquare = _rand.noncentral_chisquare
536
+ noncentral_f = _rand.noncentral_f
537
+ normal = _rand.normal
538
+ pareto = _rand.pareto
539
+ permutation = _rand.permutation
540
+ poisson = _rand.poisson
541
+ power = _rand.power
542
+ rand = _rand.rand
543
+ randint = _rand.randint
544
+ randn = _rand.randn
545
+ random = _rand.random
546
+ random_integers = _rand.random_integers
547
+ random_sample = _rand.random_sample
548
+ rayleigh = _rand.rayleigh
549
+ seed = _rand.seed
550
+ set_state = _rand.set_state
551
+ shuffle = _rand.shuffle
552
+ standard_cauchy = _rand.standard_cauchy
553
+ standard_exponential = _rand.standard_exponential
554
+ standard_gamma = _rand.standard_gamma
555
+ standard_normal = _rand.standard_normal
556
+ standard_t = _rand.standard_t
557
+ triangular = _rand.triangular
558
+ uniform = _rand.uniform
559
+ vonmises = _rand.vonmises
560
+ wald = _rand.wald
561
+ weibull = _rand.weibull
562
+ zipf = _rand.zipf
563
+ # Two legacy that are trivial wrappers around random_sample
564
+ sample = _rand.random_sample
565
+ ranf = _rand.random_sample
566
+
567
+ def set_bit_generator(bitgen: BitGenerator) -> None:
568
+ ...
569
+
570
+ def get_bit_generator() -> BitGenerator:
571
+ ...
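Usage sketch (seed illustrative): the module-level aliases stubbed above are bound methods of the hidden _rand RandomState, which is what the legacy numpy.random functions dispatch to.

import numpy as np

np.random.seed(0)                  # seeds the module-level RandomState (_rand)
np.random.rand(2, 3)               # alias for _rand.rand
np.random.randint(0, 10, size=4)
state = np.random.get_state()      # legacy MT19937 state tuple
np.random.set_state(state)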
venv/lib/python3.10/site-packages/numpy/random/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes).
 
venv/lib/python3.10/site-packages/numpy/random/tests/__pycache__/test_direct.cpython-310.pyc ADDED
Binary file (17.5 kB).
 
venv/lib/python3.10/site-packages/numpy/random/tests/__pycache__/test_extending.cpython-310.pyc ADDED
Binary file (3.15 kB).
 
venv/lib/python3.10/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-310.pyc ADDED
Binary file (88.9 kB).