Dataset columns: index (int64, values 0 to 731k); package (string, length 2 to 98); name (string, length 1 to 76); docstring (string, length 0 to 281k); code (string, length 4 to 1.07M); signature (string, length 2 to 42.8k).
724,694
scipy.sparse._matrix
getcol
Returns a copy of column j of the matrix, as an (m x 1) sparse matrix (column vector).
def getcol(self, j): """Returns a copy of column j of the matrix, as an (m x 1) sparse matrix (column vector). """ return self._getcol(j)
(self, j)
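Illustrative usage sketch (editorial addition, not part of this record); the example matrix is assumed:
import numpy as np
from scipy.sparse import csr_matrix
A = csr_matrix(np.array([[1, 0, 2], [0, 3, 0]]))
col = A.getcol(1)        # (2 x 1) sparse column vector
print(col.toarray())     # [[0], [3]]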
724,695
scipy.sparse._matrix
getformat
Matrix storage format
def getformat(self): """Matrix storage format""" return self.format
(self)
724,696
scipy.sparse._matrix
getmaxprint
Maximum number of elements to display when printed.
def getmaxprint(self): """Maximum number of elements to display when printed.""" return self._getmaxprint()
(self)
724,697
scipy.sparse._matrix
getnnz
Number of stored values, including explicit zeros. Parameters ---------- axis : None, 0, or 1 Select between the number of values across the whole array, in each column, or in each row.
def getnnz(self, axis=None): """Number of stored values, including explicit zeros. Parameters ---------- axis : None, 0, or 1 Select between the number of values across the whole array, in each column, or in each row. """ return self._getnnz(axis=axis)
(self, axis=None)
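Illustrative usage sketch (editorial addition); example values are assumed:
from scipy.sparse import csr_matrix
A = csr_matrix([[1, 0, 0], [0, 2, 3]])
A.getnnz()          # 3, all stored values
A.getnnz(axis=0)    # array([1, 1, 1]), counts per column
A.getnnz(axis=1)    # array([1, 2]), counts per row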
724,698
scipy.sparse._matrix
getrow
Returns a copy of row i of the matrix, as a (1 x n) sparse matrix (row vector).
def getrow(self, i): """Returns a copy of row i of the matrix, as a (1 x n) sparse matrix (row vector). """ return self._getrow(i)
(self, i)
724,699
scipy.sparse._data
log1p
Element-wise log1p. See `numpy.log1p` for more information.
def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = (f"Element-wise {name}.\n\n" f"See `numpy.{name}` for more information.") method.__name__ = name return method
(self)
724,700
scipy.sparse._data
max
Return the maximum of the array/matrix or maximum along an axis. This takes all elements into account, not just the non-zero ones. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the maximum is computed. The default is to compute the maximum over all elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amax : coo_matrix or scalar Maximum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- min : The minimum value of a sparse array/matrix along a given axis. numpy.matrix.max : NumPy's implementation of 'max' for matrices
def max(self, axis=None, out=None): """ Return the maximum of the array/matrix or maximum along an axis. This takes all elements into account, not just the non-zero ones. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the maximum is computed. The default is to compute the maximum over all elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amax : coo_matrix or scalar Maximum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- min : The minimum value of a sparse array/matrix along a given axis. numpy.matrix.max : NumPy's implementation of 'max' for matrices """ return self._min_or_max(axis, out, np.maximum)
(self, axis=None, out=None)
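Illustrative usage sketch (editorial addition); assumes a recent SciPy with csr_array:
from scipy.sparse import csr_array
A = csr_array([[1, -2, 0], [0, 5, 0]])
A.max()                   # 5, taken over all elements, implicit zeros included
A.max(axis=0).toarray()   # per-column maxima 1, 0, 0 (zeros count too)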
724,701
scipy.sparse._compressed
maximum
Element-wise maximum between this and another array/matrix.
def maximum(self, other): return self._maximum_minimum(other, np.maximum, '_maximum_', lambda x: np.asarray(x) > 0)
(self, other)
724,702
scipy.sparse._base
mean
Compute the arithmetic mean along the specified axis. Returns the average of the array/matrix elements. The average is taken over all elements in the array/matrix by default, otherwise over the specified axis. `float64` intermediate and return values are used for integer inputs. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the mean is computed. The default is to compute the mean of all elements in the array/matrix (i.e., `axis` = `None`). dtype : data-type, optional Type to use in computing the mean. For integer inputs, the default is `float64`; for floating point inputs, it is the same as the input dtype. .. versionadded:: 0.18.0 out : np.matrix, optional Alternative output matrix in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. .. versionadded:: 0.18.0 Returns ------- m : np.matrix See Also -------- numpy.matrix.mean : NumPy's implementation of 'mean' for matrices
def mean(self, axis=None, dtype=None, out=None): """ Compute the arithmetic mean along the specified axis. Returns the average of the array/matrix elements. The average is taken over all elements in the array/matrix by default, otherwise over the specified axis. `float64` intermediate and return values are used for integer inputs. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the mean is computed. The default is to compute the mean of all elements in the array/matrix (i.e., `axis` = `None`). dtype : data-type, optional Type to use in computing the mean. For integer inputs, the default is `float64`; for floating point inputs, it is the same as the input dtype. .. versionadded:: 0.18.0 out : np.matrix, optional Alternative output matrix in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. .. versionadded:: 0.18.0 Returns ------- m : np.matrix See Also -------- numpy.matrix.mean : NumPy's implementation of 'mean' for matrices """ validateaxis(axis) res_dtype = self.dtype.type integral = (np.issubdtype(self.dtype, np.integer) or np.issubdtype(self.dtype, np.bool_)) # output dtype if dtype is None: if integral: res_dtype = np.float64 else: res_dtype = np.dtype(dtype).type # intermediate dtype for summation inter_dtype = np.float64 if integral else res_dtype inter_self = self.astype(inter_dtype) if self.ndim == 1: if axis not in (None, -1, 0): raise ValueError("axis must be None, -1 or 0") res = inter_self / self.shape[-1] return res.sum(dtype=res_dtype, out=out) if axis is None: return (inter_self / (self.shape[0] * self.shape[1]))\ .sum(dtype=res_dtype, out=out) if axis < 0: axis += 2 # axis = 0 or 1 now if axis == 0: return (inter_self * (1.0 / self.shape[0])).sum( axis=0, dtype=res_dtype, out=out) else: return (inter_self * (1.0 / self.shape[1])).sum( axis=1, dtype=res_dtype, out=out)
(self, axis=None, dtype=None, out=None)
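Illustrative usage sketch (editorial addition); example values are assumed:
from scipy.sparse import csr_array
A = csr_array([[1, 0, 3], [0, 0, 6]])
A.mean()          # 1.666..., float64 intermediate for integer input
A.mean(axis=0)    # column means 0.5, 0.0, 4.5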
724,703
scipy.sparse._data
min
Return the minimum of the array/matrix or minimum along an axis. This takes all elements into account, not just the non-zero ones. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the minimum is computed. The default is to compute the minimum over all elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amin : coo_matrix or scalar Minimum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- max : The maximum value of a sparse array/matrix along a given axis. numpy.matrix.min : NumPy's implementation of 'min' for matrices
def min(self, axis=None, out=None): """ Return the minimum of the array/matrix or minimum along an axis. This takes all elements into account, not just the non-zero ones. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the minimum is computed. The default is to compute the minimum over all elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amin : coo_matrix or scalar Minimum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- max : The maximum value of a sparse array/matrix along a given axis. numpy.matrix.min : NumPy's implementation of 'min' for matrices """ return self._min_or_max(axis, out, np.minimum)
(self, axis=None, out=None)
724,704
scipy.sparse._compressed
minimum
Element-wise minimum between this and another array/matrix.
def minimum(self, other): return self._maximum_minimum(other, np.minimum, '_minimum_', lambda x: np.asarray(x) < 0)
(self, other)
724,705
scipy.sparse._compressed
multiply
Point-wise multiplication by another array/matrix, vector, or scalar.
def multiply(self, other): """Point-wise multiplication by another array/matrix, vector, or scalar. """ # Scalar multiplication. if isscalarlike(other): return self._mul_scalar(other) # Sparse matrix or vector. if issparse(other): if self.shape == other.shape: other = self.__class__(other) return self._binopt(other, '_elmul_') if other.ndim == 1: raise TypeError("broadcast from a 1d array not yet supported") # Single element. elif other.shape == (1, 1): return self._mul_scalar(other.toarray()[0, 0]) elif self.shape == (1, 1): return other._mul_scalar(self.toarray()[0, 0]) # A row times a column. elif self.shape[1] == 1 and other.shape[0] == 1: return self._matmul_sparse(other.tocsc()) elif self.shape[0] == 1 and other.shape[1] == 1: return other._matmul_sparse(self.tocsc()) # Row vector times matrix. other is a row. elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: other = self._dia_container( (other.toarray().ravel(), [0]), shape=(other.shape[1], other.shape[1]) ) return self._matmul_sparse(other) # self is a row. elif self.shape[0] == 1 and self.shape[1] == other.shape[1]: copy = self._dia_container( (self.toarray().ravel(), [0]), shape=(self.shape[1], self.shape[1]) ) return other._matmul_sparse(copy) # Column vector times matrix. other is a column. elif other.shape[1] == 1 and self.shape[0] == other.shape[0]: other = self._dia_container( (other.toarray().ravel(), [0]), shape=(other.shape[0], other.shape[0]) ) return other._matmul_sparse(self) # self is a column. elif self.shape[1] == 1 and self.shape[0] == other.shape[0]: copy = self._dia_container( (self.toarray().ravel(), [0]), shape=(self.shape[0], self.shape[0]) ) return copy._matmul_sparse(other) else: raise ValueError("inconsistent shapes") # Assume other is a dense matrix/array, which produces a single-item # object array if other isn't convertible to ndarray. other = np.atleast_2d(other) if other.ndim != 2: return np.multiply(self.toarray(), other) # Single element / wrapped object. if other.size == 1: if other.dtype == np.object_: # 'other' not convertible to ndarray. return NotImplemented return self._mul_scalar(other.flat[0]) # Fast case for trivial sparse matrix. elif self.shape == (1, 1): return np.multiply(self.toarray()[0, 0], other) ret = self.tocoo() # Matching shapes. if self.shape == other.shape: data = np.multiply(ret.data, other[ret.row, ret.col]) # Sparse row vector times... elif self.shape[0] == 1: if other.shape[1] == 1: # Dense column vector. data = np.multiply(ret.data, other) elif other.shape[1] == self.shape[1]: # Dense matrix. data = np.multiply(ret.data, other[:, ret.col]) else: raise ValueError("inconsistent shapes") row = np.repeat(np.arange(other.shape[0]), len(ret.row)) col = np.tile(ret.col, other.shape[0]) return self._coo_container( (data.view(np.ndarray).ravel(), (row, col)), shape=(other.shape[0], self.shape[1]), copy=False ) # Sparse column vector times... elif self.shape[1] == 1: if other.shape[0] == 1: # Dense row vector. data = np.multiply(ret.data[:, None], other) elif other.shape[0] == self.shape[0]: # Dense matrix. data = np.multiply(ret.data[:, None], other[ret.row]) else: raise ValueError("inconsistent shapes") row = np.repeat(ret.row, other.shape[1]) col = np.tile(np.arange(other.shape[1]), len(ret.col)) return self._coo_container( (data.view(np.ndarray).ravel(), (row, col)), shape=(self.shape[0], other.shape[1]), copy=False ) # Sparse matrix times dense row vector. elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: data = np.multiply(ret.data, other[:, ret.col].ravel()) # Sparse matrix times dense column vector. elif other.shape[1] == 1 and self.shape[0] == other.shape[0]: data = np.multiply(ret.data, other[ret.row].ravel()) else: raise ValueError("inconsistent shapes") ret.data = data.view(np.ndarray).ravel() return ret
(self, other)
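Illustrative usage sketch (editorial addition); broadcasting behaviour as implemented in the code above, example values assumed:
import numpy as np
from scipy.sparse import csr_array
A = csr_array([[1, 0, 2], [0, 3, 0]])
A.multiply(A).toarray()                          # element-wise: [[1, 0, 4], [0, 9, 0]]
A.multiply(np.array([[10, 20, 30]])).toarray()   # dense row broadcast: [[10, 0, 60], [0, 60, 0]]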
724,706
scipy.sparse._data
nanmax
Return the maximum of the array/matrix or maximum along an axis, ignoring any NaNs. This takes all elements into account, not just the non-zero ones. .. versionadded:: 1.11.0 Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the maximum is computed. The default is to compute the maximum over all elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amax : coo_matrix or scalar Maximum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- nanmin : The minimum value of a sparse array/matrix along a given axis, ignoring NaNs. max : The maximum value of a sparse array/matrix along a given axis, propagating NaNs. numpy.nanmax : NumPy's implementation of 'nanmax'.
def nanmax(self, axis=None, out=None): """ Return the maximum of the array/matrix or maximum along an axis, ignoring any NaNs. This takes all elements into account, not just the non-zero ones. .. versionadded:: 1.11.0 Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the maximum is computed. The default is to compute the maximum over all elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amax : coo_matrix or scalar Maximum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- nanmin : The minimum value of a sparse array/matrix along a given axis, ignoring NaNs. max : The maximum value of a sparse array/matrix along a given axis, propagating NaNs. numpy.nanmax : NumPy's implementation of 'nanmax'. """ return self._min_or_max(axis, out, np.fmax)
(self, axis=None, out=None)
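Illustrative usage sketch (editorial addition); requires SciPy >= 1.11, where nanmax was added:
import numpy as np
from scipy.sparse import csr_array
A = csr_array([[1.0, np.nan], [0.0, 2.0]])
A.max()       # nan, ordinary max propagates NaN
A.nanmax()    # 2.0, NaNs ignored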
724,707
scipy.sparse._data
nanmin
Return the minimum of the array/matrix or minimum along an axis, ignoring any NaNs. This takes all elements into account, not just the non-zero ones. .. versionadded:: 1.11.0 Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the minimum is computed. The default is to compute the minimum over all elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amin : coo_matrix or scalar Minimum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- nanmax : The maximum value of a sparse array/matrix along a given axis, ignoring NaNs. min : The minimum value of a sparse array/matrix along a given axis, propagating NaNs. numpy.nanmin : NumPy's implementation of 'nanmin'.
def nanmin(self, axis=None, out=None): """ Return the minimum of the array/matrix or minimum along an axis, ignoring any NaNs. This takes all elements into account, not just the non-zero ones. .. versionadded:: 1.11.0 Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the minimum is computed. The default is to compute the minimum over all elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amin : coo_matrix or scalar Minimum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- nanmax : The maximum value of a sparse array/matrix along a given axis, ignoring NaNs. min : The minimum value of a sparse array/matrix along a given axis, propagating NaNs. numpy.nanmin : NumPy's implementation of 'nanmin'. """ return self._min_or_max(axis, out, np.fmin)
(self, axis=None, out=None)
724,708
scipy.sparse._csc
nonzero
Nonzero indices of the array/matrix. Returns a tuple of arrays (row,col) containing the indices of the non-zero elements of the array. Examples -------- >>> from scipy.sparse import csr_array >>> A = csr_array([[1,2,0],[0,0,3],[4,0,5]]) >>> A.nonzero() (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
def nonzero(self): # CSC can't use _cs_matrix's .nonzero method because it # returns the indices sorted for self transposed. # Get row and col indices, from _cs_matrix.tocoo major_dim, minor_dim = self._swap(self.shape) minor_indices = self.indices major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype) expandptr(major_dim, self.indptr, major_indices) row, col = self._swap((major_indices, minor_indices)) # Remove explicit zeros nz_mask = self.data != 0 row = row[nz_mask] col = col[nz_mask] # Sort them to be in C-style order ind = np.argsort(row, kind='mergesort') row = row[ind] col = col[ind] return row, col
(self)
724,709
scipy.sparse._data
power
This function performs element-wise power. Parameters ---------- n : scalar n is a non-zero scalar (nonzero avoids dense ones creation) If zero power is desired, special case it to use `np.ones` dtype : If dtype is not specified, the current dtype will be preserved. Raises ------ NotImplementedError : if n is a zero scalar If zero power is desired, special case it to use `np.ones(A.shape, dtype=A.dtype)`
def power(self, n, dtype=None): """ This function performs element-wise power. Parameters ---------- n : scalar n is a non-zero scalar (nonzero avoids dense ones creation) If zero power is desired, special case it to use `np.ones` dtype : If dtype is not specified, the current dtype will be preserved. Raises ------ NotImplementedError : if n is a zero scalar If zero power is desired, special case it to use `np.ones(A.shape, dtype=A.dtype)` """ if not isscalarlike(n): raise NotImplementedError("input is not scalar") if not n: raise NotImplementedError( "zero power is not supported as it would densify the matrix.\n" "Use `np.ones(A.shape, dtype=A.dtype)` for this case." ) data = self._deduped_data() if dtype is not None: data = data.astype(dtype) return self._with_data(data ** n)
(self, n, dtype=None)
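Illustrative usage sketch (editorial addition); example values are assumed:
from scipy.sparse import csr_array
A = csr_array([[2, 0], [0, 3]])
A.power(2).toarray()    # [[4, 0], [0, 9]], only stored entries are touched
# A.power(0) raises NotImplementedError; use np.ones(A.shape, dtype=A.dtype) for that case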
724,710
scipy.sparse._compressed
prune
Remove empty space after all non-zero elements.
def prune(self): """Remove empty space after all non-zero elements. """ major_dim = self._swap(self.shape)[0] if len(self.indptr) != major_dim + 1: raise ValueError('index pointer has invalid length') if len(self.indices) < self.nnz: raise ValueError('indices array has fewer than nnz elements') if len(self.data) < self.nnz: raise ValueError('data array has fewer than nnz elements') self.indices = _prune_array(self.indices[:self.nnz]) self.data = _prune_array(self.data[:self.nnz])
(self)
724,711
scipy.sparse._data
rad2deg
Element-wise rad2deg. See `numpy.rad2deg` for more information.
def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = (f"Element-wise {name}.\n\n" f"See `numpy.{name}` for more information.") method.__name__ = name return method
(self)
724,712
scipy.sparse._base
reshape
reshape(self, shape, order='C', copy=False) Gives a new shape to a sparse array/matrix without changing its data. Parameters ---------- shape : length-2 tuple of ints The new shape should be compatible with the original shape. order : {'C', 'F'}, optional Read the elements using this index order. 'C' means to read and write the elements using C-like index order; e.g., read entire first row, then second row, etc. 'F' means to read and write the elements using Fortran-like index order; e.g., read entire first column, then second column, etc. copy : bool, optional Indicates whether or not attributes of self should be copied whenever possible. The degree to which attributes are copied varies depending on the type of sparse array being used. Returns ------- reshaped : sparse array/matrix A sparse array/matrix with the given `shape`, not necessarily of the same format as the current object. See Also -------- numpy.reshape : NumPy's implementation of 'reshape' for ndarrays
def reshape(self, *args, **kwargs): """reshape(self, shape, order='C', copy=False) Gives a new shape to a sparse array/matrix without changing its data. Parameters ---------- shape : length-2 tuple of ints The new shape should be compatible with the original shape. order : {'C', 'F'}, optional Read the elements using this index order. 'C' means to read and write the elements using C-like index order; e.g., read entire first row, then second row, etc. 'F' means to read and write the elements using Fortran-like index order; e.g., read entire first column, then second column, etc. copy : bool, optional Indicates whether or not attributes of self should be copied whenever possible. The degree to which attributes are copied varies depending on the type of sparse array being used. Returns ------- reshaped : sparse array/matrix A sparse array/matrix with the given `shape`, not necessarily of the same format as the current object. See Also -------- numpy.reshape : NumPy's implementation of 'reshape' for ndarrays """ # If the shape already matches, don't bother doing an actual reshape # Otherwise, the default is to convert to COO and use its reshape is_array = isinstance(self, sparray) shape = check_shape(args, self.shape, allow_1d=is_array) order, copy = check_reshape_kwargs(kwargs) if shape == self.shape: if copy: return self.copy() else: return self return self.tocoo(copy=copy).reshape(shape, order=order, copy=False)
(self, *args, **kwargs)
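Illustrative usage sketch (editorial addition); example values are assumed:
from scipy.sparse import csr_array
A = csr_array([[1, 0, 2, 0], [0, 3, 0, 4]])
B = A.reshape((4, 2))    # same data, new shape; typically returned in COO format
B.toarray()              # [[1, 0], [2, 0], [0, 3], [0, 4]] with the default C order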
724,713
scipy.sparse._compressed
resize
Resize the array/matrix in-place to dimensions given by ``shape`` Any elements that lie within the new shape will remain at the same indices, while non-zero elements lying outside the new shape are removed. Parameters ---------- shape : (int, int) number of rows and columns in the new array/matrix Notes ----- The semantics are not identical to `numpy.ndarray.resize` or `numpy.resize`. Here, the same data will be maintained at each index before and after reshape, if that index is within the new bounds. In numpy, resizing maintains contiguity of the array, moving elements around in the logical array but not within a flattened representation. We give no guarantees about whether the underlying data attributes (arrays, etc.) will be modified in place or replaced with new objects.
def resize(self, *shape): shape = check_shape(shape) if hasattr(self, 'blocksize'): bm, bn = self.blocksize new_M, rm = divmod(shape[0], bm) new_N, rn = divmod(shape[1], bn) if rm or rn: raise ValueError("shape must be divisible into {} blocks. " "Got {}".format(self.blocksize, shape)) M, N = self.shape[0] // bm, self.shape[1] // bn else: new_M, new_N = self._swap(shape) M, N = self._swap(self.shape) if new_M < M: self.indices = self.indices[:self.indptr[new_M]] self.data = self.data[:self.indptr[new_M]] self.indptr = self.indptr[:new_M + 1] elif new_M > M: self.indptr = np.resize(self.indptr, new_M + 1) self.indptr[M + 1:].fill(self.indptr[M]) if new_N < N: mask = self.indices < new_N if not np.all(mask): self.indices = self.indices[mask] self.data = self.data[mask] major_index, val = self._minor_reduce(np.add, mask) self.indptr.fill(0) self.indptr[1:][major_index] = val np.cumsum(self.indptr, out=self.indptr) self._shape = shape
(self, *shape)
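Illustrative usage sketch (editorial addition); example values are assumed:
from scipy.sparse import csr_array
A = csr_array([[1, 0], [0, 2]])
A.resize((3, 3))    # in place; existing entries keep their indices
A.toarray()         # [[1, 0, 0], [0, 2, 0], [0, 0, 0]]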
724,714
scipy.sparse._data
rint
Element-wise rint. See `numpy.rint` for more information.
def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = (f"Element-wise {name}.\n\n" f"See `numpy.{name}` for more information.") method.__name__ = name return method
(self)
724,715
scipy.sparse._matrix
set_shape
Set the shape of the matrix in-place
def set_shape(self, shape): """Set the shape of the matrix in-place""" # Make sure copy is False since this is in place # Make sure format is unchanged because we are doing a __dict__ swap new_self = self.reshape(shape, copy=False).asformat(self.format) self.__dict__ = new_self.__dict__
(self, shape)
724,716
scipy.sparse._base
setdiag
Set diagonal or off-diagonal elements of the array/matrix. Parameters ---------- values : array_like New values of the diagonal elements. Values may have any length. If the diagonal is longer than values, then the remaining diagonal entries will not be set. If values are longer than the diagonal, then the remaining values are ignored. If a scalar value is given, all of the diagonal is set to it. k : int, optional Which off-diagonal to set, corresponding to elements a[i,i+k]. Default: 0 (the main diagonal).
def setdiag(self, values, k=0): """ Set diagonal or off-diagonal elements of the array/matrix. Parameters ---------- values : array_like New values of the diagonal elements. Values may have any length. If the diagonal is longer than values, then the remaining diagonal entries will not be set. If values are longer than the diagonal, then the remaining values are ignored. If a scalar value is given, all of the diagonal is set to it. k : int, optional Which off-diagonal to set, corresponding to elements a[i,i+k]. Default: 0 (the main diagonal). """ M, N = self.shape if (k > 0 and k >= N) or (k < 0 and -k >= M): raise ValueError("k exceeds array dimensions") self._setdiag(np.asarray(values), k)
(self, values, k=0)
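Illustrative usage sketch (editorial addition); LIL format is used here because it sets elements cheaply, example values assumed:
from scipy.sparse import lil_array
A = lil_array((3, 3))
A.setdiag([1, 2, 3])    # main diagonal
A.setdiag(9, k=1)       # scalar broadcast along the first super-diagonal
A.toarray()             # [[1, 9, 0], [0, 2, 9], [0, 0, 3]]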
724,717
scipy.sparse._data
sign
Element-wise sign. See `numpy.sign` for more information.
def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = (f"Element-wise {name}.\n\n" f"See `numpy.{name}` for more information.") method.__name__ = name return method
(self)
724,718
scipy.sparse._data
sin
Element-wise sin. See `numpy.sin` for more information.
def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = (f"Element-wise {name}.\n\n" f"See `numpy.{name}` for more information.") method.__name__ = name return method
(self)
724,719
scipy.sparse._data
sinh
Element-wise sinh. See `numpy.sinh` for more information.
def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = (f"Element-wise {name}.\n\n" f"See `numpy.{name}` for more information.") method.__name__ = name return method
(self)
724,720
scipy.sparse._compressed
sort_indices
Sort the indices of this array/matrix *in place*
def sort_indices(self): """Sort the indices of this array/matrix *in place* """ if not self.has_sorted_indices: _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr, self.indices, self.data) self.has_sorted_indices = True
(self)
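Illustrative usage sketch (editorial addition); the unsorted-index construction is assumed purely for demonstration:
import numpy as np
from scipy.sparse import csr_array
data, indices, indptr = np.array([2.0, 1.0]), np.array([1, 0]), np.array([0, 2])
A = csr_array((data, indices, indptr), shape=(1, 2))   # column indices stored out of order
A.sort_indices()                                       # in place
A.indices, A.data                                      # (array([0, 1]), array([1., 2.]))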
724,721
scipy.sparse._compressed
sorted_indices
Return a copy of this array/matrix with sorted indices
def sorted_indices(self): """Return a copy of this array/matrix with sorted indices """ A = self.copy() A.sort_indices() return A # an alternative that has linear complexity is the following # although the previous option is typically faster # return self.toother().toother()
(self)
724,722
scipy.sparse._data
sqrt
Element-wise sqrt. See `numpy.sqrt` for more information.
def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = (f"Element-wise {name}.\n\n" f"See `numpy.{name}` for more information.") method.__name__ = name return method
(self)
724,723
scipy.sparse._compressed
sum
Sum the array/matrix elements over a given axis. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the sum is computed. The default is to compute the sum of all the array/matrix elements, returning a scalar (i.e., `axis` = `None`). dtype : dtype, optional The type of the returned array/matrix and of the accumulator in which the elements are summed. The dtype of `a` is used by default unless `a` has an integer dtype of less precision than the default platform integer. In that case, if `a` is signed then the platform integer is used while if `a` is unsigned then an unsigned integer of the same precision as the platform integer is used. .. versionadded:: 0.18.0 out : np.matrix, optional Alternative output matrix in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. .. versionadded:: 0.18.0 Returns ------- sum_along_axis : np.matrix A matrix with the same shape as `self`, with the specified axis removed. See Also -------- numpy.matrix.sum : NumPy's implementation of 'sum' for matrices
def sum(self, axis=None, dtype=None, out=None): """Sum the array/matrix over the given axis. If the axis is None, sum over both rows and columns, returning a scalar. """ # The _spbase base class already does axis=0 and axis=1 efficiently # so we only do the case axis=None here if (not hasattr(self, 'blocksize') and axis in self._swap(((1, -1), (0, 2)))[0]): # faster than multiplication for large minor axis in CSC/CSR res_dtype = get_sum_dtype(self.dtype) ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype) major_index, value = self._minor_reduce(np.add) ret[major_index] = value ret = self._ascontainer(ret) if axis % 2 == 1: ret = ret.T if out is not None and out.shape != ret.shape: raise ValueError('dimensions do not match') return ret.sum(axis=(), dtype=dtype, out=out) # _spbase will handle the remaining situations when axis # is in {None, -1, 0, 1} else: return _spbase.sum(self, axis=axis, dtype=dtype, out=out)
(self, axis=None, dtype=None, out=None)
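Illustrative usage sketch (editorial addition); example values are assumed:
from scipy.sparse import csr_array
A = csr_array([[1, 0, 2], [0, 3, 0]])
A.sum()          # 6
A.sum(axis=0)    # column sums 1, 3, 2
A.sum(axis=1)    # row sums 3, 3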
724,724
scipy.sparse._compressed
sum_duplicates
Eliminate duplicate entries by adding them together This is an *in place* operation.
def sum_duplicates(self): """Eliminate duplicate entries by adding them together This is an *in place* operation. """ if self.has_canonical_format: return self.sort_indices() M, N = self._swap(self.shape) _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices, self.data) self.prune() # nnz may have changed self.has_canonical_format = True
(self)
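Illustrative usage sketch (editorial addition); the duplicate-entry construction is assumed purely for demonstration:
import numpy as np
from scipy.sparse import csr_array
data, indices, indptr = np.array([1.0, 2.0, 3.0]), np.array([0, 0, 1]), np.array([0, 3, 3])
A = csr_array((data, indices, indptr), shape=(2, 2))   # row 0 stores column 0 twice
A.sum_duplicates()                                     # in place
A.indices, A.data                                      # (array([0, 1]), array([3., 3.]))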
724,725
scipy.sparse._data
tan
Element-wise tan. See `numpy.tan` for more information.
def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = (f"Element-wise {name}.\n\n" f"See `numpy.{name}` for more information.") method.__name__ = name return method
(self)
724,726
scipy.sparse._data
tanh
Element-wise tanh. See `numpy.tanh` for more information.
def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = (f"Element-wise {name}.\n\n" f"See `numpy.{name}` for more information.") method.__name__ = name return method
(self)
724,727
scipy.sparse._compressed
toarray
Return a dense ndarray representation of this sparse array/matrix. Parameters ---------- order : {'C', 'F'}, optional Whether to store multidimensional data in C (row-major) or Fortran (column-major) order in memory. The default is 'None', which provides no ordering guarantees. Cannot be specified in conjunction with the `out` argument. out : ndarray, 2-D, optional If specified, uses this array as the output buffer instead of allocating a new array to return. The provided array must have the same shape and dtype as the sparse array/matrix on which you are calling the method. For most sparse types, `out` is required to be memory contiguous (either C or Fortran ordered). Returns ------- arr : ndarray, 2-D An array with the same shape and containing the same data represented by the sparse array/matrix, with the requested memory order. If `out` was passed, the same object is returned after being modified in-place to contain the appropriate values.
def toarray(self, order=None, out=None): if out is None and order is None: order = self._swap('cf')[0] out = self._process_toarray_args(order, out) if not (out.flags.c_contiguous or out.flags.f_contiguous): raise ValueError('Output array must be C or F contiguous') # align ideal order with output array order if out.flags.c_contiguous: x = self.tocsr() y = out else: x = self.tocsc() y = out.T M, N = x._swap(x.shape) csr_todense(M, N, x.indptr, x.indices, x.data, y) return out
(self, order=None, out=None)
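Illustrative usage sketch (editorial addition); example values are assumed, and out must match the sparse array's shape and dtype:
import numpy as np
from scipy.sparse import csr_array
A = csr_array([[1.0, 0.0], [0.0, 2.0]])
A.toarray()                       # new dense ndarray
out = np.zeros((2, 2), order='F')
A.toarray(out=out)                # fills the preallocated Fortran-ordered buffer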
724,728
scipy.sparse._base
tobsr
Convert this array/matrix to Block Sparse Row format. With copy=False, the data/indices may be shared between this array/matrix and the resultant bsr_array/matrix. When blocksize=(R, C) is provided, it will be used for construction of the bsr_array/matrix.
def tobsr(self, blocksize=None, copy=False): """Convert this array/matrix to Block Sparse Row format. With copy=False, the data/indices may be shared between this array/matrix and the resultant bsr_array/matrix. When blocksize=(R, C) is provided, it will be used for construction of the bsr_array/matrix. """ return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)
(self, blocksize=None, copy=False)
724,729
scipy.sparse._compressed
tocoo
Convert this array/matrix to COOrdinate format. With copy=False, the data/indices may be shared between this array/matrix and the resultant coo_array/matrix.
def tocoo(self, copy=True): major_dim, minor_dim = self._swap(self.shape) minor_indices = self.indices major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype) _sparsetools.expandptr(major_dim, self.indptr, major_indices) coords = self._swap((major_indices, minor_indices)) return self._coo_container( (self.data, coords), self.shape, copy=copy, dtype=self.dtype )
(self, copy=True)
724,730
scipy.sparse._csc
tocsc
Convert this array/matrix to Compressed Sparse Column format. With copy=False, the data/indices may be shared between this array/matrix and the resultant csc_array/matrix.
def tocsc(self, copy=False): if copy: return self.copy() else: return self
(self, copy=False)
724,731
scipy.sparse._csc
tocsr
Convert this array/matrix to Compressed Sparse Row format. With copy=False, the data/indices may be shared between this array/matrix and the resultant csr_array/matrix.
def tocsr(self, copy=False): M,N = self.shape idx_dtype = self._get_index_dtype((self.indptr, self.indices), maxval=max(self.nnz, N)) indptr = np.empty(M + 1, dtype=idx_dtype) indices = np.empty(self.nnz, dtype=idx_dtype) data = np.empty(self.nnz, dtype=upcast(self.dtype)) csc_tocsr(M, N, self.indptr.astype(idx_dtype), self.indices.astype(idx_dtype), self.data, indptr, indices, data) A = self._csr_container( (data, indices, indptr), shape=self.shape, copy=False ) A.has_sorted_indices = True return A
(self, copy=False)
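Illustrative usage sketch (editorial addition); example values are assumed:
from scipy.sparse import csc_array
C = csc_array([[1, 0, 2], [0, 3, 0]])
R = C.tocsr()    # converted copy with sorted indices
R.format         # 'csr'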
724,732
scipy.sparse._base
todense
Return a dense representation of this sparse array/matrix. Parameters ---------- order : {'C', 'F'}, optional Whether to store multi-dimensional data in C (row-major) or Fortran (column-major) order in memory. The default is 'None', which provides no ordering guarantees. Cannot be specified in conjunction with the `out` argument. out : ndarray, 2-D, optional If specified, uses this array (or `numpy.matrix`) as the output buffer instead of allocating a new array to return. The provided array must have the same shape and dtype as the sparse array/matrix on which you are calling the method. Returns ------- arr : numpy.matrix, 2-D A NumPy matrix object with the same shape and containing the same data represented by the sparse array/matrix, with the requested memory order. If `out` was passed and was an array (rather than a `numpy.matrix`), it will be filled with the appropriate values and returned wrapped in a `numpy.matrix` object that shares the same memory.
def todense(self, order=None, out=None): """ Return a dense representation of this sparse array/matrix. Parameters ---------- order : {'C', 'F'}, optional Whether to store multi-dimensional data in C (row-major) or Fortran (column-major) order in memory. The default is 'None', which provides no ordering guarantees. Cannot be specified in conjunction with the `out` argument. out : ndarray, 2-D, optional If specified, uses this array (or `numpy.matrix`) as the output buffer instead of allocating a new array to return. The provided array must have the same shape and dtype as the sparse array/matrix on which you are calling the method. Returns ------- arr : numpy.matrix, 2-D A NumPy matrix object with the same shape and containing the same data represented by the sparse array/matrix, with the requested memory order. If `out` was passed and was an array (rather than a `numpy.matrix`), it will be filled with the appropriate values and returned wrapped in a `numpy.matrix` object that shares the same memory. """ return self._ascontainer(self.toarray(order=order, out=out))
(self, order=None, out=None)
724,733
scipy.sparse._base
todia
Convert this array/matrix to sparse DIAgonal format. With copy=False, the data/indices may be shared between this array/matrix and the resultant dia_array/matrix.
def todia(self, copy=False): """Convert this array/matrix to sparse DIAgonal format. With copy=False, the data/indices may be shared between this array/matrix and the resultant dia_array/matrix. """ return self.tocoo(copy=copy).todia(copy=False)
(self, copy=False)
724,734
scipy.sparse._base
todok
Convert this array/matrix to Dictionary Of Keys format. With copy=False, the data/indices may be shared between this array/matrix and the resultant dok_array/matrix.
def todok(self, copy=False): """Convert this array/matrix to Dictionary Of Keys format. With copy=False, the data/indices may be shared between this array/matrix and the resultant dok_array/matrix. """ return self.tocoo(copy=copy).todok(copy=False)
(self, copy=False)
724,735
scipy.sparse._base
tolil
Convert this array/matrix to List of Lists format. With copy=False, the data/indices may be shared between this array/matrix and the resultant lil_array/matrix.
def tolil(self, copy=False): """Convert this array/matrix to List of Lists format. With copy=False, the data/indices may be shared between this array/matrix and the resultant lil_array/matrix. """ return self.tocsr(copy=False).tolil(copy=copy)
(self, copy=False)
724,736
scipy.sparse._base
trace
Returns the sum along diagonals of the sparse array/matrix. Parameters ---------- offset : int, optional Which diagonal to get, corresponding to elements a[i, i+offset]. Default: 0 (the main diagonal).
def trace(self, offset=0): """Returns the sum along diagonals of the sparse array/matrix. Parameters ---------- offset : int, optional Which diagonal to get, corresponding to elements a[i, i+offset]. Default: 0 (the main diagonal). """ return self.diagonal(k=offset).sum()
(self, offset=0)
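Illustrative usage sketch (editorial addition); example values are assumed:
from scipy.sparse import csr_array
A = csr_array([[1, 2, 0], [0, 3, 4], [5, 0, 6]])
A.trace()           # 10, main diagonal 1 + 3 + 6
A.trace(offset=1)   # 6, first super-diagonal 2 + 4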
724,737
scipy.sparse._csc
transpose
Reverses the dimensions of the sparse array/matrix. Parameters ---------- axes : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value. copy : bool, optional Indicates whether or not attributes of `self` should be copied whenever possible. The degree to which attributes are copied varies depending on the type of sparse array/matrix being used. Returns ------- p : `self` with the dimensions reversed. Notes ----- If `self` is a `csr_array` or a `csc_array`, then this will return a `csc_array` or a `csr_array`, respectively. See Also -------- numpy.transpose : NumPy's implementation of 'transpose' for ndarrays
def transpose(self, axes=None, copy=False): if axes is not None and axes != (1, 0): raise ValueError("Sparse arrays/matrices do not support " "an 'axes' parameter because swapping " "dimensions is the only logical permutation.") M, N = self.shape return self._csr_container((self.data, self.indices, self.indptr), (N, M), copy=copy)
(self, axes=None, copy=False)
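Illustrative usage sketch (editorial addition); example values are assumed:
from scipy.sparse import csc_array
C = csc_array([[1, 0, 2], [0, 3, 0]])
T = C.transpose()    # CSC transposes to CSR by reusing the same data arrays
T.format, T.shape    # ('csr', (3, 2))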
724,738
scipy.sparse._data
trunc
Element-wise trunc. See `numpy.trunc` for more information.
def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = (f"Element-wise {name}.\n\n" f"See `numpy.{name}` for more information.") method.__name__ = name return method
(self)
724,739
markov_clustering.modularity
delta_matrix
Compute delta matrix where delta[i,j]=1 if i and j belong to the same cluster and i!=j :param matrix: The adjacency matrix :param clusters: The clusters returned by get_clusters :returns: delta matrix
def delta_matrix(matrix, clusters): """ Compute delta matrix where delta[i,j]=1 if i and j belong to the same cluster and i!=j :param matrix: The adjacency matrix :param clusters: The clusters returned by get_clusters :returns: delta matrix """ if isspmatrix(matrix): delta = dok_matrix(matrix.shape) else: delta = np.zeros(matrix.shape) for i in clusters: for j in permutations(i, 2): delta[j] = 1 return delta
(matrix, clusters)
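Illustrative usage sketch (editorial addition); the import path follows this record's package field, and the cluster tuples are assumed to match what get_clusters returns:
import numpy as np
from markov_clustering.modularity import delta_matrix
adjacency = np.zeros((4, 4))                  # dense input takes the np.zeros branch above
clusters = [(0, 1), (2, 3)]
delta = delta_matrix(adjacency, clusters)
delta[0, 1], delta[1, 0], delta[0, 0]         # (1.0, 1.0, 0.0)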
724,740
scipy.sparse._dok
dok_matrix
Dictionary Of Keys based sparse matrix. This is an efficient structure for constructing sparse matrices incrementally. This can be instantiated in several ways: dok_matrix(D) where D is a 2-D ndarray dok_matrix(S) with another sparse array or matrix S (equivalent to S.todok()) dok_matrix((M,N), [dtype]) create the matrix with initial shape (M,N) dtype is optional, defaulting to dtype='d' Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of nonzero elements size T Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. - Allows for efficient O(1) access of individual elements. - Duplicates are not allowed. - Can be efficiently converted to a coo_matrix once constructed. Examples -------- >>> import numpy as np >>> from scipy.sparse import dok_matrix >>> S = dok_matrix((5, 5), dtype=np.float32) >>> for i in range(5): ... for j in range(5): ... S[i, j] = i + j # Update element
class dok_matrix(spmatrix, _dok_base): """ Dictionary Of Keys based sparse matrix. This is an efficient structure for constructing sparse matrices incrementally. This can be instantiated in several ways: dok_matrix(D) where D is a 2-D ndarray dok_matrix(S) with another sparse array or matrix S (equivalent to S.todok()) dok_matrix((M,N), [dtype]) create the matrix with initial shape (M,N) dtype is optional, defaulting to dtype='d' Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of nonzero elements size T Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. - Allows for efficient O(1) access of individual elements. - Duplicates are not allowed. - Can be efficiently converted to a coo_matrix once constructed. Examples -------- >>> import numpy as np >>> from scipy.sparse import dok_matrix >>> S = dok_matrix((5, 5), dtype=np.float32) >>> for i in range(5): ... for j in range(5): ... S[i, j] = i + j # Update element """ def set_shape(self, shape): new_matrix = self.reshape(shape, copy=False).asformat(self.format) self.__dict__ = new_matrix.__dict__ def get_shape(self): """Get shape of a sparse matrix.""" return self._shape shape = property(fget=get_shape, fset=set_shape) def __reversed__(self): return self._dict.__reversed__() def __or__(self, other): if isinstance(other, _dok_base): return self._dict | other._dict return self._dict | other def __ror__(self, other): if isinstance(other, _dok_base): return self._dict | other._dict return self._dict | other def __ior__(self, other): if isinstance(other, _dok_base): self._dict |= other._dict else: self._dict |= other return self
(arg1, shape=None, dtype=None, copy=False)
724,741
scipy.sparse._base
__abs__
null
def __abs__(self): return abs(self.tocsr())
(self)
724,742
scipy.sparse._dok
__add__
null
def __add__(self, other): if isscalarlike(other): res_dtype = upcast_scalar(self.dtype, other) new = self._dok_container(self.shape, dtype=res_dtype) # Add this scalar to each element. for key in itertools.product(*[range(d) for d in self.shape]): aij = self._dict.get(key, 0) + other if aij: new[key] = aij elif issparse(other): if other.shape != self.shape: raise ValueError("Matrix dimensions are not equal.") res_dtype = upcast(self.dtype, other.dtype) new = self._dok_container(self.shape, dtype=res_dtype) new._dict = self._dict.copy() if other.format == "dok": o_items = other.items() else: other = other.tocoo() if self.ndim == 1: o_items = zip(other.coords[0], other.data) else: o_items = zip(zip(*other.coords), other.data) with np.errstate(over='ignore'): new._dict.update((k, new[k] + v) for k, v in o_items) elif isdense(other): new = self.todense() + other else: return NotImplemented return new
(self, other)
724,744
scipy.sparse._dok
__contains__
null
def __contains__(self, key): return key in self._dict
(self, key)
724,745
scipy.sparse._dok
__delitem__
null
def __delitem__(self, key, /): del self._dict[key]
(self, key, /)
724,747
scipy.sparse._base
__eq__
null
def __eq__(self, other): return self.tocsr().__eq__(other)
(self, other)
724,748
scipy.sparse._base
__ge__
null
def __ge__(self, other): return self.tocsr().__ge__(other)
(self, other)
724,749
scipy.sparse._dok
__getitem__
null
def __getitem__(self, key): if self.ndim == 2: return super().__getitem__(key) if isinstance(key, tuple) and len(key) == 1: key = key[0] INT_TYPES = (int, np.integer) if isinstance(key, INT_TYPES): if key < 0: key += self.shape[-1] if key < 0 or key >= self.shape[-1]: raise IndexError('index value out of bounds') return self._get_int(key) else: raise IndexError('array/slice index for 1d dok_array not yet supported')
(self, key)
724,750
scipy.sparse._base
__gt__
null
def __gt__(self, other): return self.tocsr().__gt__(other)
(self, other)
724,753
scipy.sparse._dok
__imul__
null
def __imul__(self, other): if isscalarlike(other): self._dict.update((k, v * other) for k, v in self.items()) return self return NotImplemented
(self, other)
724,754
scipy.sparse._dok
__init__
null
def __init__(self, arg1, shape=None, dtype=None, copy=False): _spbase.__init__(self) is_array = isinstance(self, sparray) if isinstance(arg1, tuple) and isshape(arg1, allow_1d=is_array): self._shape = check_shape(arg1, allow_1d=is_array) self._dict = {} self.dtype = getdtype(dtype, default=float) elif issparse(arg1): # Sparse ctor if arg1.format == self.format: arg1 = arg1.copy() if copy else arg1 else: arg1 = arg1.todok() if dtype is not None: arg1 = arg1.astype(dtype, copy=False) self._dict = arg1._dict self._shape = check_shape(arg1.shape, allow_1d=is_array) self.dtype = arg1.dtype else: # Dense ctor try: arg1 = np.asarray(arg1) except Exception as e: raise TypeError('Invalid input format.') from e if arg1.ndim > 2: raise TypeError('Expected rank <=2 dense array or matrix.') if arg1.ndim == 1: if dtype is not None: arg1 = arg1.astype(dtype) self._dict = {i: v for i, v in enumerate(arg1) if v != 0} self.dtype = arg1.dtype else: d = self._coo_container(arg1, dtype=dtype).todok() self._dict = d._dict self.dtype = d.dtype self._shape = check_shape(arg1.shape, allow_1d=is_array)
(self, arg1, shape=None, dtype=None, copy=False)
724,755
scipy.sparse._dok
__ior__
null
def __ior__(self, other): if isinstance(other, _dok_base): self._dict |= other._dict else: self._dict |= other return self
(self, other)
724,757
scipy.sparse._base
__iter__
null
def __iter__(self): for r in range(self.shape[0]): yield self[r]
(self)
724,758
scipy.sparse._dok
__itruediv__
null
def __itruediv__(self, other): if isscalarlike(other): self._dict.update((k, v / other) for k, v in self.items()) return self return NotImplemented
(self, other)
724,759
scipy.sparse._base
__le__
null
def __le__(self, other): return self.tocsr().__le__(other)
(self, other)
724,761
scipy.sparse._base
__lt__
null
def __lt__(self, other): return self.tocsr().__lt__(other)
(self, other)
724,764
scipy.sparse._base
__ne__
null
def __ne__(self, other): return self.tocsr().__ne__(other)
(self, other)
724,765
scipy.sparse._dok
__neg__
null
def __neg__(self): if self.dtype.kind == 'b': raise NotImplementedError( 'Negating a sparse boolean matrix is not supported.' ) new = self._dok_container(self.shape, dtype=self.dtype) new._dict.update((k, -v) for k, v in self.items()) return new
(self)
724,767
scipy.sparse._dok
__or__
null
def __or__(self, other): if isinstance(other, _dok_base): return self._dict | other._dict return self._dict | other
(self, other)
724,769
scipy.sparse._dok
__radd__
null
def __radd__(self, other): return self + other # addition is commutative
(self, other)
724,771
scipy.sparse._dok
__reduce__
null
def __reduce__(self): # this approach is necessary because __setstate__ is called after # __setitem__ upon unpickling and since __init__ is not called there # is no shape attribute hence it is not possible to unpickle it. return dict.__reduce__(self)
(self)
724,773
scipy.sparse._dok
__reversed__
null
def __reversed__(self): return self._dict.__reversed__()
(self)
724,776
scipy.sparse._dok
__ror__
null
def __ror__(self, other): if isinstance(other, _dok_base): return self._dict | other._dict return self._dict | other
(self, other)
724,777
scipy.sparse._base
__round__
null
def __round__(self, ndigits=0): return round(self.tocsr(), ndigits=ndigits)
(self, ndigits=0)
724,780
scipy.sparse._dok
__setitem__
null
def __setitem__(self, key, value): if self.ndim == 2: return super().__setitem__(key, value) if isinstance(key, tuple) and len(key) == 1: key = key[0] INT_TYPES = (int, np.integer) if isinstance(key, INT_TYPES): if key < 0: key += self.shape[-1] if key < 0 or key >= self.shape[-1]: raise IndexError('index value out of bounds') return self._set_int(key, value) else: raise IndexError('array index for 1d dok_array not yet provided')
(self, key, value)
724,783
scipy.sparse._dok
__truediv__
null
def __truediv__(self, other): if isscalarlike(other): res_dtype = upcast_scalar(self.dtype, other) new = self._dok_container(self.shape, dtype=res_dtype) new._dict.update(((k, v / other) for k, v in self.items())) return new return self.tocsr() / other
(self, other)
724,784
scipy.sparse._base
_add_dense
null
def _add_dense(self, other): return self.tocoo()._add_dense(other)
(self, other)
724,785
scipy.sparse._base
_add_sparse
null
def _add_sparse(self, other): return self.tocsr()._add_sparse(other)
(self, other)
724,789
scipy.sparse._dok
_get_arrayXarray
null
def _get_arrayXarray(self, row, col): # inner indexing i, j = map(np.atleast_2d, np.broadcast_arrays(row, col)) newdok = self._dok_container(i.shape, dtype=self.dtype) for key in itertools.product(range(i.shape[0]), range(i.shape[1])): v = self._dict.get((i[key], j[key]), 0) if v: newdok._dict[key] = v return newdok
(self, row, col)
724,790
scipy.sparse._dok
_get_arrayXint
null
def _get_arrayXint(self, row, col): row = row.squeeze() return self._get_columnXarray(row, [col])
(self, row, col)
724,791
scipy.sparse._dok
_get_arrayXslice
null
def _get_arrayXslice(self, row, col): col = list(range(*col.indices(self.shape[1]))) return self._get_columnXarray(row, col)
(self, row, col)
724,792
scipy.sparse._dok
_get_columnXarray
null
def _get_columnXarray(self, row, col): # outer indexing newdok = self._dok_container((len(row), len(col)), dtype=self.dtype) for i, r in enumerate(row): for j, c in enumerate(col): v = self._dict.get((r, c), 0) if v: newdok._dict[i, j] = v return newdok
(self, row, col)
724,794
scipy.sparse._dok
_get_int
null
def _get_int(self, idx): return self._dict.get(idx, self.dtype.type(0))
(self, idx)
724,795
scipy.sparse._dok
_get_intXarray
null
def _get_intXarray(self, row, col): col = col.squeeze() return self._get_columnXarray([row], col)
(self, row, col)
724,796
scipy.sparse._dok
_get_intXint
null
def _get_intXint(self, row, col): return self._dict.get((row, col), self.dtype.type(0))
(self, row, col)
724,797
scipy.sparse._dok
_get_intXslice
null
def _get_intXslice(self, row, col): return self._get_sliceXslice(slice(row, row + 1), col)
(self, row, col)
724,798
scipy.sparse._dok
_get_sliceXarray
null
def _get_sliceXarray(self, row, col): row = list(range(*row.indices(self.shape[0]))) return self._get_columnXarray(row, col)
(self, row, col)
724,799
scipy.sparse._dok
_get_sliceXint
null
def _get_sliceXint(self, row, col): return self._get_sliceXslice(row, slice(col, col + 1))
(self, row, col)
724,800
scipy.sparse._dok
_get_sliceXslice
null
def _get_sliceXslice(self, row, col): row_start, row_stop, row_step = row.indices(self.shape[0]) col_start, col_stop, col_step = col.indices(self.shape[1]) row_range = range(row_start, row_stop, row_step) col_range = range(col_start, col_stop, col_step) shape = (len(row_range), len(col_range)) # Switch paths only when advantageous # (count the iterations in the loops, adjust for complexity) if len(self) >= 2 * shape[0] * shape[1]: # O(nr*nc) path: loop over <row x col> return self._get_columnXarray(row_range, col_range) # O(nnz) path: loop over entries of self newdok = self._dok_container(shape, dtype=self.dtype) for key in self.keys(): i, ri = divmod(int(key[0]) - row_start, row_step) if ri != 0 or i < 0 or i >= shape[0]: continue j, rj = divmod(int(key[1]) - col_start, col_step) if rj != 0 or j < 0 or j >= shape[1]: continue newdok._dict[i, j] = self._dict[key] return newdok
(self, row, col)
724,801
scipy.sparse._base
_getcol
Returns a copy of column j of the array, as an (m x 1) sparse array (column vector).
def _getcol(self, j): """Returns a copy of column j of the array, as an (m x 1) sparse array (column vector). """ if self.ndim == 1: raise ValueError("getcol not provided for 1d arrays. Use indexing A[j]") # Subclasses should override this method for efficiency. # Post-multiply by a (n x 1) column vector 'a' containing all zeros # except for a_j = 1 N = self.shape[-1] if j < 0: j += N if j < 0 or j >= N: raise IndexError("index out of bounds") col_selector = self._csc_container(([1], [[j], [0]]), shape=(N, 1), dtype=self.dtype) result = self @ col_selector return result
(self, j)
724,803
scipy.sparse._dok
_getnnz
Number of stored values, including explicit zeros. Parameters ---------- axis : None, 0, or 1 Select between the number of values across the whole array, in each column, or in each row. See also -------- count_nonzero : Number of non-zero entries
def _getnnz(self, axis=None): if axis is not None: raise NotImplementedError( "_getnnz over an axis is not implemented for DOK format." ) return len(self._dict)
(self, axis=None)
724,804
scipy.sparse._base
_getrow
Returns a copy of row i of the array, as a (1 x n) sparse array (row vector).
def _getrow(self, i): """Returns a copy of row i of the array, as a (1 x n) sparse array (row vector). """ if self.ndim == 1: raise ValueError("getrow not meaningful for a 1d array") # Subclasses should override this method for efficiency. # Pre-multiply by a (1 x m) row vector 'a' containing all zeros # except for a_i = 1 M = self.shape[0] if i < 0: i += M if i < 0 or i >= M: raise IndexError("index out of bounds") row_selector = self._csr_container(([1], [[0], [i]]), shape=(1, M), dtype=self.dtype) return row_selector @ self
(self, i)
724,805
scipy.sparse._base
_imag
null
def _imag(self): return self.tocsr()._imag()
(self)
724,807
scipy.sparse._dok
_matmul_multivector
null
def _matmul_multivector(self, other): result_dtype = upcast(self.dtype, other.dtype) # vector @ multivector if self.ndim == 1: # works for other 1d or 2d return sum(v * other[j] for j, v in self._dict.items()) # matrix @ multivector M = self.shape[0] new_shape = (M,) if other.ndim == 1 else (M, other.shape[1]) result = np.zeros(new_shape, dtype=result_dtype) for (i, j), v in self.items(): result[i] += v * other[j] return result
(self, other)
724,808
scipy.sparse._base
_matmul_sparse
null
def _matmul_sparse(self, other): return self.tocsr()._matmul_sparse(other)
(self, other)
724,809
scipy.sparse._dok
_matmul_vector
null
def _matmul_vector(self, other): res_dtype = upcast(self.dtype, other.dtype) # vector @ vector if self.ndim == 1: if issparse(other): if other.format == "dok": keys = self.keys() & other.keys() else: keys = self.keys() & other.tocoo().coords[0] return res_dtype(sum(self._dict[k] * other._dict[k] for k in keys)) elif isdense(other): return res_dtype(sum(other[k] * v for k, v in self.items())) else: return NotImplemented # matrix @ vector result = np.zeros(self.shape[0], dtype=res_dtype) for (i, j), v in self.items(): result[i] += v * other[j] return result
(self, other)
724,810
scipy.sparse._dok
_mul_scalar
null
def _mul_scalar(self, other): res_dtype = upcast_scalar(self.dtype, other) # Multiply this scalar by every element. new = self._dok_container(self.shape, dtype=res_dtype) new._dict.update(((k, v * other) for k, v in self.items())) return new
(self, other)
724,813
scipy.sparse._base
_real
null
def _real(self): return self.tocsr()._real()
(self)
724,816
scipy.sparse._dok
_set_arrayXarray
null
def _set_arrayXarray(self, row, col, x): row = list(map(int, row.ravel())) col = list(map(int, col.ravel())) x = x.ravel() self._dict.update(zip(zip(row, col), x)) for i in np.nonzero(x == 0)[0]: key = (row[i], col[i]) if self._dict[key] == 0: # may have been superseded by later update del self._dict[key]
(self, row, col, x)
724,817
scipy.sparse._index
_set_arrayXarray_sparse
null
def _set_arrayXarray_sparse(self, row, col, x): # Fall back to densifying x x = np.asarray(x.toarray(), dtype=self.dtype) x, _ = _broadcast_arrays(x, row) self._set_arrayXarray(row, col, x)
(self, row, col, x)
724,818
scipy.sparse._dok
_set_int
null
def _set_int(self, idx, x): if x: self._dict[idx] = x elif idx in self._dict: del self._dict[idx]
(self, idx, x)
724,819
scipy.sparse._dok
_set_intXint
null
def _set_intXint(self, row, col, x): key = (row, col) if x: self._dict[key] = x elif key in self._dict: del self._dict[key]
(self, row, col, x)
724,820
scipy.sparse._base
_setdiag
This part of the implementation gets overridden by the different formats.
def _setdiag(self, values, k): """This part of the implementation gets overridden by the different formats. """ M, N = self.shape if k < 0: if values.ndim == 0: # broadcast max_index = min(M+k, N) for i in range(max_index): self[i - k, i] = values else: max_index = min(M+k, N, len(values)) if max_index <= 0: return for i, v in enumerate(values[:max_index]): self[i - k, i] = v else: if values.ndim == 0: # broadcast max_index = min(M, N-k) for i in range(max_index): self[i, i + k] = values else: max_index = min(M, N-k, len(values)) if max_index <= 0: return for i, v in enumerate(values[:max_index]): self[i, i + k] = v
(self, values, k)
724,822
scipy.sparse._base
_sub_sparse
null
def _sub_sparse(self, other): return self.tocsr()._sub_sparse(other)
(self, other)