index | package | name | docstring | code | signature |
---|---|---|---|---|---|
724,593 | scipy.sparse._base | __matmul__ | null | def __matmul__(self, other):
if isscalarlike(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self._matmul_dispatch(other)
| (self, other) |
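A minimal usage sketch of this dispatch through the public `@` operator (the sample matrix and vector are hypothetical):

```python
import numpy as np
from scipy.sparse import csr_array

A = csr_array([[1, 0], [0, 2]])
v = np.array([3, 4])
print(A @ v)          # matrix-vector product: [3 8]
try:
    A @ 2             # scalar operand is rejected by __matmul__
except ValueError as e:
    print(e)          # message suggests using '*' instead
```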
724,594 | scipy.sparse._matrix | __mul__ | null | def __mul__(self, other):
return self._matmul_dispatch(other)
| (self, other) |
724,595 | scipy.sparse._compressed | __ne__ | null | def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is"
" inefficient", SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.",
SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Pydata sparse other.
elif is_pydata_spmatrix(other):
return NotImplemented
# Sparse other.
elif issparse(other):
# TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other, '_ne_')
else:
return NotImplemented
| (self, other) |
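A short sketch of the sparse-vs-sparse branch above (it reduces to `_binopt(other, '_ne_')`), reached via the public `!=` operator; inputs are hypothetical:

```python
from scipy.sparse import csr_array

A = csr_array([[1, 0], [0, 2]])
B = csr_array([[1, 0], [3, 2]])
# Same shape and format, so the comparison yields a sparse boolean result.
print((A != B).toarray())
# [[False False]
#  [ True False]]
```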
724,596 | scipy.sparse._data | __neg__ | null | def __neg__(self):
if self.dtype.kind == 'b':
raise NotImplementedError('negating a boolean sparse array is not '
'supported')
return self._with_data(-self.data)
| (self) |
724,598 | scipy.sparse._matrix | __pow__ | null | def __pow__(self, power):
from .linalg import matrix_power
return matrix_power(self, power)
| (self, power) |
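A sketch of the matrix-API semantics, where `**` means matrix power via `scipy.sparse.linalg.matrix_power` (this applies to `csr_matrix`; for `csr_array`, `**` is elementwise):

```python
from scipy.sparse import csr_matrix

A = csr_matrix([[1, 1], [0, 1]])
print((A ** 3).toarray())
# [[1 3]
#  [0 1]]
```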
724,599 | scipy.sparse._base | __radd__ | null | def __radd__(self,other): # other + self
return self.__add__(other)
| (self, other) |
724,600 | scipy.sparse._base | __rdiv__ | null | def __rdiv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
| (self, other) |
724,601 | scipy.sparse._base | __repr__ | null | def __repr__(self):
_, format_name = _formats[self.format]
sparse_cls = 'array' if isinstance(self, sparray) else 'matrix'
shape_str = 'x'.join(str(x) for x in self.shape)
return (
f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n"
f"\twith {self.nnz} stored elements in {format_name} format>"
)
| (self) |
724,602 | scipy.sparse._base | __rmatmul__ | null | def __rmatmul__(self, other):
if isscalarlike(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self._rmatmul_dispatch(other)
| (self, other) |
724,603 | scipy.sparse._matrix | __rmul__ | null | def __rmul__(self, other):
return self._rmatmul_dispatch(other)
| (self, other) |
724,604 | scipy.sparse._data | __round__ | null | def __round__(self, ndigits=0):
return self._with_data(np.around(self._deduped_data(), decimals=ndigits))
| (self, ndigits=0) |
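A sketch showing Python's built-in `round` routing through `__round__`, which rounds only the stored (deduplicated) values:

```python
from scipy.sparse import csr_array

A = csr_array([[1.26, 0.0], [0.0, 2.71]])
print(round(A, 1).toarray())
# [[1.3 0. ]
#  [0.  2.7]]
```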
724,605 | scipy.sparse._base | __rsub__ | null | def __rsub__(self,other): # other - self
if isscalarlike(other):
if other == 0:
return -self.copy()
raise NotImplementedError('subtracting a sparse array from a '
'nonzero scalar is not supported')
elif isdense(other):
other = np.broadcast_to(other, self.shape)
return self._rsub_dense(other)
else:
return NotImplemented
| (self, other) |
724,606 | scipy.sparse._base | __rtruediv__ | null | def __rtruediv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
| (self, other) |
724,607 | scipy.sparse._index | __setitem__ | null | def __setitem__(self, key, x):
row, col = self._validate_indices(key)
if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES):
x = np.asarray(x, dtype=self.dtype)
if x.size != 1:
raise ValueError('Trying to assign a sequence to an item')
self._set_intXint(row, col, x.flat[0])
return
if isinstance(row, slice):
row = np.arange(*row.indices(self.shape[0]))[:, None]
else:
row = np.atleast_1d(row)
if isinstance(col, slice):
col = np.arange(*col.indices(self.shape[1]))[None, :]
if row.ndim == 1:
row = row[:, None]
else:
col = np.atleast_1d(col)
i, j = _broadcast_arrays(row, col)
if i.shape != j.shape:
raise IndexError('number of row and column indices differ')
from ._base import issparse
if issparse(x):
if i.ndim == 1:
# Inner indexing, so treat them like row vectors.
i = i[None]
j = j[None]
broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
if not ((broadcast_row or x.shape[0] == i.shape[0]) and
(broadcast_col or x.shape[1] == i.shape[1])):
raise ValueError('shape mismatch in assignment')
if x.shape[0] == 0 or x.shape[1] == 0:
return
x = x.tocoo(copy=True)
x.sum_duplicates()
self._set_arrayXarray_sparse(i, j, x)
else:
# Make x and i into the same shape
x = np.asarray(x, dtype=self.dtype)
if x.squeeze().shape != i.squeeze().shape:
x = np.broadcast_to(x, i.shape)
if x.size == 0:
return
x = x.reshape(i.shape)
self._set_arrayXarray(i, j, x)
| (self, key, x) |
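A usage sketch of two assignment paths above: the int/int fast path (`_set_intXint`) and the slice path that broadcasts a row. Both writes insert new nonzeros, so scipy emits a `SparseEfficiencyWarning`:

```python
import numpy as np
from scipy.sparse import csr_array

A = csr_array((2, 3), dtype=np.float64)   # empty 2x3 sparse array
A[0, 1] = 5.0                # int/int path
A[1, :] = [1.0, 2.0, 3.0]    # slice path; x is reshaped to match the index grid
print(A.toarray())
# [[0. 5. 0.]
#  [1. 2. 3.]]
```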
724,608 | scipy.sparse._base | __str__ | null | def __str__(self):
maxprint = self._getmaxprint()
A = self.tocoo()
# helper function, outputs "(i,j) v"
def tostr(row, col, data):
triples = zip(list(zip(row, col)), data)
return '\n'.join([(' {}\t{}'.format(*t)) for t in triples])
if self.nnz > maxprint:
half = maxprint // 2
out = tostr(A.row[:half], A.col[:half], A.data[:half])
out += "\n :\t:\n"
half = maxprint - maxprint//2
out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
else:
out = tostr(A.row, A.col, A.data)
return out
| (self) |
724,609 | scipy.sparse._base | __sub__ | null | def __sub__(self, other): # self - other
if isscalarlike(other):
if other == 0:
return self.copy()
raise NotImplementedError('subtracting a nonzero scalar from a '
'sparse array is not supported')
elif issparse(other):
if other.shape != self.shape:
raise ValueError("inconsistent shapes")
return self._sub_sparse(other)
elif isdense(other):
other = np.broadcast_to(other, self.shape)
return self._sub_dense(other)
else:
return NotImplemented
| (self, other) |
724,610 | scipy.sparse._base | __truediv__ | null | def __truediv__(self, other):
return self._divide(other, true_divide=True)
| (self, other) |
724,611 | scipy.sparse._compressed | _add_dense | null | def _add_dense(self, other):
if other.shape != self.shape:
raise ValueError(f'Incompatible shapes ({self.shape} and {other.shape})')
dtype = upcast_char(self.dtype.char, other.dtype.char)
order = self._swap('CF')[0]
result = np.array(other, dtype=dtype, order=order, copy=True)
M, N = self._swap(self.shape)
y = result if result.flags.c_contiguous else result.T
csr_todense(M, N, self.indptr, self.indices, self.data, y)
return self._container(result, copy=False)
| (self, other) |
724,612 | scipy.sparse._compressed | _add_sparse | null | def _add_sparse(self, other):
return self._binopt(other, '_plus_')
| (self, other) |
724,613 | scipy.sparse._data | _arg_min_or_max | null | def _arg_min_or_max(self, axis, out, argmin_or_argmax, compare):
if out is not None:
raise ValueError("Sparse types do not support an 'out' parameter.")
validateaxis(axis)
if self.ndim == 1:
if axis not in (None, 0, -1):
raise ValueError("axis out of range")
axis = None # avoid calling special axis case. no impact on 1d
if axis is not None:
return self._arg_min_or_max_axis(axis, argmin_or_argmax, compare)
if 0 in self.shape:
raise ValueError("Cannot apply the operation to an empty matrix.")
if self.nnz == 0:
return 0
zero = self.dtype.type(0)
mat = self.tocoo()
# Convert to canonical form: no duplicates, sorted indices.
mat.sum_duplicates()
extreme_index = argmin_or_argmax(mat.data)
extreme_value = mat.data[extreme_index]
num_col = mat.shape[-1]
# If the min value is less than zero, or max is greater than zero,
# then we do not need to worry about implicit zeros.
if compare(extreme_value, zero):
# cast to Python int to avoid overflow and RuntimeError
return int(mat.row[extreme_index]) * num_col + int(mat.col[extreme_index])
# Cheap test for the rare case where we have no implicit zeros.
size = np.prod(self.shape)
if size == mat.nnz:
return int(mat.row[extreme_index]) * num_col + int(mat.col[extreme_index])
# At this stage, any implicit zero could be the min or max value.
# After sum_duplicates(), the `row` and `col` arrays are guaranteed to
# be sorted in C-order, which means the linearized indices are sorted.
linear_indices = mat.row * num_col + mat.col
first_implicit_zero_index = _find_missing_index(linear_indices, size)
if extreme_value == zero:
return min(first_implicit_zero_index, extreme_index)
return first_implicit_zero_index
| (self, axis, out, argmin_or_argmax, compare) |
724,614 | scipy.sparse._data | _arg_min_or_max_axis | null | def _arg_min_or_max_axis(self, axis, argmin_or_argmax, compare):
if self.shape[axis] == 0:
raise ValueError("Cannot apply the operation along a zero-sized dimension.")
if axis < 0:
axis += 2
zero = self.dtype.type(0)
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
ret_size, line_size = mat._swap(mat.shape)
ret = np.zeros(ret_size, dtype=int)
nz_lines, = np.nonzero(np.diff(mat.indptr))
for i in nz_lines:
p, q = mat.indptr[i:i + 2]
data = mat.data[p:q]
indices = mat.indices[p:q]
extreme_index = argmin_or_argmax(data)
extreme_value = data[extreme_index]
if compare(extreme_value, zero) or q - p == line_size:
ret[i] = indices[extreme_index]
else:
zero_ind = _find_missing_index(indices, line_size)
if extreme_value == zero:
ret[i] = min(extreme_index, zero_ind)
else:
ret[i] = zero_ind
if axis == 1:
ret = ret.reshape(-1, 1)
return self._ascontainer(ret)
| (self, axis, argmin_or_argmax, compare) |
724,615 | scipy.sparse._base | _asfptype | Upcast array to a floating point format (if necessary) | def _asfptype(self):
"""Upcast array to a floating point format (if necessary)"""
fp_types = ['f', 'd', 'F', 'D']
if self.dtype.char in fp_types:
return self
else:
for fp_type in fp_types:
if self.dtype <= np.dtype(fp_type):
return self.astype(fp_type)
raise TypeError('cannot upcast [%s] to a floating '
'point format' % self.dtype.name)
| (self) |
724,616 | scipy.sparse._index | _asindices | Convert `idx` to a valid index for an axis with a given length.
Subclasses that need special validation can override this method.
| def _asindices(self, idx, length):
"""Convert `idx` to a valid index for an axis with a given length.
Subclasses that need special validation can override this method.
"""
try:
x = np.asarray(idx)
except (ValueError, TypeError, MemoryError) as e:
raise IndexError('invalid index') from e
if x.ndim not in (1, 2):
raise IndexError('Index dimension must be 1 or 2')
if x.size == 0:
return x
# Check bounds
max_indx = x.max()
if max_indx >= length:
raise IndexError('index (%d) out of range' % max_indx)
min_indx = x.min()
if min_indx < 0:
if min_indx < -length:
raise IndexError('index (%d) out of range' % min_indx)
if x is idx or not x.flags.owndata:
x = x.copy()
x[x < 0] += length
return x
| (self, idx, length) |
724,617 | scipy.sparse._compressed | _binopt | apply the binary operation fn to two sparse matrices. | def _binopt(self, other, op):
"""apply the binary operation fn to two sparse matrices."""
other = self.__class__(other)
# e.g. csr_plus_csr, csr_minus_csr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
maxnnz = self.nnz + other.nnz
idx_dtype = self._get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=maxnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(maxnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(maxnnz, dtype=np.bool_)
else:
data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
fn(self.shape[0], self.shape[1],
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
A = self.__class__((data, indices, indptr), shape=self.shape)
A.prune()
return A
| (self, other, op) |
724,618 | scipy.sparse._data | _deduped_data | null | def _deduped_data(self):
if hasattr(self, 'sum_duplicates'):
self.sum_duplicates()
return self.data
| (self) |
724,619 | scipy.sparse._base | _divide | null | def _divide(self, other, true_divide=False, rdivide=False):
if isscalarlike(other):
if rdivide:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
if true_divide and np.can_cast(self.dtype, np.float64):
return self.astype(np.float64)._mul_scalar(1./other)
else:
r = self._mul_scalar(1./other)
scalar_dtype = np.asarray(other).dtype
if (np.issubdtype(self.dtype, np.integer) and
np.issubdtype(scalar_dtype, np.integer)):
return r.astype(self.dtype)
else:
return r
elif isdense(other):
if not rdivide:
if true_divide:
recip = np.true_divide(1., other)
else:
recip = np.divide(1., other)
return self.multiply(recip)
else:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
elif issparse(other):
if rdivide:
return other._divide(self, true_divide, rdivide=False)
self_csr = self.tocsr()
if true_divide and np.can_cast(self.dtype, np.float64):
return self_csr.astype(np.float64)._divide_sparse(other)
else:
return self_csr._divide_sparse(other)
else:
return NotImplemented
| (self, other, true_divide=False, rdivide=False) |
724,620 | scipy.sparse._compressed | _divide_sparse |
Divide this matrix by a second sparse matrix.
| def _divide_sparse(self, other):
"""
Divide this matrix by a second sparse matrix.
"""
if other.shape != self.shape:
raise ValueError('inconsistent shapes')
r = self._binopt(other, '_eldiv_')
if np.issubdtype(r.dtype, np.inexact):
# Eldiv leaves entries outside the combined sparsity
# pattern empty, so they must be filled manually.
# Everything outside of other's sparsity is NaN, and everything
# inside it is either zero or defined by eldiv.
out = np.empty(self.shape, dtype=self.dtype)
out.fill(np.nan)
row, col = other.nonzero()
out[row, col] = 0
r = r.tocoo()
out[r.row, r.col] = r.data
out = self._container(out)
else:
# integer types go with nan <-> 0
out = r
return out
| (self, other) |
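A sketch of the nan-filling behavior described in the comments, reached through the public `/` operator on float-valued sparse arrays: entries outside `other`'s pattern become nan, entries inside it are 0 or the elementwise quotient:

```python
from scipy.sparse import csr_array

A = csr_array([[1.0, 0.0], [0.0, 2.0]])
B = csr_array([[2.0, 1.0], [0.0, 4.0]])
print(A / B)   # dense result
# [[0.5 0. ]
#  [nan 0.5]]
```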
724,621 | scipy.sparse._compressed | _get_arrayXarray | null | def _get_arrayXarray(self, row, col):
# inner indexing
idx_dtype = self.indices.dtype
M, N = self._swap(self.shape)
major, minor = self._swap((row, col))
major = np.asarray(major, dtype=idx_dtype)
minor = np.asarray(minor, dtype=idx_dtype)
val = np.empty(major.size, dtype=self.dtype)
csr_sample_values(M, N, self.indptr, self.indices, self.data,
major.size, major.ravel(), minor.ravel(), val)
if major.ndim == 1:
return self._ascontainer(val)
return self.__class__(val.reshape(major.shape))
| (self, row, col) |
724,622 | scipy.sparse._csc | _get_arrayXint | null | def _get_arrayXint(self, row, col):
return self._get_submatrix(major=col)._minor_index_fancy(row)
| (self, row, col) |
724,623 | scipy.sparse._csc | _get_arrayXslice | null | def _get_arrayXslice(self, row, col):
return self._major_slice(col)._minor_index_fancy(row)
| (self, row, col) |
724,624 | scipy.sparse._compressed | _get_columnXarray | null | def _get_columnXarray(self, row, col):
# outer indexing
major, minor = self._swap((row, col))
return self._major_index_fancy(major)._minor_index_fancy(minor)
| (self, row, col) |
724,625 | scipy.sparse._base | _get_index_dtype |
Determine index dtype for array.
This wraps _sputils.get_index_dtype, providing compatibility for both
array and matrix API sparse matrices. Matrix API sparse matrices would
attempt to downcast the indices - which can be computationally
expensive and undesirable for users. The array API changes this
behaviour.
See discussion: https://github.com/scipy/scipy/issues/16774
The get_index_dtype import is due to implementation details of the test
suite. It allows the decorator ``with_64bit_maxval_limit`` to mock a
lower int32 max value for checks on the matrix API's downcasting
behaviour.
| def _get_index_dtype(self, arrays=(), maxval=None, check_contents=False):
"""
Determine index dtype for array.
This wraps _sputils.get_index_dtype, providing compatibility for both
array and matrix API sparse matrices. Matrix API sparse matrices would
attempt to downcast the indices - which can be computationally
expensive and undesirable for users. The array API changes this
behaviour.
See discussion: https://github.com/scipy/scipy/issues/16774
The get_index_dtype import is due to implementation details of the test
suite. It allows the decorator ``with_64bit_maxval_limit`` to mock a
lower int32 max value for checks on the matrix API's downcasting
behaviour.
"""
from ._sputils import get_index_dtype
# Don't check contents for array API
return get_index_dtype(arrays,
maxval,
(check_contents and not isinstance(self, sparray)))
| (self, arrays=(), maxval=None, check_contents=False) |
724,626 | scipy.sparse._csc | _get_intXarray | null | def _get_intXarray(self, row, col):
return self._major_index_fancy(col)._get_submatrix(minor=row)
| (self, row, col) |
724,627 | scipy.sparse._compressed | _get_intXint | null | def _get_intXint(self, row, col):
M, N = self._swap(self.shape)
major, minor = self._swap((row, col))
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data,
major, major + 1, minor, minor + 1)
return data.sum(dtype=self.dtype)
| (self, row, col) |
724,628 | scipy.sparse._csc | _get_intXslice | null | def _get_intXslice(self, row, col):
if col.step in (1, None):
return self._get_submatrix(major=col, minor=row, copy=True)
return self._major_slice(col)._get_submatrix(minor=row)
| (self, row, col) |
724,629 | scipy.sparse._csc | _get_sliceXarray | null | def _get_sliceXarray(self, row, col):
return self._major_index_fancy(col)._minor_slice(row)
| (self, row, col) |
724,630 | scipy.sparse._csc | _get_sliceXint | null | def _get_sliceXint(self, row, col):
if row.step in (1, None):
return self._get_submatrix(major=col, minor=row, copy=True)
return self._get_submatrix(major=col)._minor_slice(row)
| (self, row, col) |
724,631 | scipy.sparse._compressed | _get_sliceXslice | null | def _get_sliceXslice(self, row, col):
major, minor = self._swap((row, col))
if major.step in (1, None) and minor.step in (1, None):
return self._get_submatrix(major, minor, copy=True)
return self._major_slice(major)._minor_slice(minor)
| (self, row, col) |
724,632 | scipy.sparse._compressed | _get_submatrix | Return a submatrix of this matrix.
major, minor: None, int, or slice with step 1
| def _get_submatrix(self, major=None, minor=None, copy=False):
"""Return a submatrix of this matrix.
major, minor: None, int, or slice with step 1
"""
M, N = self._swap(self.shape)
i0, i1 = _process_slice(major, M)
j0, j1 = _process_slice(minor, N)
if i0 == 0 and j0 == 0 and i1 == M and j1 == N:
return self.copy() if copy else self
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)
shape = self._swap((i1 - i0, j1 - j0))
return self.__class__((data, indices, indptr), shape=shape,
dtype=self.dtype, copy=False)
| (self, major=None, minor=None, copy=False) |
724,633 | scipy.sparse._csc | _getcol | Returns a copy of column i of the matrix, as a (m x 1)
CSC matrix (column vector).
| def _getcol(self, i):
"""Returns a copy of column i of the matrix, as a (m x 1)
CSC matrix (column vector).
"""
M, N = self.shape
i = int(i)
if i < 0:
i += N
if i < 0 or i >= N:
raise IndexError('index (%d) out of range' % i)
return self._get_submatrix(major=i, copy=True)
| (self, i) |
724,634 | scipy.sparse._base | _getmaxprint | Maximum number of elements to display when printed. | def _getmaxprint(self):
"""Maximum number of elements to display when printed."""
return self.maxprint
| (self) |
724,635 | scipy.sparse._compressed | _getnnz | Number of stored values, including explicit zeros.
Parameters
----------
axis : None, 0, or 1
Select between the number of values across the whole array, in
each column, or in each row.
See also
--------
count_nonzero : Number of non-zero entries
| def _getnnz(self, axis=None):
if axis is None:
return int(self.indptr[-1])
else:
if axis < 0:
axis += 2
axis, _ = self._swap((axis, 1 - axis))
_, N = self._swap(self.shape)
if axis == 0:
return np.bincount(downcast_intp_index(self.indices),
minlength=N)
elif axis == 1:
return np.diff(self.indptr)
raise ValueError('axis out of bounds')
| (self, axis=None) |
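A sketch of the axis semantics via the public matrix-API wrapper `getnnz` (values hypothetical):

```python
from scipy.sparse import csr_matrix

A = csr_matrix([[1, 0, 2], [0, 0, 3]])
print(A.getnnz())        # 3: total stored values
print(A.getnnz(axis=1))  # [2 1]: stored values per row
print(A.getnnz(axis=0))  # [1 0 2]: stored values per column
```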
724,636 | scipy.sparse._csc | _getrow | Returns a copy of row i of the matrix, as a (1 x n)
CSR matrix (row vector).
| def _getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n)
CSR matrix (row vector).
"""
M, N = self.shape
i = int(i)
if i < 0:
i += M
if i < 0 or i >= M:
raise IndexError('index (%d) out of range' % i)
return self._get_submatrix(minor=i).tocsr()
| (self, i) |
724,637 | scipy.sparse._data | _imag | null | def _imag(self):
return self._with_data(self.data.imag)
| (self) |
724,638 | scipy.sparse._compressed | _inequality | null | def _inequality(self, other, op, op_name, bad_scalar_msg):
# Scalar other.
if isscalarlike(other):
if 0 == other and op_name in ('_le_', '_ge_'):
raise NotImplementedError(" >= and <= don't work with 0.")
elif op(0, other):
warn(bad_scalar_msg, SparseEfficiencyWarning, stacklevel=3)
other_arr = np.empty(self.shape, dtype=np.result_type(other))
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
return self._scalar_binopt(other, op)
# Dense other.
elif isdense(other):
return op(self.todense(), other)
# Sparse other.
elif issparse(other):
# TODO sparse broadcasting
if self.shape != other.shape:
raise ValueError("inconsistent shapes")
elif self.format != other.format:
other = other.asformat(self.format)
if op_name not in ('_ge_', '_le_'):
return self._binopt(other, op_name)
warn("Comparing sparse matrices using >= and <= is inefficient, "
"using <, >, or !=, instead.",
SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
return all_true - res
else:
return NotImplemented
| (self, other, op, op_name, bad_scalar_msg) |
724,639 | scipy.sparse._compressed | _insert_many | Inserts new nonzero at each (i, j) with value x
Here (i,j) index major and minor respectively.
i, j and x must be non-empty, 1d arrays.
Inserts each major group (e.g. all entries per row) at a time.
Maintains has_sorted_indices property.
Modifies i, j, x in place.
| def _insert_many(self, i, j, x):
"""Inserts new nonzero at each (i, j) with value x
Here (i,j) index major and minor respectively.
i, j and x must be non-empty, 1d arrays.
Inserts each major group (e.g. all entries per row) at a time.
Maintains has_sorted_indices property.
Modifies i, j, x in place.
"""
order = np.argsort(i, kind='mergesort') # stable for duplicates
i = i.take(order, mode='clip')
j = j.take(order, mode='clip')
x = x.take(order, mode='clip')
do_sort = self.has_sorted_indices
# Update index data type
idx_dtype = self._get_index_dtype((self.indices, self.indptr),
maxval=(self.indptr[-1] + x.size))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
i = np.asarray(i, dtype=idx_dtype)
j = np.asarray(j, dtype=idx_dtype)
# Collate old and new in chunks by major index
indices_parts = []
data_parts = []
ui, ui_indptr = np.unique(i, return_index=True)
ui_indptr = np.append(ui_indptr, len(j))
new_nnzs = np.diff(ui_indptr)
prev = 0
for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
# old entries
start = self.indptr[prev]
stop = self.indptr[ii]
indices_parts.append(self.indices[start:stop])
data_parts.append(self.data[start:stop])
# handle duplicate j: keep last setting
uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
if len(uj) == je - js:
indices_parts.append(j[js:je])
data_parts.append(x[js:je])
else:
indices_parts.append(j[js:je][::-1][uj_indptr])
data_parts.append(x[js:je][::-1][uj_indptr])
new_nnzs[c] = len(uj)
prev = ii
# remaining old entries
start = self.indptr[ii]
indices_parts.append(self.indices[start:])
data_parts.append(self.data[start:])
# update attributes
self.indices = np.concatenate(indices_parts)
self.data = np.concatenate(data_parts)
nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
nnzs[0] = idx_dtype(0)
indptr_diff = np.diff(self.indptr)
indptr_diff[ui] += new_nnzs
nnzs[1:] = indptr_diff
self.indptr = np.cumsum(nnzs, out=nnzs)
if do_sort:
# TODO: only sort where necessary
self.has_sorted_indices = False
self.sort_indices()
self.check_format(full_check=False)
| (self, i, j, x) |
724,640 | scipy.sparse._compressed | _major_index_fancy | Index along the major axis where idx is an array of ints.
| def _major_index_fancy(self, idx):
"""Index along the major axis where idx is an array of ints.
"""
idx_dtype = self._get_index_dtype((self.indptr, self.indices))
indices = np.asarray(idx, dtype=idx_dtype).ravel()
_, N = self._swap(self.shape)
M = len(indices)
new_shape = self._swap((M, N))
if M == 0:
return self.__class__(new_shape, dtype=self.dtype)
row_nnz = (self.indptr[indices + 1] - self.indptr[indices]).astype(idx_dtype)
res_indptr = np.zeros(M+1, dtype=idx_dtype)
np.cumsum(row_nnz, out=res_indptr[1:])
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_row_index(
M,
indices,
self.indptr.astype(idx_dtype, copy=False),
self.indices.astype(idx_dtype, copy=False),
self.data,
res_indices,
res_data
)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
| (self, idx) |
724,641 | scipy.sparse._compressed | _major_slice | Index along the major axis where idx is a slice object.
| def _major_slice(self, idx, copy=False):
"""Index along the major axis where idx is a slice object.
"""
if idx == slice(None):
return self.copy() if copy else self
M, N = self._swap(self.shape)
start, stop, step = idx.indices(M)
M = len(range(start, stop, step))
new_shape = self._swap((M, N))
if M == 0:
return self.__class__(new_shape, dtype=self.dtype)
# Work out what slices are needed for `row_nnz`
# start,stop can be -1, only if step is negative
start0, stop0 = start, stop
if stop == -1 and start >= 0:
stop0 = None
start1, stop1 = start + 1, stop + 1
row_nnz = self.indptr[start1:stop1:step] - \
self.indptr[start0:stop0:step]
idx_dtype = self.indices.dtype
res_indptr = np.zeros(M+1, dtype=idx_dtype)
np.cumsum(row_nnz, out=res_indptr[1:])
if step == 1:
all_idx = slice(self.indptr[start], self.indptr[stop])
res_indices = np.array(self.indices[all_idx], copy=copy)
res_data = np.array(self.data[all_idx], copy=copy)
else:
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_row_slice(start, stop, step, self.indptr, self.indices,
self.data, res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
| (self, idx, copy=False) |
724,642 | scipy.sparse._base | _matmul_dispatch | np.array-like matmul & `np.matrix`-like mul, i.e. `dot` or `NotImplemented`
interpret other and call one of the following
self._mul_scalar()
self._matmul_vector()
self._matmul_multivector()
self._matmul_sparse()
| def _matmul_dispatch(self, other):
"""np.array-like matmul & `np.matrix`-like mul, i.e. `dot` or `NotImplemented`
interpret other and call one of the following
self._mul_scalar()
self._matmul_vector()
self._matmul_multivector()
self._matmul_sparse()
"""
# This method has to be different from `__matmul__` because it is also
# called by sparse matrix classes.
# Currently matrix multiplication is only supported
# for 2D arrays. Hence we unpack and use only the
# last two axes' lengths.
M, N = self._shape_as_2d
if other.__class__ is np.ndarray:
# Fast path for the most common case
if other.shape == (N,):
return self._matmul_vector(other)
elif other.shape == (N, 1):
result = self._matmul_vector(other.ravel())
if self.ndim == 1:
return result
return result.reshape(M, 1)
elif other.ndim == 2 and other.shape[0] == N:
return self._matmul_multivector(other)
if isscalarlike(other):
# scalar value
return self._mul_scalar(other)
if issparse(other):
if self.shape[-1] != other.shape[0]:
raise ValueError('dimension mismatch')
if other.ndim == 1:
raise ValueError('Cannot yet multiply a 1d sparse array')
return self._matmul_sparse(other)
# If it's a list or whatever, treat it like an array
other_a = np.asanyarray(other)
if other_a.ndim == 0 and other_a.dtype == np.object_:
# Not interpretable as an array; return NotImplemented so that
# other's __rmatmul__ can kick in if that's implemented.
return NotImplemented
try:
other.shape
except AttributeError:
other = other_a
if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
# dense row or column vector
if other.shape != (N,) and other.shape != (N, 1):
raise ValueError('dimension mismatch')
result = self._matmul_vector(np.ravel(other))
if isinstance(other, np.matrix):
result = self._ascontainer(result)
if other.ndim == 2 and other.shape[1] == 1:
# If 'other' was an (nx1) column vector, reshape the result
result = result.reshape(-1, 1)
return result
elif other.ndim == 2:
##
# dense 2D array or matrix ("multivector")
if other.shape[0] != N:
raise ValueError('dimension mismatch')
result = self._matmul_multivector(np.asarray(other))
if isinstance(other, np.matrix):
result = self._ascontainer(result)
return result
else:
raise ValueError('could not interpret dimensions')
| (self, other) |
724,643 | scipy.sparse._compressed | _matmul_multivector | null | def _matmul_multivector(self, other):
M, N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M, n_vecs),
dtype=upcast_char(self.dtype.char, other.dtype.char))
# csr_matvecs or csc_matvecs
fn = getattr(_sparsetools, self.format + '_matvecs')
fn(M, N, n_vecs, self.indptr, self.indices, self.data,
other.ravel(), result.ravel())
return result
| (self, other) |
724,644 | scipy.sparse._compressed | _matmul_sparse | null | def _matmul_sparse(self, other):
M, K1 = self.shape
K2, N = other.shape
major_axis = self._swap((M, N))[0]
other = self.__class__(other) # convert to this format
idx_dtype = self._get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices))
fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
nnz = fn(M, N,
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype))
idx_dtype = self._get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=nnz)
indptr = np.empty(major_axis + 1, dtype=idx_dtype)
indices = np.empty(nnz, dtype=idx_dtype)
data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
fn = getattr(_sparsetools, self.format + '_matmat')
fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
return self.__class__((data, indices, indptr), shape=(M, N))
| (self, other) |
724,645 | scipy.sparse._compressed | _matmul_vector | null | def _matmul_vector(self, other):
M, N = self.shape
# output array
result = np.zeros(M, dtype=upcast_char(self.dtype.char,
other.dtype.char))
# csr_matvec or csc_matvec
fn = getattr(_sparsetools, self.format + '_matvec')
fn(M, N, self.indptr, self.indices, self.data, other, result)
return result
| (self, other) |
724,646 | scipy.sparse._compressed | _maximum_minimum | null | def _maximum_minimum(self, other, npop, op_name, dense_check):
if isscalarlike(other):
if dense_check(other):
warn("Taking maximum (minimum) with > 0 (< 0) number results"
" to a dense matrix.", SparseEfficiencyWarning,
stacklevel=3)
other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
self.sum_duplicates()
new_data = npop(self.data, np.asarray(other))
mat = self.__class__((new_data, self.indices, self.indptr),
dtype=new_data.dtype, shape=self.shape)
return mat
elif isdense(other):
return npop(self.todense(), other)
elif issparse(other):
return self._binopt(other, op_name)
else:
raise ValueError("Operands not compatible.")
| (self, other, npop, op_name, dense_check) |
724,647 | scipy.sparse._data | _min_or_max | null | def _min_or_max(self, axis, out, min_or_max):
if out is not None:
raise ValueError("Sparse arrays do not support an 'out' parameter.")
validateaxis(axis)
if self.ndim == 1:
if axis not in (None, 0, -1):
raise ValueError("axis out of range")
axis = None # avoid calling special axis case. no impact on 1d
if axis is None:
if 0 in self.shape:
raise ValueError("zero-size array to reduction operation")
zero = self.dtype.type(0)
if self.nnz == 0:
return zero
m = min_or_max.reduce(self._deduped_data().ravel())
if self.nnz != np.prod(self.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return self._min_or_max_axis(axis, min_or_max)
else:
raise ValueError("axis out of range")
| (self, axis, out, min_or_max) |
724,648 | scipy.sparse._data | _min_or_max_axis | null | def _min_or_max_axis(self, axis, min_or_max):
N = self.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = self.shape[1 - axis]
idx_dtype = self._get_index_dtype(maxval=M)
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
major_index, value = mat._minor_reduce(min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
if axis == 0:
return self._coo_container(
(value, (np.zeros(len(value), dtype=idx_dtype), major_index)),
dtype=self.dtype, shape=(1, M)
)
else:
return self._coo_container(
(value, (major_index, np.zeros(len(value), dtype=idx_dtype))),
dtype=self.dtype, shape=(M, 1)
)
| (self, axis, min_or_max) |
724,649 | scipy.sparse._compressed | _minor_index_fancy | Index along the minor axis where idx is an array of ints.
| def _minor_index_fancy(self, idx):
"""Index along the minor axis where idx is an array of ints.
"""
idx_dtype = self._get_index_dtype((self.indices, self.indptr))
indices = self.indices.astype(idx_dtype, copy=False)
indptr = self.indptr.astype(idx_dtype, copy=False)
idx = np.asarray(idx, dtype=idx_dtype).ravel()
M, N = self._swap(self.shape)
k = len(idx)
new_shape = self._swap((M, k))
if k == 0:
return self.__class__(new_shape, dtype=self.dtype)
# pass 1: count idx entries and compute new indptr
col_offsets = np.zeros(N, dtype=idx_dtype)
res_indptr = np.empty_like(self.indptr, dtype=idx_dtype)
csr_column_index1(
k,
idx,
M,
N,
indptr,
indices,
col_offsets,
res_indptr,
)
# pass 2: copy indices/data for selected idxs
col_order = np.argsort(idx).astype(idx_dtype, copy=False)
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_column_index2(col_order, col_offsets, len(self.indices),
indices, self.data, res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
| (self, idx) |
724,650 | scipy.sparse._compressed | _minor_reduce | Reduce nonzeros with a ufunc over the minor axis when non-empty
Can be applied to a function of self.data by supplying data parameter.
Warning: this does not call sum_duplicates()
Returns
-------
major_index : array of ints
Major indices where nonzero
value : array of self.dtype
Reduce result for nonzeros in each major_index
| def _minor_reduce(self, ufunc, data=None):
"""Reduce nonzeros with a ufunc over the minor axis when non-empty
Can be applied to a function of self.data by supplying data parameter.
Warning: this does not call sum_duplicates()
Returns
-------
major_index : array of ints
Major indices where nonzero
value : array of self.dtype
Reduce result for nonzeros in each major_index
"""
if data is None:
data = self.data
major_index = np.flatnonzero(np.diff(self.indptr))
value = ufunc.reduceat(data,
downcast_intp_index(self.indptr[major_index]))
return major_index, value
| (self, ufunc, data=None) |
724,651 | scipy.sparse._compressed | _minor_slice | Index along the minor axis where idx is a slice object.
| def _minor_slice(self, idx, copy=False):
"""Index along the minor axis where idx is a slice object.
"""
if idx == slice(None):
return self.copy() if copy else self
M, N = self._swap(self.shape)
start, stop, step = idx.indices(N)
N = len(range(start, stop, step))
if N == 0:
return self.__class__(self._swap((M, N)), dtype=self.dtype)
if step == 1:
return self._get_submatrix(minor=idx, copy=copy)
# TODO: don't fall back to fancy indexing here
return self._minor_index_fancy(np.arange(start, stop, step))
| (self, idx, copy=False) |
724,652 | scipy.sparse._data | _mul_scalar | null | def _mul_scalar(self, other):
return self._with_data(self.data * other)
| (self, other) |
724,653 | scipy.sparse._compressed | _prepare_indices | null | def _prepare_indices(self, i, j):
M, N = self._swap(self.shape)
def check_bounds(indices, bound):
idx = indices.max()
if idx >= bound:
raise IndexError('index (%d) out of range (>= %d)' %
(idx, bound))
idx = indices.min()
if idx < -bound:
raise IndexError('index (%d) out of range (< -%d)' %
(idx, bound))
i = np.atleast_1d(np.asarray(i, dtype=self.indices.dtype)).ravel()
j = np.atleast_1d(np.asarray(j, dtype=self.indices.dtype)).ravel()
check_bounds(i, M)
check_bounds(j, N)
return i, j, M, N
| (self, i, j) |
724,654 | scipy.sparse._base | _process_toarray_args | null | def _process_toarray_args(self, order, out):
if out is not None:
if order is not None:
raise ValueError('order cannot be specified if out '
'is not None')
if out.shape != self.shape or out.dtype != self.dtype:
raise ValueError('out array must be same dtype and shape as '
'sparse array')
out[...] = 0.
return out
else:
return np.zeros(self.shape, dtype=self.dtype, order=order)
| (self, order, out) |
724,655 | scipy.sparse._index | _raise_on_1d_array_slice | We do not currently support 1D sparse arrays.
This function is called each time that a 1D array would
result, raising an error instead.
Once 1D sparse arrays are implemented, it should be removed.
| def _raise_on_1d_array_slice(self):
"""We do not currently support 1D sparse arrays.
This function is called each time that a 1D array would
result, raising an error instead.
Once 1D sparse arrays are implemented, it should be removed.
"""
from scipy.sparse import sparray
if isinstance(self, sparray):
raise NotImplementedError(
'We have not yet implemented 1D sparse slices; '
'please index using explicit indices, e.g. `x[:, [0]]`'
)
| (self) |
724,656 | scipy.sparse._data | _real | null | def _real(self):
return self._with_data(self.data.real)
| (self) |
724,657 | scipy.sparse._base | _rmatmul_dispatch | null | def _rmatmul_dispatch(self, other):
if isscalarlike(other):
return self._mul_scalar(other)
else:
# Don't use asarray unless we have to
try:
tr = other.transpose()
except AttributeError:
tr = np.asarray(other).transpose()
ret = self.transpose()._matmul_dispatch(tr)
if ret is NotImplemented:
return NotImplemented
return ret.transpose()
| (self, other) |
724,658 | scipy.sparse._base | _rsub_dense | null | def _rsub_dense(self, other):
# note: this can't be replaced by other + (-self) for unsigned types
return other - self.todense()
| (self, other) |
724,659 | scipy.sparse._compressed | _scalar_binopt | Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new sparse array in canonical form.
| def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new sparse array in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
| (self, other, op) |
724,660 | scipy.sparse._compressed | _set_arrayXarray | null | def _set_arrayXarray(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
| (self, row, col, x) |
724,661 | scipy.sparse._compressed | _set_arrayXarray_sparse | null | def _set_arrayXarray_sparse(self, row, col, x):
# clear entries that will be overwritten
self._zero_many(*self._swap((row, col)))
M, N = row.shape # matches col.shape
broadcast_row = M != 1 and x.shape[0] == 1
broadcast_col = N != 1 and x.shape[1] == 1
r, c = x.row, x.col
x = np.asarray(x.data, dtype=self.dtype)
if x.size == 0:
return
if broadcast_row:
r = np.repeat(np.arange(M), len(r))
c = np.tile(c, M)
x = np.tile(x, M)
if broadcast_col:
r = np.repeat(r, N)
c = np.tile(np.arange(N), len(c))
x = np.repeat(x, N)
# only assign entries in the new sparsity structure
i, j = self._swap((row[r, c], col[r, c]))
self._set_many(i, j, x)
| (self, row, col, x) |
724,662 | scipy.sparse._compressed | _set_intXint | null | def _set_intXint(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
| (self, row, col, x) |
724,663 | scipy.sparse._compressed | _set_many | Sets value at each (i, j) to x
Here (i,j) index major and minor respectively, and must not contain
duplicate entries.
| def _set_many(self, i, j, x):
"""Sets value at each (i, j) to x
Here (i,j) index major and minor respectively, and must not contain
duplicate entries.
"""
i, j, M, N = self._prepare_indices(i, j)
x = np.atleast_1d(np.asarray(x, dtype=self.dtype)).ravel()
n_samples = x.size
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if -1 not in offsets:
# only affects existing non-zero cells
self.data[offsets] = x
return
else:
warn("Changing the sparsity structure of a {}_matrix is expensive."
" lil_matrix is more efficient.".format(self.format),
SparseEfficiencyWarning, stacklevel=3)
# replace where possible
mask = offsets > -1
self.data[offsets[mask]] = x[mask]
# only insertions remain
mask = ~mask
i = i[mask]
i[i < 0] += M
j = j[mask]
j[j < 0] += N
self._insert_many(i, j, x[mask])
| (self, i, j, x) |
724,664 | scipy.sparse._compressed | _setdiag | null | def _setdiag(self, values, k):
if 0 in self.shape:
return
M, N = self.shape
broadcast = (values.ndim == 0)
if k < 0:
if broadcast:
max_index = min(M + k, N)
else:
max_index = min(M + k, N, len(values))
i = np.arange(-k, max_index - k, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
else:
if broadcast:
max_index = min(M, N - k)
else:
max_index = min(M, N - k, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(k, k + max_index, dtype=self.indices.dtype)
if not broadcast:
values = values[:len(i)]
x = np.atleast_1d(np.asarray(values, dtype=self.dtype)).ravel()
if x.squeeze().shape != i.squeeze().shape:
x = np.broadcast_to(x, i.shape)
if x.size == 0:
return
M, N = self._swap((M, N))
i, j = self._swap((i, j))
n_samples = x.size
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if -1 not in offsets:
# only affects existing non-zero cells
self.data[offsets] = x
return
mask = (offsets <= -1)
# Boundary between csc and convert to coo
# The value 0.001 is justified in gh-19962#issuecomment-1920499678
if mask.sum() < self.nnz * 0.001:
# create new entries
i = i[mask]
j = j[mask]
self._insert_many(i, j, x[mask])
# replace existing entries
mask = ~mask
self.data[offsets[mask]] = x[mask]
else:
# convert to coo for _set_diag
coo = self.tocoo()
coo._setdiag(values, k)
arrays = coo._coo_to_compressed(self._swap)
self.indptr, self.indices, self.data, _ = arrays
| (self, values, k) |
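A sketch via the public `setdiag` wrapper. Here the superdiagonal has no stored entries and the insertions exceed the 0.1% threshold above, so the COO conversion branch is taken:

```python
import numpy as np
from scipy.sparse import csr_array

A = csr_array(np.eye(3))
A.setdiag([7, 8], k=1)   # write the first superdiagonal
print(A.toarray())
# [[1. 7. 0.]
#  [0. 1. 8.]
#  [0. 0. 1.]]
```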
724,665 | scipy.sparse._base | _sub_dense | null | def _sub_dense(self, other):
return self.todense() - other
| (self, other) |
724,666 | scipy.sparse._compressed | _sub_sparse | null | def _sub_sparse(self, other):
return self._binopt(other, '_minus_')
| (self, other) |
724,667 | scipy.sparse._csc | _swap | swap the members of x if this is a column-oriented matrix
| @staticmethod
def _swap(x):
"""swap the members of x if this is a column-oriented matrix
"""
return x[1], x[0]
| (x) |
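A sketch poking at this private helper purely for illustration: the CSC version reverses (major, minor) pairs, while the CSR counterpart is the identity:

```python
from scipy.sparse import csc_array, csr_array

print(csc_array((2, 3))._swap((2, 3)))  # (3, 2): major axis is columns
print(csr_array((2, 3))._swap((2, 3)))  # (2, 3): major axis is rows
```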
724,668 | scipy.sparse._index | _validate_indices | null | def _validate_indices(self, key):
# First, check if indexing with single boolean matrix.
from ._base import _spbase
if (isinstance(key, (_spbase, np.ndarray)) and
key.ndim == 2 and key.dtype.kind == 'b'):
if key.shape != self.shape:
raise IndexError('boolean index shape does not match array shape')
row, col = key.nonzero()
else:
row, col = _unpack_index(key)
M, N = self.shape
def _validate_bool_idx(
idx: npt.NDArray[np.bool_],
axis_size: int,
axis_name: str
) -> npt.NDArray[np.int_]:
if len(idx) != axis_size:
raise IndexError(
f"boolean {axis_name} index has incorrect length: {len(idx)} "
f"instead of {axis_size}"
)
return _boolean_index_to_array(idx)
if isintlike(row):
row = int(row)
if row < -M or row >= M:
raise IndexError('row index (%d) out of range' % row)
if row < 0:
row += M
elif (bool_row := _compatible_boolean_index(row)) is not None:
row = _validate_bool_idx(bool_row, M, "row")
elif not isinstance(row, slice):
row = self._asindices(row, M)
if isintlike(col):
col = int(col)
if col < -N or col >= N:
raise IndexError('column index (%d) out of range' % col)
if col < 0:
col += N
elif (bool_col := _compatible_boolean_index(col)) is not None:
col = _validate_bool_idx(bool_col, N, "column")
elif not isinstance(col, slice):
col = self._asindices(col, N)
return row, col
| (self, key) |
724,669 | scipy.sparse._compressed | _with_data | Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
| def _with_data(self, data, copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data, self.indices.copy(),
self.indptr.copy()),
shape=self.shape,
dtype=data.dtype)
else:
return self.__class__((data, self.indices, self.indptr),
shape=self.shape, dtype=data.dtype)
| (self, data, copy=True) |
724,670 | scipy.sparse._compressed | _zero_many | Sets value at each (i, j) to zero, preserving sparsity structure.
Here (i,j) index major and minor respectively.
| def _zero_many(self, i, j):
"""Sets value at each (i, j) to zero, preserving sparsity structure.
Here (i,j) index major and minor respectively.
"""
i, j, M, N = self._prepare_indices(i, j)
n_samples = len(i)
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
# only assign zeros to the existing sparsity structure
self.data[offsets[offsets > -1]] = 0
| (self, i, j) |
724,671 | scipy.sparse._data | arcsin | Element-wise arcsin.
See `numpy.arcsin` for more information. | def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = (f"Element-wise {name}.\n\n"
f"See `numpy.{name}` for more information.")
method.__name__ = name
return method
| (self) |
724,672 | scipy.sparse._data | arcsinh | Element-wise arcsinh.
See `numpy.arcsinh` for more information. | def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = (f"Element-wise {name}.\n\n"
f"See `numpy.{name}` for more information.")
method.__name__ = name
return method
| (self) |
724,673 | scipy.sparse._data | arctan | Element-wise arctan.
See `numpy.arctan` for more information. | def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = (f"Element-wise {name}.\n\n"
f"See `numpy.{name}` for more information.")
method.__name__ = name
return method
| (self) |
724,674 | scipy.sparse._data | arctanh | Element-wise arctanh.
See `numpy.arctanh` for more information. | def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = (f"Element-wise {name}.\n\n"
f"See `numpy.{name}` for more information.")
method.__name__ = name
return method
| (self) |
724,675 | scipy.sparse._data | argmax | Return indices of maximum elements along an axis.
Implicit zero elements are also taken into account. If there are
several maximum values, the index of the first occurrence is returned.
Parameters
----------
axis : {-2, -1, 0, 1, None}, optional
Axis along which the argmax is computed. If None (default), index
of the maximum element in the flattened data is returned.
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
ind : numpy.matrix or int
Indices of maximum elements. If matrix, its size along `axis` is 1.
| def argmax(self, axis=None, out=None):
"""Return indices of maximum elements along an axis.
Implicit zero elements are also taken into account. If there are
several maximum values, the index of the first occurrence is returned.
Parameters
----------
axis : {-2, -1, 0, 1, None}, optional
Axis along which the argmax is computed. If None (default), index
of the maximum element in the flattened data is returned.
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
ind : numpy.matrix or int
Indices of maximum elements. If matrix, its size along `axis` is 1.
"""
return self._arg_min_or_max(axis, out, np.argmax, np.greater)
| (self, axis=None, out=None) |
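A usage sketch of both axis modes (inputs hypothetical); implicit zeros participate, as the docstring states:

```python
from scipy.sparse import csr_array

A = csr_array([[0, 2, 0], [0, 0, 3]])
print(A.argmax())        # 5: flattened index of the value 3
print(A.argmax(axis=1))  # index of each row's maximum (a column of indices)
```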
724,676 | scipy.sparse._data | argmin | Return indices of minimum elements along an axis.
Implicit zero elements are also taken into account. If there are
several minimum values, the index of the first occurrence is returned.
Parameters
----------
axis : {-2, -1, 0, 1, None}, optional
Axis along which the argmin is computed. If None (default), index
of the minimum element in the flattened data is returned.
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
ind : numpy.matrix or int
Indices of minimum elements. If matrix, its size along `axis` is 1.
| def argmin(self, axis=None, out=None):
"""Return indices of minimum elements along an axis.
Implicit zero elements are also taken into account. If there are
several minimum values, the index of the first occurrence is returned.
Parameters
----------
axis : {-2, -1, 0, 1, None}, optional
Axis along which the argmin is computed. If None (default), index
of the minimum element in the flattened data is returned.
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
ind : numpy.matrix or int
Indices of minimum elements. If matrix, its size along `axis` is 1.
"""
return self._arg_min_or_max(axis, out, np.argmin, np.less)
| (self, axis=None, out=None) |
724,677 | scipy.sparse._base | asformat | Return this array/matrix in the passed format.
Parameters
----------
format : {str, None}
The desired sparse format ("csr", "csc", "lil", "dok", "array", ...)
or None for no conversion.
copy : bool, optional
If True, the result is guaranteed to not share data with self.
Returns
-------
A : This array/matrix in the passed format.
| def asformat(self, format, copy=False):
"""Return this array/matrix in the passed format.
Parameters
----------
format : {str, None}
The desired sparse format ("csr", "csc", "lil", "dok", "array", ...)
or None for no conversion.
copy : bool, optional
If True, the result is guaranteed to not share data with self.
Returns
-------
A : This array/matrix in the passed format.
"""
if format is None or format == self.format:
if copy:
return self.copy()
else:
return self
else:
try:
convert_method = getattr(self, 'to' + format)
except AttributeError as e:
raise ValueError(f'Format {format} is unknown.') from e
# Forward the copy kwarg, if it's accepted.
try:
return convert_method(copy=copy)
except TypeError:
return convert_method()
| (self, format, copy=False) |
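A usage sketch; note the no-op path when the target format already matches and `copy` is False:

```python
from scipy.sparse import csr_array

A = csr_array([[1, 0], [0, 2]])
B = A.asformat('csc')          # converted via A.tocsc()
print(B.format)                # csc
print(A.asformat('csr') is A)  # True: same format, no copy requested
```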
724,678 | scipy.sparse._matrix | asfptype | Upcast matrix to a floating point format (if necessary) | def asfptype(self):
"""Upcast matrix to a floating point format (if necessary)"""
return self._asfptype()
| (self) |
724,679 | scipy.sparse._data | astype | Cast the array/matrix elements to a specified type.
Parameters
----------
dtype : string or numpy dtype
Typecode or data-type to which to cast the data.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
Defaults to 'unsafe' for backwards compatibility.
'no' means the data types should not be cast at all.
'equiv' means only byte-order changes are allowed.
'safe' means only casts which can preserve values are allowed.
'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
'unsafe' means any data conversions may be done.
copy : bool, optional
If `copy` is `False`, the result might share some memory with this
array/matrix. If `copy` is `True`, it is guaranteed that the result and
this array/matrix do not share any memory.
| def astype(self, dtype, casting='unsafe', copy=True):
dtype = np.dtype(dtype)
if self.dtype != dtype:
matrix = self._with_data(
self.data.astype(dtype, casting=casting, copy=True),
copy=True
)
return matrix._with_data(matrix._deduped_data(), copy=False)
elif copy:
return self.copy()
else:
return self
| (self, dtype, casting='unsafe', copy=True) |
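A usage sketch (the integer dtype of the source array is platform dependent):

```python
import numpy as np
from scipy.sparse import csr_array

A = csr_array([[1, 0], [0, 2]])
B = A.astype(np.float32)
print(B.dtype)   # float32
print(A.dtype)   # unchanged integer dtype (e.g. int64)
```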
724,680 | scipy.sparse._data | ceil | Element-wise ceil.
See `numpy.ceil` for more information. | def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = (f"Element-wise {name}.\n\n"
f"See `numpy.{name}` for more information.")
method.__name__ = name
return method
| (self) |
724,681 | scipy.sparse._compressed | check_format | Check whether the array/matrix respects the CSR or CSC format.
Parameters
----------
full_check : bool, optional
If `True`, run rigorous checks, scanning arrays for valid values.
Note that activating these checks might copy arrays for casting
and modify indices and index pointers in place.
If `False`, run basic checks on attributes. O(1) operations.
Default is `True`.
| def check_format(self, full_check=True):
"""Check whether the array/matrix respects the CSR or CSC format.
Parameters
----------
full_check : bool, optional
If `True`, run rigorous checks, scanning arrays for valid values.
Note that activating these checks might copy arrays for casting
and modify indices and index pointers in place.
If `False`, run basic checks on attributes. O(1) operations.
Default is `True`.
"""
# use _swap to determine proper bounds
major_name, minor_name = self._swap(('row', 'column'))
major_dim, minor_dim = self._swap(self.shape)
# index arrays should have integer data types
if self.indptr.dtype.kind != 'i':
warn(f"indptr array has non-integer dtype ({self.indptr.dtype.name})",
stacklevel=3)
if self.indices.dtype.kind != 'i':
warn(f"indices array has non-integer dtype ({self.indices.dtype.name})",
stacklevel=3)
# check array shapes
for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:
if x != 1:
raise ValueError('data, indices, and indptr should be 1-D')
# check index pointer
if (len(self.indptr) != major_dim + 1):
raise ValueError("index pointer size ({}) should be ({})"
"".format(len(self.indptr), major_dim + 1))
if (self.indptr[0] != 0):
raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
raise ValueError("Last value of index pointer should be less than "
"the size of index and data arrays")
self.prune()
if full_check:
# check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= minor_dim:
raise ValueError(f"{minor_name} index values must be < {minor_dim}")
if self.indices.min() < 0:
raise ValueError(f"{minor_name} index values must be >= 0")
if np.diff(self.indptr).min() < 0:
raise ValueError("index pointer values must form a "
"non-decreasing sequence")
idx_dtype = self._get_index_dtype((self.indptr, self.indices))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
self.data = to_native(self.data)
# if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
# self.sort_indices()
# assert(self.has_sorted_indices())
# TODO check for duplicates?
| (self, full_check=True) |
724,682 | scipy.sparse._base | conj | Element-wise complex conjugation.
If the array/matrix is of non-complex data type and `copy` is False,
this method does nothing and the data is not copied.
Parameters
----------
copy : bool, optional
If True, the result is guaranteed to not share data with self.
Returns
-------
A : The element-wise complex conjugate.
| def conj(self, copy=True):
return self.conjugate(copy=copy)
| (self, copy=True) |
724,683 | scipy.sparse._data | conjugate | Element-wise complex conjugation.
If the array/matrix is of non-complex data type and `copy` is False,
this method does nothing and the data is not copied.
Parameters
----------
copy : bool, optional
If True, the result is guaranteed to not share data with self.
Returns
-------
A : The element-wise complex conjugate.
| def conjugate(self, copy=True):
if np.issubdtype(self.dtype, np.complexfloating):
return self._with_data(self.data.conjugate(), copy=copy)
elif copy:
return self.copy()
else:
return self
| (self, copy=True) |
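A usage sketch with a complex-valued array; for non-complex dtypes the method simply copies (or returns self when `copy=False`):

```python
import numpy as np
from scipy.sparse import csr_array

A = csr_array(np.array([[1 + 2j, 0], [0, 3 - 1j]]))
print(A.conjugate().toarray())
# [[1.-2.j 0.+0.j]
#  [0.+0.j 3.+1.j]]
```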
724,684 | scipy.sparse._data | copy | Returns a copy of this array/matrix.
No data/indices will be shared between the returned value and current
array/matrix.
| def copy(self):
return self._with_data(self.data.copy(), copy=True)
| (self) |
724,685 | scipy.sparse._data | count_nonzero | Number of non-zero entries, equivalent to
np.count_nonzero(a.toarray())
Unlike the nnz property, which return the number of stored
entries (the length of the data attribute), this method counts the
actual number of non-zero entries in data.
| def count_nonzero(self):
return np.count_nonzero(self._deduped_data())
| (self) |
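A sketch contrasting `nnz` (stored entries) with `count_nonzero` (actual non-zeros), using a manually stored explicit zero:

```python
from scipy.sparse import csr_array

A = csr_array([[1, 0], [0, 2]])
A.data[0] = 0                 # turn a stored entry into an explicit zero
print(A.nnz)                  # 2: stored entries, explicit zero included
print(A.count_nonzero())      # 1: actual non-zero values
```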
724,686 | scipy.sparse._data | deg2rad | Element-wise deg2rad.
See `numpy.deg2rad` for more information. | def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = (f"Element-wise {name}.\n\n"
f"See `numpy.{name}` for more information.")
method.__name__ = name
return method
| (self) |
724,687 | scipy.sparse._compressed | diagonal | Returns the kth diagonal of the array/matrix.
Parameters
----------
k : int, optional
Which diagonal to get, corresponding to elements a[i, i+k].
Default: 0 (the main diagonal).
.. versionadded:: 1.0
See also
--------
numpy.diagonal : Equivalent numpy function.
Examples
--------
>>> from scipy.sparse import csr_array
>>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> A.diagonal()
array([1, 0, 5])
>>> A.diagonal(k=1)
array([2, 3])
| def diagonal(self, k=0):
rows, cols = self.shape
if k <= -rows or k >= cols:
return np.empty(0, dtype=self.data.dtype)
fn = getattr(_sparsetools, self.format + "_diagonal")
y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
dtype=upcast(self.dtype))
fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
self.data, y)
return y
| (self, k=0) |
724,688 | scipy.sparse._base | dot | Ordinary dot product
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
| def dot(self, other):
"""Ordinary dot product
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
"""
if np.isscalar(other):
return self * other
else:
return self @ other
| (self, other) |
724,689 | scipy.sparse._compressed | eliminate_zeros | Remove zero entries from the array/matrix
This is an *in place* operation.
| def eliminate_zeros(self):
"""Remove zero entries from the array/matrix
This is an *in place* operation.
"""
M, N = self._swap(self.shape)
_sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
| (self) |
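A sketch of the in-place removal after manually storing an explicit zero:

```python
from scipy.sparse import csr_array

A = csr_array([[1, 0], [0, 2]])
A.data[0] = 0            # explicit zero in the stored data
A.eliminate_zeros()      # in place: drops it and prunes the arrays
print(A.nnz)             # 1
```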
724,690 | scipy.sparse._data | expm1 | Element-wise expm1.
See `numpy.expm1` for more information. | def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = (f"Element-wise {name}.\n\n"
f"See `numpy.{name}` for more information.")
method.__name__ = name
return method
| (self) |
724,691 | scipy.sparse._data | floor | Element-wise floor.
See `numpy.floor` for more information. | def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = (f"Element-wise {name}.\n\n"
f"See `numpy.{name}` for more information.")
method.__name__ = name
return method
| (self) |
724,692 | scipy.sparse._matrix | getH | Return the Hermitian transpose of this matrix.
See Also
--------
numpy.matrix.getH : NumPy's implementation of `getH` for matrices
| def getH(self):
"""Return the Hermitian transpose of this matrix.
See Also
--------
numpy.matrix.getH : NumPy's implementation of `getH` for matrices
"""
return self.conjugate().transpose()
| (self) |
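A usage sketch of the Hermitian transpose (matrix API only):

```python
import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[1 + 1j, 0], [2j, 0]]))
print(A.getH().toarray())    # conjugate transpose
# [[1.-1.j 0.-2.j]
#  [0.+0.j 0.+0.j]]
```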
724,693 | scipy.sparse._matrix | get_shape | Get the shape of the matrix | def get_shape(self):
"""Get the shape of the matrix"""
return self._shape
| (self) |