# External dependencies for the functions below. Names such as `Node`,
# `CopyNode`, `AbstractNode`, `Edge`, `network_components`, `utils`,
# `contract_parallel`, `contract_between`, `contract_copy_node`,
# `contract_trace_edges`, `get_all_edges`, `get_all_nondangling`,
# `get_subgraph_dangling` and `redirect_edge` are assumed to be provided by
# the surrounding tensornetwork package.
import functools
from typing import Any, Iterable, List, Optional, Sequence, Text, Tuple, Type, Union

import numpy as np
import opt_einsum


def fuse_ndarrays(arrays: List[Union[List, np.ndarray]]) -> np.ndarray:
"""
Fuse all `arrays` by simple Kronecker addition.
Arrays are fused from "right to left".
Args:
arrays: A list of arrays to be fused.
Returns:
np.ndarray: The result of fusing `arrays`.
"""
if len(arrays) == 1:
return np.array(arrays[0])
fused_arrays = np.asarray(arrays[0])
for n in range(1, len(arrays)):
fused_arrays = np.ravel(np.add.outer(fused_arrays, arrays[n]))
return fused_arrays
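
# A minimal usage sketch (illustrative, not part of the original module):
# fusing two charge arrays by Kronecker addition flattens their outer sum.
# >>> fuse_ndarrays([[0, 1], [0, 10]])
# array([ 0, 10,  1, 11])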


def fuse_degeneracies(degen1: Union[List, np.ndarray],
degen2: Union[List, np.ndarray]) -> np.ndarray:
"""
Fuse degeneracies `degen1` and `degen2` of two leg-charges
by simple Kronecker product. `degen1` and `degen2` typically belong to two
consecutive legs of `BlockSparseTensor`.
Given `degen1 = [1, 2, 3]` and `degen2 = [10, 100]`, this returns
`[10, 100, 20, 200, 30, 300]`.
When using row-major ordering of indices in `BlockSparseTensor`,
the position of `degen1` should be "to the left" of the position of `degen2`.
Args:
degen1: Iterable of integers
degen2: Iterable of integers
Returns:
np.ndarray: The result of fusing `degen1` with `degen2`.
"""
return np.reshape(
np.array(degen1)[:, None] * np.array(degen2)[None, :],
len(degen1) * len(degen2))


def _get_strides(dims: Union[List[int], np.ndarray]) -> np.ndarray:
"""
Compute the strides of `dims`.
"""
return np.flip(np.append(1, np.cumprod(np.flip(dims[1::]))))
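
# Illustrative example (assumes the row-major layout used throughout):
# >>> _get_strides([2, 3, 4])
# array([12,  4,  1])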


def _find_best_partition(dims: Union[List[int], np.ndarray]) -> int:
"""
Find the most-levelled partition of `dims`.
A levelled partitioning is a partitioning such that
np.prod(dim[:partition]) and np.prod(dim[partition:])
are as close as possible.
Args:
dims: A list or np.ndarray of integers.
Returns:
int: The best partitioning.
"""
if len(dims) == 1:
raise ValueError(
'expecting dims with a length of at least 2, got len(dims) =1')
diffs = [
np.abs(np.prod(dims[0:n]) - np.prod(dims[n::]))
for n in range(1, len(dims))
]
min_inds = np.nonzero(diffs == np.min(diffs))[0]
if len(min_inds) > 1:
right_dims = [np.prod(dims[min_ind + 1:]) for min_ind in min_inds]
min_ind = min_inds[np.argmax(right_dims)]
else:
min_ind = min_inds[0]
return min_ind + 1
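
# Illustrative example: for dims [2, 3, 4, 5] the products of the two halves
# are closest for the split [2, 3] | [4, 5] (6 vs. 20), so the function
# returns the partition index 2.
# >>> _find_best_partition([2, 3, 4, 5])
# 2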


def get_dtype(itemsize: int) -> Type[np.number]:
"""
Return the `numpy.dtype` needed to store an
element of `itemsize` bytes.
"""
final_dtype = np.int8
if itemsize > 1:
final_dtype = np.int16
if itemsize > 2:
final_dtype = np.int32
if itemsize > 4:
final_dtype = np.int64
return final_dtype


def collapse(array: np.ndarray) -> np.ndarray:
"""
If possible, collapse a 2d numpy array
`array` along the rows into a 1d array of larger
dtype.
Args:
array: np.ndarray
Returns:
np.ndarray: The collapsed array.
"""
if array.ndim <= 1 or array.dtype.itemsize * array.shape[1] > 8:
return array
array = np.ascontiguousarray(array)
newdtype = get_dtype(array.dtype.itemsize * array.shape[1])
if array.shape[1] in (1, 2, 4, 8):
tmparray = array.view(newdtype)
else:
if array.shape[1] == 3:
width = 1
else:
width = 8 - array.shape[1]
tmparray = np.squeeze(
np.concatenate(
[array, np.zeros((array.shape[0], width), dtype=array.dtype)],
axis=1).view(newdtype))
return np.squeeze(tmparray)


def expand(array: np.ndarray, original_dtype: Type[np.number],
original_width: int, original_ndim: int) -> np.ndarray:
"""
Reverse operation to `collapse`.
Expand a 1d numpy array `array` into a 2d array
of dtype `original_dtype` by view-casting.
Args:
array: The collapsed array.
original_dtype: The dtype of the original (uncollapsed) array
original_width: The width (the length of the second dimension)
of the original (uncollapsed) array.
original_ndim: Number of dimensions of the original (uncollapsed)
array.
Returns:
np.ndarray: The expanded array.
"""
if original_ndim <= 1:
#nothing to expand
return np.squeeze(array)
if array.ndim == 1:
#the array has been collapsed
#now we uncollapse it
result = array[:, None].view(original_dtype)
if original_width in (3, 5, 6, 7):
result = np.ascontiguousarray(result[:, :original_width])
return result
return array
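
# Illustrative round trip (the packed integer values depend on the platform's
# byte order, so only the round-trip property is shown here):
# >>> a = np.array([[0, 1], [2, 3]], dtype=np.int16)
# >>> c = collapse(a)                     # 1d array of dtype np.int32
# >>> np.array_equal(expand(c, np.int16, 2, 2), a)
# True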


def unique(array: np.ndarray,
return_index: bool = False,
return_inverse: bool = False,
return_counts: bool = False,
label_dtype: Type[np.number] = np.int16) -> Any:
"""
Compute the unique elements of 1d or 2d `array` along the
zero axis of the array.
This function performs a similar
task to `numpy.unique` with the `axis=0` argument,
but is substantially faster for 2d arrays.
Note that for the case of 2d arrays, the ordering of the array of unique
elements differs from the ordering of `numpy.unique`.
Args:
array: An input array of integers.
return_index: If `True`, also return the indices of `array`
that result in the unique array.
return_inverse: If `True`, also return the indices of the unique array
that can be used to reconstruct `array`.
return_counts: If `True`, also return the number of times
each unique item appears in `array`.
Returns:
np.ndarray: An array of unique elements.
np.ndarray (optional): The indices of array that result
in the unique array.
np.ndarray (optional): The indices of the unique array
from which `array` can be reconstructed.
np.ndarray (optional): The number of times each element of the
unique array appears in `array`.
"""
array = np.asarray(array)
original_width = array.shape[1] if array.ndim == 2 else 0
original_ndim = array.ndim
collapsed_array = collapse(array)
if collapsed_array.ndim <= 1:
axis = None
else:
axis = 0
_return_index = (collapsed_array.dtype in (np.int8, np.int16)) or return_index
res = np.unique(
collapsed_array,
return_index=_return_index,
return_inverse=return_inverse,
return_counts=return_counts,
axis=axis)
if any([return_index, return_inverse, return_counts]):
out = list(res)
if _return_index and not return_index:
del out[1]
out[0] = expand(out[0], array.dtype, original_width, original_ndim)
if return_inverse and not return_index:
out[1] = out[1].astype(label_dtype)
elif return_inverse and return_index:
out[2] = out[2].astype(label_dtype)
out[0] = np.ascontiguousarray(out[0])
else:
if _return_index:
out = expand(res[0], array.dtype, original_width, original_ndim)
else:
out = expand(res, array.dtype, original_width, original_ndim)
return out
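
# Illustrative example: the two distinct rows are returned, though possibly
# in a different order than `numpy.unique(array, axis=0)` would produce.
# >>> a = np.array([[0, 1], [1, 0], [0, 1]], dtype=np.int16)
# >>> unique(a).shape
# (2, 2)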


def intersect(A: np.ndarray,
B: np.ndarray,
axis=0,
assume_unique=False,
return_indices=False) -> Any:
"""
Extends numpy's intersect1d to find the row or column-wise intersection of
two 2d arrays. Takes identical input to numpy intersect1d.
Args:
A, B (np.ndarray): arrays of matching widths and datatypes
Returns:
ndarray: sorted 1D array of common rows/cols between the input arrays
ndarray: the indices of the first occurrences of the common values in A.
Only provided if return_indices is True.
ndarray: the indices of the first occurrences of the common values in B.
Only provided if return_indices is True.
"""
if A.dtype != B.dtype:
raise ValueError(f"array dtypes must macht to intersect, "
f"found A.dtype = {A.dtype}, B.dtype = {B.dtype}")
if axis not in (0, 1):
raise NotImplementedError(
"intersection can only be performed on first or second axis")
if A.ndim != B.ndim:
raise ValueError("array ndims must match to intersect")
if axis == 1:
if A.shape[0] != B.shape[0]:
raise ValueError("array heights must match to intersect on second axis")
out = intersect(
A.T,
B.T,
axis=0,
assume_unique=assume_unique,
return_indices=return_indices)
if return_indices:
return np.ascontiguousarray(out[0].T), out[1], out[2]
return np.ascontiguousarray(out.T)
if A.ndim > 1 and A.shape[1] != B.shape[1]:
raise ValueError("array widths must match to intersect on first axis")
original_width = A.shape[1] if A.ndim == 2 else 0
original_ndim = A.ndim
collapsed_A = collapse(A)
collapsed_B = collapse(B)
if collapsed_A.ndim > 1:
# arrays were not collapsible, fall back to slower implementation
return _intersect_ndarray(collapsed_A, collapsed_B, axis, assume_unique,
return_indices)
if collapsed_A.dtype in (np.int8,
np.int16) and collapsed_B.dtype in (np.int8,
np.int16):
#special case of dtype = np.int8 or np.int16
#original charges were unpadded in this case
C, A_locs, B_locs = np.intersect1d(
collapsed_A,
collapsed_B,
assume_unique=assume_unique,
return_indices=True)
C = expand(C, A.dtype, original_width, original_ndim)
if return_indices:
result = C, A_locs, B_locs
else:
result = C
else:
result = np.intersect1d(
collapsed_A,
collapsed_B,
assume_unique=assume_unique,
return_indices=return_indices)
if return_indices:
result = list(result)
result[0] = expand(result[0], A.dtype, original_width, original_ndim)
else:
result = expand(result, A.dtype, original_width, original_ndim)
return result
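
# Illustrative example: row-wise intersection of two integer arrays.
# >>> A = np.array([[0, 1], [1, 1], [2, 3]], dtype=np.int16)
# >>> B = np.array([[4, 5], [1, 1], [2, 3]], dtype=np.int16)
# >>> intersect(A, B, axis=0)            # the common rows [1, 1] and [2, 3]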


def _intersect_ndarray(A: np.ndarray,
B: np.ndarray,
axis=0,
assume_unique=False,
return_indices=False) -> Any:
"""
Extends numpy's intersect1d to find the row or column-wise intersection of
two 2d arrays. Takes identical input to numpy intersect1d.
Args:
A, B (np.ndarray): arrays of matching widths and datatypes
Returns:
ndarray: sorted 1D array of common rows/cols between the input arrays
ndarray: the indices of the first occurrences of the common values in A.
Only provided if return_indices is True.
ndarray: the indices of the first occurrences of the common values in B.
Only provided if return_indices is True.
"""
# pylint: disable=line-too-long
# see
# https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays
#pylint: disable=no-else-return
A = np.ascontiguousarray(A)
B = np.ascontiguousarray(B)
if A.ndim != B.ndim:
raise ValueError("array ndims must match to intersect")
if A.ndim == 1:
return np.intersect1d(
A, B, assume_unique=assume_unique, return_indices=return_indices)
elif A.ndim == 2:
if axis == 0:
ncols = A.shape[1]
if A.shape[1] != B.shape[1]:
raise ValueError("array widths must match to intersect")
dtype = {
'names': ['f{}'.format(i) for i in range(ncols)],
'formats': ncols * [A.dtype]
}
if return_indices:
C, A_locs, B_locs = np.intersect1d(
A.view(dtype),
B.view(dtype),
assume_unique=assume_unique,
return_indices=return_indices)
return C.view(A.dtype).reshape(-1, ncols), A_locs, B_locs
C = np.intersect1d(
A.view(dtype), B.view(dtype), assume_unique=assume_unique)
return C.view(A.dtype).reshape(-1, ncols)
elif axis == 1:
out = _intersect_ndarray(
A.T.copy(),
B.T.copy(),
axis=0,
assume_unique=assume_unique,
return_indices=return_indices)
if return_indices:
return np.ascontiguousarray(out[0].T), out[1], out[2]
return np.ascontiguousarray(out.T)
raise NotImplementedError(
"intersection can only be performed on first or second axis")
raise NotImplementedError("_intersect_ndarray is only implemented for 1d or 2d arrays") |
Contract given nodes exploiting copy tensors.
This is based on the Bucket-Elimination-based algorithm described in
`arXiv:quant-ph/1712.05384`_, but avoids explicit construction of the
graphical model. Instead, it achieves the efficient contraction of sparse
tensors by representing them as subnetworks consisting of lower rank tensors
and copy tensors. This function assumes that sparse tensors have already been
decomposed this way by the caller.
This contractor is efficient on networks with many copy tensors. Time and
memory requirements are highly sensitive to the requested contraction order.
Note that the returned tensor network may not be fully contracted if the input
network doesn't have enough copy nodes. In this case, the client should use
a different contractor to complete the contraction.
.. _arXiv:quant-ph/1712.05384:
https://arxiv.org/abs/1712.05384
Args:
nodes: A collection of connected nodes.
contraction_order: Order in which copy tensors are contracted.
Returns:
A new iterable of nodes after contracting copy tensors. | def bucket(
nodes: Iterable[AbstractNode],
contraction_order: Sequence[network_components.CopyNode]
) -> Iterable[AbstractNode]:
"""Contract given nodes exploiting copy tensors.
This is based on the Bucket-Elimination-based algorithm described in
`arXiv:quant-ph/1712.05384`_, but avoids explicit construction of the
graphical model. Instead, it achieves the efficient contraction of sparse
tensors by representing them as subnetworks consisting of lower rank tensors
and copy tensors. This function assumes that sparse tensors have already been
decomposed this way by the caller.
This contractor is efficient on networks with many copy tensors. Time and
memory requirements are highly sensitive to the requested contraction order.
Note that the returned tensor network may not be fully contracted if the input
network doesn't have enough copy nodes. In this case, the client should use
a different contractor to complete the contraction.
.. _arXiv:quant-ph/1712.05384:
https://arxiv.org/abs/1712.05384
Args:
nodes: A collection of connected nodes.
contraction_order: Order in which copy tensors are contracted.
Returns:
A new iterable of nodes after contracting copy tensors.
"""
nodes = set(nodes)
for copy_node in contraction_order:
partners = copy_node.get_partners()
new_node = contract_copy_node(copy_node)
nodes = nodes.difference(list(partners.keys()) + [copy_node])
nodes.add(new_node)
return nodes


def add_cnot(
q0: network_components.Edge,
q1: network_components.Edge,
backend: str = "numpy"
) -> Tuple[network_components.CopyNode, network_components.Edge,
network_components.Edge]:
"""Adds the CNOT quantum gate to tensor network.
CNOT consists of two rank-3 tensors: a COPY tensor on the control qubit and
a XOR tensor on the target qubit.
Args:
q0: Input edge for the control qubit.
q1: Input edge for the target qubit.
backend: backend to use
Returns:
Tuple with three elements:
- copy tensor corresponding to the control qubit
- output edge for the control qubit and
- output edge for the target qubit.
"""
control = CopyNode(rank=3, dimension=2, backend=backend)
xor = np.array([[[1, 0], [0, 1]], [[0, 1], [1, 0]]], dtype=np.float64)
target = Node(xor, backend=backend)
network_components.connect(q0, control[0])
network_components.connect(q1, target[0])
network_components.connect(control[1], target[1])
return (control, control[2], target[2])
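
# Minimal usage sketch (illustrative; assumes `Node` and `np` are in scope as
# in the surrounding code):
# >>> q0 = Node(np.array([0.0, 1.0]))    # control qubit prepared in |1>
# >>> q1 = Node(np.array([1.0, 0.0]))    # target qubit prepared in |0>
# >>> copy_node, q0_out, q1_out = add_cnot(q0[0], q1[0])
# The returned copy node can be handed to `bucket` (above) as part of the
# contraction order; `q0_out` and `q1_out` are the open output edges.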


def ncon_solver(tensors: List[np.ndarray],
labels: List[List[int]],
max_branch: Optional[int] = None):
"""
Solve for the contraction order of a tensor network (encoded in the `ncon`
syntax) that minimizes the computational cost.
Args:
tensors: list of the tensors in the network.
labels: list of the tensor connections (in standard `ncon` format).
max_branch: maximum number of contraction paths to search at each step.
Returns:
np.ndarray: the cheapest contraction order found (in ncon format).
float: the cost of the network contraction, given as log10(total_FLOPS).
bool: specifies if contraction order is guaranteed optimal.
"""
# build log-adjacency matrix
log_adj = ncon_to_adj(tensors, labels)
# run search algorithm
order, costs, is_optimal = full_solve_complete(log_adj, max_branch=max_branch)
# put contraction order back into ncon format
con_order = ord_to_ncon(labels, order)
return con_order, costs, is_optimal
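
# Usage sketch (illustrative): a three-matrix chain in `ncon` notation.
# >>> tensors = [np.ones((10, 20)), np.ones((20, 30)), np.ones((30, 40))]
# >>> labels = [[-1, 1], [1, 2], [2, -2]]
# >>> con_order, cost, is_optimal = ncon_solver(tensors, labels)
# `con_order` lists the internal labels in the order they should be
# contracted, `cost` is log10 of the total FLOPS, and `is_optimal` reports
# whether the search was exhaustive.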


def ncon_to_adj(tensors: List[np.ndarray], labels: List[List[int]]):
"""
Create a log-adjacency matrix, where element [i,j] is the log10 of the total
dimension of the indices connecting ith and jth tensors, for a network
defined in the `ncon` syntax.
Args:
tensors: list of the tensors in the network.
labels: list of the tensor connections (in standard `ncon` format).
Returns:
np.ndarray: the log-adjacency matrix.
"""
# process inputs
N = len(labels)
ranks = [len(labels[i]) for i in range(N)]
flat_labels = np.hstack([labels[i] for i in range(N)])
tensor_counter = np.hstack(
[i * np.ones(ranks[i], dtype=int) for i in range(N)])
index_counter = np.hstack([np.arange(ranks[i]) for i in range(N)])
# build log-adjacency index-by-index
log_adj = np.zeros([N, N])
unique_labels = np.unique(flat_labels)
for ele in unique_labels:
# identify tensor/index location of each edge
tnr = tensor_counter[flat_labels == ele]
ind = index_counter[flat_labels == ele]
if len(ind) == 1: # external index
log_adj[tnr[0], tnr[0]] += np.log10(tensors[tnr[0]].shape[ind[0]])
elif len(ind) == 2: # internal index
if tnr[0] != tnr[1]: # ignore partial traces
log_adj[tnr[0], tnr[1]] += np.log10(tensors[tnr[0]].shape[ind[0]])
log_adj[tnr[1], tnr[0]] += np.log10(tensors[tnr[0]].shape[ind[0]])
return log_adj


def ord_to_ncon(labels: List[List[int]], orders: np.ndarray):
"""
Produces a `ncon` compatible index contraction order from the sequence of
pairwise contractions.
Args:
labels: list of the tensor connections (in standard `ncon` format).
orders: array of dim (2,N-1) specifying the set of N-1 pairwise
tensor contractions.
Returns:
np.ndarray: the contraction order (in `ncon` format).
"""
N = len(labels)
orders = orders.reshape(2, N - 1)
new_labels = [np.array(labels[i]) for i in range(N)]
con_order = np.zeros([0], dtype=int)
# remove all partial trace indices
for counter, temp_label in enumerate(new_labels):
uni_inds, counts = np.unique(temp_label, return_counts=True)
tr_inds = uni_inds[np.flatnonzero(counts == 2)]
con_order = np.concatenate((con_order, tr_inds))
new_labels[counter] = temp_label[np.isin(temp_label, uni_inds[counts == 1])]
for i in range(N - 1):
# find common indices between tensor pair
cont_many, A_cont, B_cont = np.intersect1d(
new_labels[orders[0, i]], new_labels[orders[1, i]], return_indices=True)
temp_labels = np.append(
np.delete(new_labels[orders[0, i]], A_cont),
np.delete(new_labels[orders[1, i]], B_cont))
con_order = list(np.concatenate((con_order, cont_many), axis=0))
# build new set of labels
new_labels[orders[0, i]] = temp_labels
del new_labels[orders[1, i]]
return con_order


def ncon_cost_check(tensors: List[np.ndarray],
labels: List[Union[List[int], Tuple[int]]],
con_order: Optional[Union[List[int], str]] = None):
"""
Checks the computational cost of an `ncon` contraction (without actually
doing the contraction). Ignore the cost contributions from partial traces
(which are always sub-leading).
Args:
tensors: list of the tensors in the network.
labels: length-N list of lists (or tuples) specifying the network
connections. The jth entry of the ith list in labels labels the edge
connected to the jth index of the ith tensor. Labels should be positive
integers for internal indices and negative integers for free indices.
con_order: optional argument to specify the order for contracting the
positive indices. Defaults to ascending order if omitted.
Returns:
float: the cost of the network contraction, given as log10(total_FLOPS).
"""
total_cost = float('-inf')
N = len(tensors)
tensor_dims = [np.array(np.log10(ele.shape)) for ele in tensors]
connect_list = [np.array(ele) for ele in labels]
# generate contraction order if necessary
flat_connect = np.concatenate(connect_list)
if con_order is None:
con_order = np.unique(flat_connect[flat_connect > 0])
else:
con_order = np.array(con_order)
# do all partial traces
for counter, temp_connect in enumerate(connect_list):
uni_inds, counts = np.unique(temp_connect, return_counts=True)
tr_inds = np.isin(temp_connect, uni_inds[counts == 1])
tensor_dims[counter] = tensor_dims[counter][tr_inds]
connect_list[counter] = temp_connect[tr_inds]
con_order = con_order[np.logical_not(
np.isin(con_order, uni_inds[counts == 2]))]
# do all binary contractions
while len(con_order) > 0:
# identify tensors to be contracted
cont_ind = con_order[0]
locs = [
ele for ele in range(len(connect_list))
if sum(connect_list[ele] == cont_ind) > 0
]
# identify indices to be contracted
c1 = connect_list.pop(locs[1])
c0 = connect_list.pop(locs[0])
cont_many, A_cont, B_cont = np.intersect1d(
c0, c1, assume_unique=True, return_indices=True)
# identify dimensions of contracted
d1 = tensor_dims.pop(locs[1])
d0 = tensor_dims.pop(locs[0])
single_cost = np.sum(d0) + np.sum(d1) - np.sum(d0[A_cont])
total_cost = single_cost + np.log10(1 + 10**(total_cost - single_cost))
# update lists
tensor_dims.append(np.append(np.delete(d0, A_cont), np.delete(d1, B_cont)))
connect_list.append(np.append(np.delete(c0, A_cont), np.delete(c1, B_cont)))
con_order = con_order[np.logical_not(np.isin(con_order, cont_many))]
# do all outer products
N = len(tensor_dims)
if N > 1:
tensor_sizes = np.sort([np.sum(tensor_dims[ele]) for ele in range(N)])
for _ in range(N - 1):
single_cost = tensor_sizes[0] + tensor_sizes[1]
tensor_sizes[0] += tensor_sizes[1]
tensor_sizes = np.sort(np.delete(tensor_sizes, 1))
total_cost = single_cost + np.log10(1 + 10**(total_cost - single_cost))
return total_cost
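
# Illustrative check on the matrix-chain example used above: contracting
# label 1 first costs 10*20*30 FLOPS and label 2 then costs 10*30*40 FLOPS,
# so the returned value is log10(6000 + 12000), roughly 4.26.
# >>> tensors = [np.ones((10, 20)), np.ones((20, 30)), np.ones((30, 40))]
# >>> labels = [[-1, 1], [1, 2], [2, -2]]
# >>> ncon_cost_check(tensors, labels, con_order=[1, 2])   # ~= 4.26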


def greedy_size_solve(log_adj_in: np.ndarray):
"""
Solve for the contraction order of a tensor network (encoded as a
log-adjacency matrix) using a greedy algorithm that minimizes the
intermediate tensor sizes.
Args:
log_adj_in: matrix where element [i,j] is the log10 of the total dimension
of the indices connecting ith and jth tensors.
Returns:
np.ndarray: cheapest contraction order found, specified as a sequence of
binary contractions.
float: the cost of the network contraction, given as log10(total_FLOPS).
"""
tol = 1e-6 # tolerance for float comparison
N0 = log_adj_in.shape[0]
log_adj = log_adj_in.copy().reshape(N0, N0)
orders = np.zeros([2, 0], dtype=int)
costs = None
for _ in range(N0 - 1):
# compute tensor dims
N = log_adj.shape[0]
dims = np.sum(log_adj, axis=0).reshape(N)
comb_dims = np.add.outer(dims, dims)
# compute contraction costs and new dims
single_cost = comb_dims - log_adj
new_dims = comb_dims - 2 * log_adj
new_dims = new_dims + np.max(new_dims.flatten()) * np.eye(N)
# compute maximum dim of tensor in contraction
temp_mat = np.kron(dims, np.ones([N, 1]))
max_dim = np.maximum(temp_mat, temp_mat.T)
dim_change = ((1 / tol) * (new_dims - max_dim)).astype(int)
# compute coords of minimal dim increase
xcoord, ycoord = np.where(dim_change == np.min(dim_change.flatten()))
upper_tri = (xcoord < ycoord)
xcoord = xcoord[upper_tri]
ycoord = ycoord[upper_tri]
# find contraction with minimal cost
all_costs = np.array(
[single_cost[xcoord[i], ycoord[i]] for i in range(len(xcoord))])
cont_dims = np.array(
[log_adj[xcoord[i], ycoord[i]] for i in range(len(xcoord))])
if max(cont_dims) > 0: # prioritise non-trivial contractions
all_costs[cont_dims == 0] += max(all_costs) + 1
cheapest_pos = np.argmin(all_costs)
i = ycoord[cheapest_pos]
j = xcoord[cheapest_pos]
# build new log adjacency
log_adj[j, j] = log_adj[j, j] - 2 * log_adj[j, i]
log_adj[j, :] = log_adj[j, :] + log_adj[i, :]
log_adj[:, j] = log_adj[:, j] + log_adj[:, i]
log_adj = np.delete(log_adj, i, axis=0)
log_adj = np.delete(log_adj, i, axis=1)
# build new orders
orders = np.hstack((orders, np.asarray([j, i]).reshape(2, 1)))
# tally the cost
if costs is None:
costs = single_cost[i, j]
else:
costs = costs + np.log10(1 + 10**(single_cost[i, j] - costs))
return orders, costs
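
# Usage sketch (illustrative): log-adjacency matrix for the chain
# A(10x20) -- B(20x30) -- C(30x40); diagonal entries carry the open dims.
# >>> log_adj = np.log10([[10., 20., 1.], [20., 1., 30.], [1., 30., 40.]])
# >>> orders, cost = greedy_size_solve(log_adj)
# `orders` has shape (2, 2) and encodes the two pairwise contractions;
# `cost` is again log10 of the total FLOPS.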


def greedy_cost_solve(log_adj_in: np.ndarray):
"""
Solve for the contraction order of a tensor network (encoded as a
log-adjacency matrix) using a greedy algorithm that minimizes the
contraction cost at each step.
Args:
log_adj_in: matrix where element [i,j] is the log10 of the total dimension
of the indices connecting ith and jth tensors.
Returns:
np.ndarray: cheapest contraction order found, specified as a sequence of
binary contractions.
float: the cost of the network contraction, given as log10(total_FLOPS).
"""
tol = 1e-6 # tolerance for float comparison
N = log_adj_in.shape[0]
log_adj = log_adj_in.copy().reshape(N, N)
orders = np.zeros([2, 0], dtype=int)
costs = None
for _ in range(N - 1):
# compute tensor dims and costs
N = log_adj.shape[0]
dims = np.sum(log_adj, axis=0).reshape(N)
comb_dims = np.add.outer(dims, dims)
single_cost = comb_dims - log_adj
# penalize trivial contractions and self-contractions
triv_conts = (log_adj < tol)
trimmed_costs = single_cost + np.max(single_cost.flatten()) * triv_conts
trimmed_costs = trimmed_costs + np.max(trimmed_costs.flatten()) * np.eye(N)
# find best contraction
tensors_to_contract = np.divmod(np.argmin(trimmed_costs), N)
i = max(tensors_to_contract)
j = min(tensors_to_contract)
# build new log adjacency
log_adj[j, j] = log_adj[j, j] - 2 * log_adj[j, i]
log_adj[j, :] = log_adj[j, :] + log_adj[i, :]
log_adj[:, j] = log_adj[:, j] + log_adj[:, i]
log_adj = np.delete(log_adj, i, axis=0)
log_adj = np.delete(log_adj, i, axis=1)
# build new orders
orders = np.hstack((orders, np.asarray(tensors_to_contract).reshape(2, 1)))
# tally the cost
if costs is None:
costs = single_cost[i, j]
else:
costs = costs + np.log10(1 + 10**(single_cost[i, j] - costs))
return orders, costs


def full_solve_complete(log_adj: np.ndarray,
cost_bound: Optional[int] = None,
max_branch: Optional[int] = None):
"""
Solve for optimal contraction path of a network encoded as a log-adjacency
matrix via a full search.
Args:
log_adj: matrix where element [i,j] is the log10 of the total dimension
of the indices connecting ith and jth tensors.
cost_bound: upper cost threshold for discarding paths, in log10(FLOPS).
max_branch: bound for the total number of paths to retain.
Returns:
np.ndarray: the cheapest contraction order found.
float: the cost of the network contraction, given as log10(total_FLOPS).
bool: specifies if contraction order is guaranteed optimal.
"""
tol = 1e-6 # tolerance for float comparison
# start by trying both greedy algorithms
order0, cost0 = greedy_size_solve(log_adj)
order1, cost1 = greedy_cost_solve(log_adj)
if cost0 < cost1:
order_greedy = order0
cost_greedy = cost0
else:
order_greedy = order1
cost_greedy = cost1
if max_branch == 1:
# return results from greedy
order_was_found = False
else:
# initialize arrays
N = log_adj.shape[0]
costs = np.zeros([1, 0])
groups = np.array(2**np.arange(N), dtype=np.uint64).reshape(N, 1)
orders = np.zeros([2, 0, 1], dtype=int)
# try full algorithm (using cost_bound from greedy)
cost_bound = cost_greedy + tol
total_truncated = 0
order_was_found = True
for _ in range(N - 1):
log_adj, costs, groups, orders, num_truncated = _full_solve_single(
log_adj,
costs,
groups,
orders,
cost_bound=cost_bound,
max_branch=max_branch)
if log_adj.size == 0:
# no paths found within the cost-bound
order_was_found = False
break
total_truncated = total_truncated + num_truncated
if order_was_found:
# return result from full algorithm
is_optimal = (total_truncated == 0)
return orders.reshape(2, N - 1), costs.item(), is_optimal
# return result from greedy algorithm
is_optimal = False
return order_greedy, cost_greedy, is_optimal


def _full_solve_single(log_adj: np.ndarray,
costs: np.ndarray,
groups: np.ndarray,
orders: np.ndarray,
cost_bound: Optional[int] = None,
max_branch: Optional[int] = None,
allow_outer: Optional[bool] = False):
"""
Solve for the most-likely contraction step given a set of networks encoded
as log-adjacency matrices. Uses an algorithm that searches multiple (or,
potentially, all) viable paths so as to minimize the total contraction cost.
Args:
log_adj: an np.ndarray of log-adjacency matrices of dim (N,N,m), with `N`
the number of tensors and `m` the number of (intermediate) networks.
costs: np.ndarray of length `m` detailing the prior cost of each network.
groups: np.ndarray of dim (N,m) providing an id-tag for each network,
based on a power-2 encoding.
orders: np.ndarray of dim (2,t,m) detailing the pairwise contraction
history of each network from the previous `t` contraction steps.
cost_bound: upper cost threshold for discarding paths, in log10(FLOPS).
max_branch: bound for the total number of paths to retain.
allow_outer: sets whether outer products are allowed.
Returns:
np.ndarray: new set of `log_adj` matrices.
np.ndarray: new set of `costs`.
np.ndarray: new set of `groups`.
np.ndarray: new set of `orders`.
int: total number of potentially viable paths that were trimmed.
"""
tol = 1e-6 # tolerance for float comparison
# set threshold required to trigger compression routine
if max_branch is None:
mid_kept = 10000
else:
mid_kept = max_branch
# initialize outputs
N = log_adj.shape[0]
if log_adj.ndim == 2:
log_adj = log_adj.reshape(N, N, 1)
final_adj = np.zeros([N - 1, N - 1, 0])
final_costs = np.zeros([1, 0])
final_groups = np.zeros([N - 1, 0], dtype=np.uint64)
final_orders = np.zeros([2, orders.shape[1] + 1, 0], dtype=int)
final_stable = np.zeros([1, 0], dtype=bool)
total_truncated = 0
only_outer_exist = not allow_outer
none_inbounds = True
# try to contract j-th tensor with i-th tensor (j<i)
for i in range(1, N):
for j in range(i):
if not allow_outer:
# only attempt non-trivial contractions
new_pos = np.flatnonzero(log_adj[j, i, :] > 0)
num_kept = len(new_pos)
else:
new_pos = np.arange(log_adj.shape[2])
num_kept = len(new_pos)
if num_kept > 0:
only_outer_exist = False
# dims of tensors and cost of contraction
dims = np.sum(log_adj[:, :, new_pos], axis=0).reshape(N, num_kept)
comb_dims = dims[j, :] + dims[i, :]
single_cost = np.reshape(comb_dims - log_adj[j, i, new_pos],
[1, num_kept])
if costs.size == 0:
new_costs = single_cost
else:
prev_cost = costs[0, new_pos]
new_costs = prev_cost + np.log10(1 + 10**(single_cost - prev_cost))
if cost_bound is not None:
# only keep contractions under the cost bound
pos_under_bound = new_costs.flatten() < cost_bound
new_pos = new_pos[pos_under_bound]
num_kept = len(new_pos)
new_costs = new_costs[0, pos_under_bound].reshape(1, num_kept)
if num_kept > 0:
none_inbounds = False
# order the costs
cost_order = np.argsort(new_costs).flatten()
sorted_pos = new_pos[cost_order]
# identify identical networks
new_groups = groups[:, sorted_pos]
new_groups[j, :] = new_groups[j, :] + new_groups[i, :]
new_groups = np.delete(new_groups, i, axis=0)
new_groups, temp_pos = np.unique(new_groups, return_index=True, axis=1)
new_costs = new_costs[:, cost_order[temp_pos]]
new_pos = sorted_pos[temp_pos]
num_kept = len(new_pos)
# new log adjacency
new_adj = log_adj[:, :, new_pos]
new_adj[j, j, :] = new_adj[j, j, :] - 2 * new_adj[j, i, :]
new_adj[j, :, :] = new_adj[j, :, :] + new_adj[i, :, :]
new_adj[:, j, :] = new_adj[:, j, :] + new_adj[:, i, :]
new_adj = np.delete(new_adj, i, axis=0)
new_adj = np.delete(new_adj, i, axis=1)
# new orders
prev_orders = orders[:, :, new_pos]
next_orders = np.vstack([
j * np.ones(len(new_pos), dtype=int),
i * np.ones(len(new_pos), dtype=int)
]).reshape(2, 1, len(new_pos))
new_orders = np.concatenate((prev_orders, next_orders), axis=1)
# new_stable
dims = np.sum(log_adj[:, :, new_pos], axis=0).reshape(N, num_kept)
comb_dims = dims[j, :] + dims[i, :]
final_dims = np.reshape(comb_dims - 2 * log_adj[j, i, new_pos],
[1, num_kept])
# include a fudge factor to avoid rounding errors
stable_pos = final_dims < (np.maximum(dims[j, :], dims[i, :]) + tol)
final_adj = np.concatenate((final_adj, new_adj), axis=2)
final_costs = np.concatenate((final_costs, new_costs), axis=1)
final_groups = np.concatenate((final_groups, new_groups), axis=1)
final_orders = np.concatenate((final_orders, new_orders), axis=2)
final_stable = np.concatenate((final_stable, stable_pos), axis=1)
# if number of intermediates too large then trigger compression routine
if final_costs.size > mid_kept:
temp_pos, num_truncated = _reduce_nets(
final_costs, final_groups, final_stable, max_branch=max_branch)
final_adj = final_adj[:, :, temp_pos]
final_costs = final_costs[:, temp_pos]
final_groups = final_groups[:, temp_pos]
final_orders = final_orders[:, :, temp_pos]
final_stable = final_stable[:, temp_pos]
total_truncated = total_truncated + num_truncated
if not only_outer_exist:
if none_inbounds:
# no orders found under the cost bound; return trivial
return np.zeros(0), np.zeros(0), np.zeros(0), np.zeros(0), 0
if only_outer_exist: # network contains only outer products
# re-solve with outer products enabled
return _full_solve_single(
log_adj,
costs,
groups,
orders,
cost_bound=cost_bound,
max_branch=max_branch,
allow_outer=True)
# compress outputs
temp_pos = _reduce_nets(final_costs, final_groups, final_stable)[0]
final_adj = final_adj[:, :, temp_pos]
final_costs = final_costs[:, temp_pos]
final_groups = final_groups[:, temp_pos]
final_orders = final_orders[:, :, temp_pos]
final_stable = final_stable[:, temp_pos]
return final_adj, final_costs, final_groups, final_orders, total_truncated


def _reduce_nets(costs: np.ndarray,
groups: np.ndarray,
stable: np.ndarray,
max_branch: Optional[int] = None):
"""
Reduce the `m` starting paths to a smaller number of paths by first (i)
identifying any equivalent networks and then (ii) trimming the most expensive
paths.
Args:
costs: np.ndarray of length `m` detailing the prior cost of each network.
groups: np.ndarray of dim (N,m) providing an id-tag for each network,
based on a power-2 encoding.
stable: np.ndarray of dim (m) denoting which paths were size-stable.
max_branch: bound for the total number of paths to retain.
Returns:
np.ndarray: index positions of the kept paths.
int: total number of potentially viable paths that were trimmed.
"""
# sort according to the costs
new_pos = np.argsort(costs).flatten()
# identify and remove identical networks
temp_pos = np.unique(groups[:, new_pos], return_index=True, axis=1)[1]
orig_kept = len(temp_pos)
new_pos = new_pos[temp_pos]
num_truncated = 0
if max_branch is not None:
if orig_kept > max_branch:
# re-sort according to the costs
cost_order = np.argsort(costs[:, new_pos]).flatten()
new_pos = new_pos[cost_order]
# reserve some percentage for size-stable contractions
preserve_ratio = 0.2
num_stable = int(np.ceil(max_branch * preserve_ratio))
num_cheapest = int(np.ceil(max_branch * (1 - preserve_ratio)))
stable_pos = np.flatnonzero(stable[0, new_pos[num_cheapest:]])
temp_pos = np.concatenate(
(np.arange(num_cheapest),
stable_pos[:min(len(stable_pos), num_stable)] + num_cheapest),
axis=0)
new_pos = new_pos[temp_pos]
num_truncated = orig_kept - len(new_pos)
return new_pos, num_truncated


def gemm_network():
"""Creates 'GEMM1' contraction from `opt_einsum` tests."""
x = Node(np.ones([1, 2, 4]))
y = Node(np.ones([1, 3]))
z = Node(np.ones([2, 4, 3]))
# pylint: disable=pointless-statement
x[0] ^ y[0]
x[1] ^ z[0]
x[2] ^ z[1]
y[1] ^ z[2]
return [x, y, z]


def inner_network():
"""Creates a (modified) `Inner1` contraction from `opt_einsum` tests."""
x = Node(np.ones([5, 2, 3, 4]))
y = Node(np.ones([5, 3]))
z = Node(np.ones([2, 4]))
# pylint: disable=pointless-statement
x[0] ^ y[0]
x[1] ^ z[0]
x[2] ^ y[1]
x[3] ^ z[1]
return [x, y, z]


def matrix_chain():
"""Creates a contraction of chain of matrices.
The `greedy` algorithm does not find the optimal path in this case!
"""
d = [10, 8, 6, 4, 2]
nodes = [Node(np.ones([d1, d2])) for d1, d2 in zip(d[:-1], d[1:])]
for a, b in zip(nodes[:-1], nodes[1:]):
# pylint: disable=pointless-statement
a[1] ^ b[0]
return nodes


def base(nodes: Iterable[AbstractNode],
algorithm: utils.Algorithm,
output_edge_order: Optional[Sequence[Edge]] = None,
ignore_edge_order: bool = False) -> AbstractNode:
"""Base method for all `opt_einsum` contractors.
Args:
nodes: A collection of connected nodes.
algorithm: `opt_einsum` contraction method to use.
output_edge_order: An optional list of edges. Edges of the
final node in `nodes_set`
are reordered into `output_edge_order`;
if final node has more than one edge,
`output_edge_order` must be provided.
ignore_edge_order: An option to ignore the output edge
order.
Returns:
Final node after full contraction.
"""
nodes_set = set(nodes)
edges = get_all_edges(nodes_set)
# output edge order has to be determined before any contraction
#(edges are refreshed after contractions)
if not ignore_edge_order:
if output_edge_order is None:
output_edge_order = list(get_subgraph_dangling(nodes))
if len(output_edge_order) > 1:
raise ValueError("The final node after contraction has more than "
"one remaining edge. In this case `output_edge_order` "
"has to be provided.")
if set(output_edge_order) != get_subgraph_dangling(nodes):
raise ValueError("output edges are not equal to the remaining "
"non-contracted edges of the final node.")
for edge in edges:
if not edge.is_disabled: # if it's disabled, we already contracted it
if edge.is_trace():
nodes_set.remove(edge.node1)
nodes_set.add(contract_parallel(edge))
if len(nodes_set) == 1:
# There's nothing to contract.
if ignore_edge_order:
return list(nodes_set)[0]
return list(nodes_set)[0].reorder_edges(output_edge_order)
# Then apply `opt_einsum`'s algorithm
path, nodes = utils.get_path(nodes_set, algorithm)
for a, b in path:
new_node = contract_between(nodes[a], nodes[b], allow_outer_product=True)
nodes.append(new_node)
nodes = utils.multi_remove(nodes, [a, b])
# if the final node has more than one edge,
# output_edge_order has to be specified
final_node = nodes[0] # nodes were connected, we checked this
if not ignore_edge_order:
final_node.reorder_edges(output_edge_order)
return final_node


def optimal(nodes: Iterable[AbstractNode],
output_edge_order: Optional[Sequence[Edge]] = None,
memory_limit: Optional[int] = None,
ignore_edge_order: bool = False) -> AbstractNode:
"""Optimal contraction order via `opt_einsum`.
This method will find the truly optimal contraction order via
`opt_einsum`'s depth first search algorithm. Since this search is
exhaustive, if your network is large (n>10), then the search may
take longer than just contracting in a suboptimal way.
Args:
nodes: an iterable of Nodes
output_edge_order: An optional list of edges.
Edges of the final node in `nodes_set`
are reordered into `output_edge_order`;
if final node has more than one edge,
`output_edge_order` must be provided.
memory_limit: Maximum number of elements in an array during contractions.
ignore_edge_order: An option to ignore the output edge order.
Returns:
The final node after full contraction.
"""
alg = functools.partial(
opt_einsum.paths.dynamic_programming, memory_limit=memory_limit)
return base(nodes, alg, output_edge_order, ignore_edge_order)


def branch(nodes: Iterable[AbstractNode],
output_edge_order: Optional[Sequence[Edge]] = None,
memory_limit: Optional[int] = None,
nbranch: Optional[int] = None,
ignore_edge_order: bool = False) -> AbstractNode:
"""Branch contraction path via `opt_einsum`.
This method uses the DFS approach of `optimal` while sorting potential
contractions based on a heuristic cost, in order to reduce time spent
in exploring paths which are unlikely to be optimal.
More details on `branching path`_.
.. _branching path:
https://optimized-einsum.readthedocs.io/en/latest/branching_path.html
Args:
nodes: an iterable of Nodes
output_edge_order: An optional list of edges.
Edges of the final node in `nodes_set`
are reordered into `output_edge_order`;
if final node has more than one edge,
`output_edge_order` must be provided.
memory_limit: Maximum number of elements in an array during contractions.
nbranch: Number of best contractions to explore.
If None it explores all inner products starting with those that
have the best cost heuristic.
ignore_edge_order: An option to ignore the output edge order.
Returns:
The final node after full contraction.
"""
alg = functools.partial(
opt_einsum.paths.branch, memory_limit=memory_limit, nbranch=nbranch)
return base(nodes, alg, output_edge_order, ignore_edge_order)


def greedy(nodes: Iterable[AbstractNode],
output_edge_order: Optional[Sequence[Edge]] = None,
memory_limit: Optional[int] = None,
ignore_edge_order: bool = False) -> AbstractNode:
"""Greedy contraction path via `opt_einsum`.
This provides a more efficient strategy than `optimal` for finding
contraction paths in large networks. First contracts pairs of tensors
by finding the pair with the lowest cost at each step. Then it performs
the outer products. More details on `greedy path`_.
.. _greedy path:
https://optimized-einsum.readthedocs.io/en/latest/greedy_path.html
Args:
nodes: an iterable of Nodes
output_edge_order: An optional list of edges.
Edges of the final node in `nodes_set`
are reordered into `output_edge_order`;
if final node has more than one edge,
`output_edge_order` must be provided.
memory_limit: Maximum number of elements in an array during contractions.
ignore_edge_order: An option to ignore the output edge order.
Returns:
The final node after full contraction.
"""
alg = functools.partial(opt_einsum.paths.greedy, memory_limit=memory_limit)
return base(nodes, alg, output_edge_order, ignore_edge_order)


def auto(nodes: Iterable[AbstractNode],
output_edge_order: Optional[Sequence[Edge]] = None,
memory_limit: Optional[int] = None,
ignore_edge_order: bool = False) -> AbstractNode:
"""Chooses one of the above algorithms according to network size.
Default behavior is based on `opt_einsum`'s `auto` contractor.
Args:
nodes: A collection of connected nodes.
output_edge_order: An optional list of edges.
Edges of the final node in `nodes_set`
are reordered into `output_edge_order`;
if final node has more than one edge,
`output_edge_order` must be provided.
memory_limit: Maximum number of elements in an array during contractions.
ignore_edge_order: An option to ignore the output edge order.
Returns:
Final node after full contraction.
"""
n = len(list(nodes)) #pytype thing
_nodes = nodes
if n <= 0:
raise ValueError("Cannot contract empty tensor network.")
if n == 1:
if not ignore_edge_order:
if output_edge_order is None:
output_edge_order = list(
(get_all_edges(_nodes) - get_all_nondangling(_nodes)))
if len(output_edge_order) > 1:
raise ValueError(
"The final node after contraction has more than "
"one dangling edge. In this case `output_edge_order` "
"has to be provided.")
edges = get_all_nondangling(_nodes)
if edges:
final_node = contract_parallel(edges.pop())
else:
final_node = list(_nodes)[0]
if not ignore_edge_order:
final_node.reorder_edges(output_edge_order)
return final_node
if n < 5:
return optimal(nodes, output_edge_order, memory_limit, ignore_edge_order)
if n < 7:
return branch(
nodes,
output_edge_order=output_edge_order,
memory_limit=memory_limit,
ignore_edge_order=ignore_edge_order)
if n < 9:
return branch(
nodes,
output_edge_order=output_edge_order,
memory_limit=memory_limit,
nbranch=2,
ignore_edge_order=ignore_edge_order)
if n < 15:
return branch(
nodes,
output_edge_order=output_edge_order,
nbranch=1,
ignore_edge_order=ignore_edge_order)
return greedy(nodes, output_edge_order, memory_limit, ignore_edge_order)
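
# Usage sketch (illustrative), contracting the matrix chain defined above:
# >>> nodes = matrix_chain()
# >>> out_edges = [nodes[0][0], nodes[-1][1]]
# >>> result = auto(nodes, output_edge_order=out_edges)
# With four nodes this dispatches to `optimal`; `result` is a single node of
# shape (10, 2).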


def custom(nodes: Iterable[AbstractNode],
optimizer: Any,
output_edge_order: Optional[Sequence[Edge]] = None,
memory_limit: Optional[int] = None,
ignore_edge_order: bool = False) -> AbstractNode:
"""Uses a custom path optimizer created by the user to calculate paths.
The custom path optimizer should inherit `opt_einsum`'s `PathOptimizer`.
See `custom paths`_.
.. _custom paths:
https://optimized-einsum.readthedocs.io/en/latest/custom_paths.html
Args:
nodes: an iterable of Nodes
output_edge_order: An optional list of edges.
Edges of the final node in `nodes_set`
are reordered into `output_edge_order`;
if final node has more than one edge,
`output_edge_order` must be provided.
optimizer: A custom `opt_einsum.PathOptimizer` object.
memory_limit: Maximum number of elements in an array during contractions.
ignore_edge_order: An option to ignore the output edge order.
Returns:
Final node after full contraction.
"""
alg = functools.partial(optimizer, memory_limit=memory_limit)
return base(nodes, alg, output_edge_order, ignore_edge_order)


def path_solver(
algorithm: Text,
nodes: Iterable[AbstractNode],
memory_limit: Optional[int] = None,
nbranch: Optional[int] = None
    ) -> List[Tuple[int, int]]:
"""Calculates the contraction paths using `opt_einsum` methods.
Args:
algorithm: `opt_einsum` method to use for calculating the contraction path.
nodes: an iterable of `AbstractNode` objects to contract.
memory_limit: Maximum number of elements in an array during contractions.
Only relevant for `algorithm in (optimal, greedy)`
nbranch: Number of best contractions to explore.
If None it explores all inner products starting with those that
have the best cost heuristic. Only relevant for `algorithm=branch`.
Returns:
The optimal contraction path as returned by `opt_einsum`.
"""
if algorithm == "optimal":
alg = functools.partial(
opt_einsum.paths.dynamic_programming, memory_limit=memory_limit)
elif algorithm == "branch":
alg = functools.partial(
opt_einsum.paths.branch, memory_limit=memory_limit, nbranch=nbranch)
elif algorithm == "greedy":
alg = functools.partial(opt_einsum.paths.greedy, memory_limit=memory_limit)
elif algorithm == "auto":
n = len(list(nodes)) #pytype thing
_nodes = nodes
if n <= 1:
return []
    if n < 5:
      alg = functools.partial(
          opt_einsum.paths.dynamic_programming, memory_limit=memory_limit)
    elif n < 7:
      alg = functools.partial(
          opt_einsum.paths.branch, memory_limit=memory_limit, nbranch=None)
    elif n < 9:
      alg = functools.partial(
          opt_einsum.paths.branch, memory_limit=memory_limit, nbranch=2)
    elif n < 15:
      alg = functools.partial(
          opt_einsum.paths.branch, memory_limit=memory_limit, nbranch=1)
    else:
      alg = functools.partial(
          opt_einsum.paths.greedy, memory_limit=memory_limit)
  else:
    raise ValueError(f"algorithm {algorithm} not implemented")
path, _ = utils.get_path(nodes, alg)
return path |
Contract `nodes` using `path`.
Args:
path: The contraction path as returned from `path_solver`.
nodes: A collection of connected nodes.
output_edge_order: A list of edges. Edges of the
final node in `nodes`
are reordered into `output_edge_order`;
Returns:
    Final node after full contraction. | def contract_path(path: List[Tuple[int,
                                              int]], nodes: Iterable[AbstractNode],
output_edge_order: Sequence[Edge]) -> AbstractNode:
"""Contract `nodes` using `path`.
Args:
path: The contraction path as returned from `path_solver`.
nodes: A collection of connected nodes.
output_edge_order: A list of edges. Edges of the
final node in `nodes`
are reordered into `output_edge_order`;
Returns:
Final node after full contraction.
"""
edges = get_all_edges(nodes)
for edge in edges:
if not edge.is_disabled: #if its disabled we already contracted it
if edge.is_trace():
contract_parallel(edge)
if len(nodes) == 1:
newnode = nodes[0].copy()
for edge in nodes[0].edges:
redirect_edge(edge, newnode, nodes[0])
return newnode.reorder_edges(output_edge_order)
if len(path) == 0:
return nodes
for p in path:
if len(p) > 1:
a, b = p
new_node = contract_between(nodes[a], nodes[b], allow_outer_product=True)
nodes.append(new_node)
nodes = utils.multi_remove(nodes, [a, b])
elif len(p) == 1:
a = p[0]
node = nodes.pop(a)
new_node = contract_trace_edges(node)
nodes.append(new_node)
# if the final node has more than one edge,
# output_edge_order has to be specified
final_node = nodes[0] # nodes were connected, we checked this
#some contractors miss trace edges
final_node = contract_trace_edges(final_node)
final_node.reorder_edges(output_edge_order)
return final_node |
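The two functions are intended to be used together: `path_solver` produces a path and `contract_path` executes it. A minimal sketch, assuming the public `tensornetwork as tn` API for node construction:
```python
# Minimal sketch combining `path_solver` and `contract_path`.
import numpy as np
import tensornetwork as tn

nodes = [tn.Node(np.random.rand(2, 2)) for _ in range(4)]
for left, right in zip(nodes[:-1], nodes[1:]):
  left[1] ^ right[0]  # chain the nodes into a matrix product
out_edges = [nodes[0][0], nodes[-1][1]]  # capture the dangling edges up front
path = path_solver("greedy", nodes)
result = contract_path(path, nodes, output_edge_order=out_edges)
# result.tensor is the (2, 2) product of the four matrices
```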
Remove the elements at multiple indices from a list at once. | def multi_remove(elems: List[Any], indices: List[int]) -> List[Any]:
  """Remove the elements at multiple indices from a list at once."""
return [i for j, i in enumerate(elems) if j not in indices] |
Calculates the contraction paths using `opt_einsum` methods.
Args:
nodes: An iterable of nodes.
algorithm: `opt_einsum` method to use for calculating the contraction path.
Returns:
The optimal contraction path as returned by `opt_einsum`. | def get_path(
nodes: Iterable[AbstractNode],
algorithm: Algorithm) -> Tuple[List[Tuple[int, int]], List[AbstractNode]]:
"""Calculates the contraction paths using `opt_einsum` methods.
Args:
nodes: An iterable of nodes.
algorithm: `opt_einsum` method to use for calculating the contraction path.
Returns:
The optimal contraction path as returned by `opt_einsum`.
"""
nodes = list(nodes)
input_sets = [set(node.edges) for node in nodes]
output_set = get_subgraph_dangling(nodes)
size_dict = {edge: edge.dimension for edge in get_all_edges(nodes)}
return algorithm(input_sets, output_set, size_dict), nodes |
Return a Tensor wrapping data obtained by an initialization function
implemented in a backend. The Tensor will have the same shape as the
underlying array that function generates, with all Edges dangling.
This function is not intended to be called directly, but doing so should
be safe enough.
Args:
fname: Name of the method of backend to call (a string).
*fargs: Positional arguments to the initialization method.
backend: The backend or its name.
**fkwargs: Keyword arguments to the initialization method.
Returns:
tensor: A Tensor wrapping data generated by
(the_backend).fname(*fargs, **fkwargs), with one dangling edge per
axis of data. | def initialize_tensor(fname: Text,
*fargs: Any,
backend: Optional[Union[Text, AbstractBackend]] = None,
**fkwargs: Any) -> Tensor:
"""Return a Tensor wrapping data obtained by an initialization function
implemented in a backend. The Tensor will have the same shape as the
underlying array that function generates, with all Edges dangling.
This function is not intended to be called directly, but doing so should
be safe enough.
Args:
fname: Name of the method of backend to call (a string).
*fargs: Positional arguments to the initialization method.
backend: The backend or its name.
**fkwargs: Keyword arguments to the initialization method.
Returns:
tensor: A Tensor wrapping data generated by
(the_backend).fname(*fargs, **fkwargs), with one dangling edge per
axis of data.
"""
if backend is None:
backend = backend_contextmanager.get_default_backend()
backend_obj = backends.backend_factory.get_backend(backend)
func = getattr(backend_obj, fname)
data = func(*fargs, **fkwargs)
tensor = Tensor(data, backend=backend)
return tensor |
Return a Tensor representing a 2D array with ones on the diagonal and
zeros elsewhere. The Tensor has two dangling Edges.
Args:
N (int): The first dimension of the returned matrix.
dtype, optional: dtype of array (default np.float64).
M (int, optional): The second dimension of the returned matrix.
backend (optional): The backend or its name.
Returns:
I : Tensor of shape (N, M)
      Represents an array of all zeros except for the main diagonal of all
      ones. | def eye(N: int,
dtype: Optional[Type[np.number]] = None,
M: Optional[int] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor:
"""Return a Tensor representing a 2D array with ones on the diagonal and
zeros elsewhere. The Tensor has two dangling Edges.
Args:
N (int): The first dimension of the returned matrix.
dtype, optional: dtype of array (default np.float64).
M (int, optional): The second dimension of the returned matrix.
backend (optional): The backend or its name.
Returns:
I : Tensor of shape (N, M)
      Represents an array of all zeros except for the main diagonal of all
      ones.
"""
the_tensor = initialize_tensor("eye", N, backend=backend, dtype=dtype, M=M)
return the_tensor |
Return a Tensor of shape `shape` of all zeros.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape`. Represents an array of all zeros. | def zeros(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor:
"""Return a Tensor of shape `shape` of all zeros.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape`. Represents an array of all zeros.
"""
the_tensor = initialize_tensor("zeros", shape, backend=backend, dtype=dtype)
return the_tensor |
Return a Tensor of shape `shape` of all ones.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape`
Represents an array of all ones. | def ones(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor:
"""Return a Tensor of shape `shape` of all ones.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape`
Represents an array of all ones.
"""
the_tensor = initialize_tensor("ones", shape, backend=backend, dtype=dtype)
return the_tensor |
  Return a Tensor of ones with the same shape as the input.
  Args:
    tensor : Object to receive the shape from.
dtype (optional) : dtype of object
backend(optional): The backend or its name. | def ones_like(tensor: Union[Any],
dtype: Optional[Type[Any]] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor:
"""Return a Tensor shape full of ones the same shape as input
Args:
tensor : Object to recieve shape from
dtype (optional) : dtype of object
backend(optional): The backend or its name."""
if backend is None:
backend = backend_contextmanager.get_default_backend()
else:
backend = backend_contextmanager.backend_factory.get_backend(backend)
if isinstance(tensor, Tensor):
the_tensor = initialize_tensor("ones", tensor.shape,
backend=tensor.backend, dtype=tensor.dtype)
else:
try:
tensor = backend.convert_to_tensor(tensor)
except TypeError as e:
error = "Input to zeros_like has invalid type causing " \
"error massage: \n" + str(e)
raise TypeError(error) from e
the_tensor = initialize_tensor("ones", tensor.get_shape().as_list(),
backend=backend, dtype=dtype)
return the_tensor |
  Return a Tensor of zeros with the same shape as the input.
  Args:
    tensor : Object to receive the shape from.
dtype (optional) : dtype of object
backend(optional): The backend or its name. | def zeros_like(tensor: Union[Any],
dtype: Optional[Any] = None,
backend: Optional[Union[Text,
AbstractBackend]] = None) -> Tensor:
"""Return a Tensor shape full of zeros the same shape as input
Args:
tensor : Object to recieve shape from
dtype (optional) : dtype of object
backend(optional): The backend or its name."""
if backend is None:
backend = backend_contextmanager.get_default_backend()
else:
backend = backend_contextmanager.backend_factory.get_backend(backend)
if isinstance(tensor, Tensor):
the_tensor = initialize_tensor("zeros", tensor.shape,
backend=tensor.backend, dtype=tensor.dtype)
else:
try:
tensor = backend.convert_to_tensor(tensor)
except TypeError as e:
error = "Input to zeros_like has invalid " \
"type causing error massage: \n" + str(e)
raise TypeError(error) from e
the_tensor = initialize_tensor("zeros", tensor.shape,
backend=backend, dtype=dtype)
return the_tensor |
Return a Tensor of shape `shape` of Gaussian random floats.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape` filled with Gaussian random data. | def randn(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor:
"""Return a Tensor of shape `shape` of Gaussian random floats.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape` filled with Gaussian random data.
"""
the_tensor = initialize_tensor("randn", shape, backend=backend, seed=seed,
dtype=dtype)
return the_tensor |
Return a Tensor of shape `shape` of uniform random floats.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
boundaries : Values lie in [boundaries[0], boundaries[1]).
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape` filled with uniform random data. | def random_uniform(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None,
boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),
backend: Optional[Union[Text, AbstractBackend]]
= None) -> Tensor:
"""Return a Tensor of shape `shape` of uniform random floats.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
boundaries : Values lie in [boundaries[0], boundaries[1]).
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape` filled with uniform random data.
"""
the_tensor = initialize_tensor("random_uniform", shape, backend=backend,
seed=seed, boundaries=boundaries, dtype=dtype)
return the_tensor |
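A short sketch of the Tensor initializers defined above, assuming the numpy backend is installed; as documented, dtype defaults to np.float64.
```python
# Minimal sketch of the Tensor initializers above (assumes the numpy backend).
import numpy as np

identity = eye(3, backend="numpy")  # (3, 3) identity
z = zeros((2, 4), backend="numpy")  # all zeros
o = ones((2, 4), dtype=np.float64, backend="numpy")  # all ones
g = randn((2, 4), seed=10, backend="numpy")  # Gaussian entries
u = random_uniform((2, 4), boundaries=(-1.0, 1.0), seed=10, backend="numpy")
```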
Checks that at least one of backend and x0 are not None; that backend
and x0.backend agree; that if args is not None its elements are Tensors
whose backends also agree. Creates a backend object from backend
and returns the arrays housed by x0 and args.
Args:
backend: A backend, text specifying one, or None.
x0: A tn.Tensor, or None.
args: A list of tn.Tensor, or None.
Returns:
backend: A backend object.
x0_array: x0.array if x0 was supplied, or None.
args_arr: Each array in the list of args if it was supplied, or None. | def krylov_error_checks(backend: Union[Text, AbstractBackend, None],
x0: Union[Tensor, None],
args: Union[List[Tensor], None]):
"""
Checks that at least one of backend and x0 are not None; that backend
and x0.backend agree; that if args is not None its elements are Tensors
whose backends also agree. Creates a backend object from backend
and returns the arrays housed by x0 and args.
Args:
backend: A backend, text specifying one, or None.
x0: A tn.Tensor, or None.
args: A list of tn.Tensor, or None.
Returns:
backend: A backend object.
x0_array: x0.array if x0 was supplied, or None.
args_arr: Each array in the list of args if it was supplied, or None.
"""
# If the backend wasn't specified, infer it from x0. If neither was specified
# raise ValueError.
if backend is None:
if x0 is None:
raise ValueError("One of backend or x0 must be specified.")
backend = x0.backend
else:
backend = backends.backend_factory.get_backend(backend)
# If x0 was specified, return the enclosed array. If attempting to do so
# raises AttributeError, instead raise TypeError. If backend was also
# specified, but was different than x0.backend, raise ValueError.
if x0 is not None:
try:
x0_array = x0.array
except AttributeError as err:
raise TypeError("x0 must be a tn.Tensor.") from err
if x0.backend.name != backend.name:
errstr = ("If both x0 and backend are specified the"
"backends must agree. \n"
f"x0 backend: {x0.backend.name} \n"
f"backend: {backend.name} \n")
raise ValueError(errstr)
else: # If x0 was not specified, set x0_array (the returned value) to None.
x0_array = None
# If args were specified, set the returned args_array to be all the enclosed
# arrays. If any of them raise AttributeError during the attempt, raise
# TypeError. If args was not specified, set args_array to None.
if args is not None:
try:
args_array = [a.array for a in args]
except AttributeError as err:
raise TypeError("Every element of args must be a tn.Tensor.") from err
else:
args_array = None
return (backend, x0_array, args_array) |
Lanczos method for finding the lowest eigenvector-eigenvalue pairs
of `A`.
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Array`, and `res.shape` has to be `vector.shape`.
backend: A backend, text specifying one, or None.
args: A list of arguments to `A`. `A` will be called as
`res = A(x0, *args)`.
x0: An initial vector for the Lanczos algorithm. If `None`,
a random initial vector is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
    dtype: The dtype of the input `A`. If no `x0` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
    numeig: The number of eigenvector-eigenvalue pairs to be computed.
      If `numeig > 1`, `reorthogonalize` has to be `True`.
    tol: The desired precision of the eigenvalues. Uses
`backend.norm(eigvalsnew[0:numeig] - eigvalsold[0:numeig]) < tol`
as stopping criterion between two diagonalization steps of the
tridiagonal operator.
delta: Stopping criterion for Lanczos iteration.
      If a Krylov vector :math:`x_n` has an L2 norm
:math:`\lVert x_n\rVert < delta`, the iteration
is stopped. It means that an (approximate) invariant subspace has
been found.
ndiag: The tridiagonal Operator is diagonalized every `ndiag`
iterations to check convergence.
reorthogonalize: If `True`, Krylov vectors are kept orthogonal by
explicit orthogonalization (more costly than `reorthogonalize=False`)
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` lowest eigenvalues
eigvecs: A list of `numeig` lowest eigenvectors | def eigsh_lanczos(A: Callable,
backend: Optional[Union[Text, AbstractBackend]] = None,
args: Optional[List[Tensor]] = None,
x0: Optional[Tensor] = None,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.number]] = None,
num_krylov_vecs: int = 20,
numeig: int = 1,
tol: float = 1E-8,
delta: float = 1E-8,
ndiag: int = 20,
reorthogonalize: bool = False) -> Tuple[Tensor, List]:
"""
Lanczos method for finding the lowest eigenvector-eigenvalue pairs
of `A`.
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Array`, and `res.shape` has to be `vector.shape`.
backend: A backend, text specifying one, or None.
args: A list of arguments to `A`. `A` will be called as
`res = A(x0, *args)`.
x0: An initial vector for the Lanczos algorithm. If `None`,
a random initial vector is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
    dtype: The dtype of the input `A`. If no `x0` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
    numeig: The number of eigenvector-eigenvalue pairs to be computed.
      If `numeig > 1`, `reorthogonalize` has to be `True`.
    tol: The desired precision of the eigenvalues. Uses
`backend.norm(eigvalsnew[0:numeig] - eigvalsold[0:numeig]) < tol`
as stopping criterion between two diagonalization steps of the
tridiagonal operator.
delta: Stopping criterion for Lanczos iteration.
      If a Krylov vector :math:`x_n` has an L2 norm
:math:`\\lVert x_n\\rVert < delta`, the iteration
is stopped. It means that an (approximate) invariant subspace has
been found.
ndiag: The tridiagonal Operator is diagonalized every `ndiag`
iterations to check convergence.
reorthogonalize: If `True`, Krylov vectors are kept orthogonal by
explicit orthogonalization (more costly than `reorthogonalize=False`)
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` lowest eigenvalues
eigvecs: A list of `numeig` lowest eigenvectors
"""
backend, x0_array, args_array = krylov_error_checks(backend, x0, args)
mv = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A)
result = backend.eigsh_lanczos(mv, args=args_array,
initial_state=x0_array,
shape=shape, dtype=dtype,
num_krylov_vecs=num_krylov_vecs, numeig=numeig,
tol=tol, delta=delta, ndiag=ndiag,
reorthogonalize=reorthogonalize)
eigvals, eigvecs = result
eigvecsT = [Tensor(ev, backend=backend) for ev in eigvecs]
return eigvals, eigvecsT |
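A minimal usage sketch of `eigsh_lanczos` with the numpy backend; the operator is supplied as a matvec that receives the raw backend arrays unwrapped from the `Tensor` arguments.
```python
# Minimal sketch, assuming the numpy backend; `Tensor` is the wrapper class
# used throughout this module.
import numpy as np

H_array = np.random.rand(16, 16)
H = Tensor(0.5 * (H_array + H_array.T), backend="numpy")  # symmetrize

def matvec(x, mat):
  # Receives and returns raw backend arrays.
  return mat @ x

eigvals, eigvecs = eigsh_lanczos(matvec, backend="numpy", args=[H],
                                 shape=(16,), dtype=np.float64,
                                 num_krylov_vecs=16, numeig=1)
# eigvals[0] approximates the lowest eigenvalue; eigvecs[0] is a Tensor.
```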
Implicitly restarted Arnoldi method for finding the lowest
eigenvector-eigenvalue pairs of a linear operator `A`.
`A` is a function implementing the matrix-vector
product.
WARNING: This routine uses jax.jit to reduce runtimes. jitting is triggered
at the first invocation of `eigs`, and on any subsequent calls
if the python `id` of `A` changes, even if the formal definition of `A`
stays the same.
Example: the following will jit once at the beginning, and then never again:
```python
import jax
import numpy as np
def A(H,x):
return jax.np.dot(H,x)
for n in range(100):
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
    res = eigs(A, [H],x) #jitting is triggered only at `n=0`
```
The following code triggers jitting at every iteration, which
results in considerably reduced performance
```python
import jax
import numpy as np
for n in range(100):
def A(H,x):
return jax.np.dot(H,x)
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
    res = eigs(A, [H],x) #jitting is triggered at every step `n`
```
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`.
backend: A backend, text specifying one, or None.
args: A list of arguments to `A`. `A` will be called as
`res = A(initial_state, *args)`.
x0: An initial vector for the algorithm. If `None`,
a random initial `Tensor` is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
dtype: The dtype of the input `A`. If no `initial_state` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
numeig: The number of eigenvector-eigenvalue pairs to be computed.
tol: The desired precision of the eigenvalues. For the jax backend
this has currently no effect, and precision of eigenvalues is not
guaranteed. This feature may be added at a later point. To increase
precision the caller can either increase `maxiter` or `num_krylov_vecs`.
    which: Flag for targeting different types of eigenvalues. Currently
      supported are `which = 'LR'` (largest real part) and `which = 'LM'`
      (largest magnitude).
maxiter: Maximum number of restarts. For `maxiter=0` the routine becomes
equivalent to a simple Arnoldi method.
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` eigenvalues
eigvecs: A list of `numeig` eigenvectors | def eigs(A: Callable,
backend: Optional[Union[Text, AbstractBackend]] = None,
args: Optional[List[Tensor]] = None,
x0: Optional[Tensor] = None,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.number]] = None,
num_krylov_vecs: int = 20,
numeig: int = 1,
tol: float = 1E-8,
which: Text = 'LR',
maxiter: int = 20) -> Tuple[Tensor, List]:
"""
Implicitly restarted Arnoldi method for finding the lowest
eigenvector-eigenvalue pairs of a linear operator `A`.
`A` is a function implementing the matrix-vector
product.
WARNING: This routine uses jax.jit to reduce runtimes. jitting is triggered
at the first invocation of `eigs`, and on any subsequent calls
if the python `id` of `A` changes, even if the formal definition of `A`
stays the same.
Example: the following will jit once at the beginning, and then never again:
```python
import jax
import numpy as np
def A(H,x):
return jax.np.dot(H,x)
for n in range(100):
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
    res = eigs(A, [H],x) #jitting is triggered only at `n=0`
```
The following code triggers jitting at every iteration, which
results in considerably reduced performance
```python
import jax
import numpy as np
for n in range(100):
def A(H,x):
return jax.np.dot(H,x)
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
    res = eigs(A, [H],x) #jitting is triggered at every step `n`
```
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`.
backend: A backend, text specifying one, or None.
args: A list of arguments to `A`. `A` will be called as
`res = A(initial_state, *args)`.
x0: An initial vector for the algorithm. If `None`,
a random initial `Tensor` is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
dtype: The dtype of the input `A`. If no `initial_state` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
numeig: The number of eigenvector-eigenvalue pairs to be computed.
tol: The desired precision of the eigenvalues. For the jax backend
this has currently no effect, and precision of eigenvalues is not
guaranteed. This feature may be added at a later point. To increase
precision the caller can either increase `maxiter` or `num_krylov_vecs`.
    which: Flag for targeting different types of eigenvalues. Currently
      supported are `which = 'LR'` (largest real part) and `which = 'LM'`
      (largest magnitude).
maxiter: Maximum number of restarts. For `maxiter=0` the routine becomes
equivalent to a simple Arnoldi method.
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` eigenvalues
eigvecs: A list of `numeig` eigenvectors
"""
backend, x0_array, args_array = krylov_error_checks(backend, x0, args)
mv = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A)
result = backend.eigs(mv, args=args_array, initial_state=x0_array,
shape=shape, dtype=dtype,
num_krylov_vecs=num_krylov_vecs, numeig=numeig,
tol=tol, which=which, maxiter=maxiter)
eigvals, eigvecs = result
eigvecsT = [Tensor(eV, backend=backend) for eV in eigvecs]
return eigvals, eigvecsT |
GMRES solves the linear system A @ x = b for x given a vector `b` and
a general (not necessarily symmetric/Hermitian) linear operator `A`.
As a Krylov method, GMRES does not require a concrete matrix representation
of the n by n `A`, but only a function
`vector1 = A_mv(vector0, *A_args, **A_kwargs)`
prescribing a one-to-one linear map from vector0 to vector1 (that is,
A must be square, and thus vector0 and vector1 the same size). If `A` is a
dense matrix, or if it is a symmetric/Hermitian operator, a different
linear solver will usually be preferable.
GMRES works by first constructing the Krylov basis
K = (x0, A_mv@x0, A_mv@A_mv@x0, ..., (A_mv^num_krylov_vectors)@x_0) and then
solving a certain dense linear system K @ q0 = q1 from whose solution x can
be approximated. For `num_krylov_vectors = n` the solution is provably exact
in infinite precision, but the expense is cubic in `num_krylov_vectors` so
one is typically interested in the `num_krylov_vectors << n` case.
The solution can in this case be repeatedly
improved, to a point, by restarting the Arnoldi iterations each time
`num_krylov_vectors` is reached. Unfortunately the optimal parameter choices
balancing expense and accuracy are difficult to predict in advance, so
applying this function requires a degree of experimentation.
In a tensor network code one is typically interested in A_mv implementing
some tensor contraction. This implementation thus allows `b` and `x0` to be
of whatever arbitrary, though identical, shape `b = A_mv(x0, ...)` expects.
Reshaping to and from a matrix problem is handled internally.
Args:
A_mv : A function `v0 = A_mv(v, *A_args, **A_kwargs)` where `v0` and
`v` have the same shape.
b : The `b` in `A @ x = b`; it should be of the shape `A_mv`
operates on.
A_args : Positional arguments to `A_mv`, supplied to this interface
as a list.
Default: None.
x0 : An optional guess solution. Zeros are used by default.
If `x0` is supplied, its shape and dtype must match those of
`b`, or an
error will be thrown.
Default: zeros.
tol, atol: Solution tolerance to achieve,
norm(residual) <= max(tol*norm(b), atol).
Default: tol=1E-05
atol=tol
num_krylov_vectors
: Size of the Krylov space to build at each restart.
Expense is cubic in this parameter. If supplied, it must be
an integer in 0 < num_krylov_vectors <= b.size.
Default: b.size.
maxiter : The Krylov space will be repeatedly rebuilt up to this many
times. Large values of this argument
should be used only with caution, since especially for nearly
symmetric matrices and small `num_krylov_vectors` convergence
might well freeze at a value significantly larger than `tol`.
Default: 1.
M : Inverse of the preconditioner of A; see the docstring for
`scipy.sparse.linalg.gmres`. This is only supported in the
numpy backend. Supplying this argument to other backends will
trigger NotImplementedError.
Default: None.
Raises:
ValueError: -if `x0` is supplied but its shape differs from that of `b`.
-in NumPy, if the ARPACK solver reports a breakdown (which
usually indicates some kind of floating point issue).
-if num_krylov_vectors is 0 or exceeds b.size.
-if tol was negative.
-if M was supplied with any backend but NumPy.
Returns:
x : The converged solution. It has the same shape as `b`.
info : 0 if convergence was achieved, the number of restarts otherwise. | def gmres(A_mv: Callable,
b: Tensor,
A_args: Optional[List] = None,
x0: Optional[Tensor] = None,
tol: float = 1E-05,
atol: Optional[float] = None,
num_krylov_vectors: Optional[int] = None,
maxiter: Optional[int] = 1,
M: Optional[Callable] = None
) -> Tuple[Tensor, int]:
""" GMRES solves the linear system A @ x = b for x given a vector `b` and
a general (not necessarily symmetric/Hermitian) linear operator `A`.
As a Krylov method, GMRES does not require a concrete matrix representation
of the n by n `A`, but only a function
`vector1 = A_mv(vector0, *A_args, **A_kwargs)`
prescribing a one-to-one linear map from vector0 to vector1 (that is,
A must be square, and thus vector0 and vector1 the same size). If `A` is a
dense matrix, or if it is a symmetric/Hermitian operator, a different
linear solver will usually be preferable.
GMRES works by first constructing the Krylov basis
K = (x0, A_mv@x0, A_mv@A_mv@x0, ..., (A_mv^num_krylov_vectors)@x_0) and then
solving a certain dense linear system K @ q0 = q1 from whose solution x can
be approximated. For `num_krylov_vectors = n` the solution is provably exact
in infinite precision, but the expense is cubic in `num_krylov_vectors` so
one is typically interested in the `num_krylov_vectors << n` case.
The solution can in this case be repeatedly
improved, to a point, by restarting the Arnoldi iterations each time
`num_krylov_vectors` is reached. Unfortunately the optimal parameter choices
balancing expense and accuracy are difficult to predict in advance, so
applying this function requires a degree of experimentation.
In a tensor network code one is typically interested in A_mv implementing
some tensor contraction. This implementation thus allows `b` and `x0` to be
of whatever arbitrary, though identical, shape `b = A_mv(x0, ...)` expects.
Reshaping to and from a matrix problem is handled internally.
Args:
A_mv : A function `v0 = A_mv(v, *A_args, **A_kwargs)` where `v0` and
`v` have the same shape.
b : The `b` in `A @ x = b`; it should be of the shape `A_mv`
operates on.
A_args : Positional arguments to `A_mv`, supplied to this interface
as a list.
Default: None.
x0 : An optional guess solution. Zeros are used by default.
If `x0` is supplied, its shape and dtype must match those of
`b`, or an
error will be thrown.
Default: zeros.
tol, atol: Solution tolerance to achieve,
norm(residual) <= max(tol*norm(b), atol).
Default: tol=1E-05
atol=tol
num_krylov_vectors
: Size of the Krylov space to build at each restart.
Expense is cubic in this parameter. If supplied, it must be
an integer in 0 < num_krylov_vectors <= b.size.
Default: b.size.
maxiter : The Krylov space will be repeatedly rebuilt up to this many
times. Large values of this argument
should be used only with caution, since especially for nearly
symmetric matrices and small `num_krylov_vectors` convergence
might well freeze at a value significantly larger than `tol`.
Default: 1.
M : Inverse of the preconditioner of A; see the docstring for
`scipy.sparse.linalg.gmres`. This is only supported in the
numpy backend. Supplying this argument to other backends will
trigger NotImplementedError.
Default: None.
Raises:
ValueError: -if `x0` is supplied but its shape differs from that of `b`.
-in NumPy, if the ARPACK solver reports a breakdown (which
usually indicates some kind of floating point issue).
-if num_krylov_vectors is 0 or exceeds b.size.
-if tol was negative.
-if M was supplied with any backend but NumPy.
Returns:
x : The converged solution. It has the same shape as `b`.
info : 0 if convergence was achieved, the number of restarts otherwise.
"""
try:
b_array = b.array
except AttributeError as err:
raise TypeError("b must be a tn.Tensor") from err
backend, x0_array, args_array = krylov_error_checks(b.backend, x0, A_args)
mv = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A_mv)
out = backend.gmres(mv, b_array, A_args=args_array,
x0=x0_array, tol=tol, atol=atol,
num_krylov_vectors=num_krylov_vectors,
maxiter=maxiter, M=M)
result, info = out
resultT = Tensor(result, backend=b.backend)
return (resultT, info) |
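A minimal usage sketch of `gmres` with the numpy backend, solving a small 2x2 system:
```python
# Minimal sketch, assuming the numpy backend.
import numpy as np

A_op = Tensor(np.array([[3.0, 1.0], [1.0, 2.0]]), backend="numpy")
b = Tensor(np.array([1.0, 0.0]), backend="numpy")

def A_mv(x, mat):
  return mat @ x

x, info = gmres(A_mv, b, A_args=[A_op], num_krylov_vectors=2, tol=1E-10)
# info == 0 on convergence; A_op.array @ x.array approximately equals b.array.
```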
Computes the singular value decomposition (SVD) of a tensor.
The SVD is performed by treating the tensor as a matrix, with an effective
left (row) index resulting from combining the axes
`tensor.shape[:pivot_axis]` and an effective right (column) index resulting
from combining the axes `tensor.shape[pivot_axis:]`.
For example, if `tensor` had a shape (2, 3, 4, 5) and `pivot_axis` was 2,
then `u` would have shape (2, 3, 6), `s` would have shape (6), and `vh`
would have shape (6, 4, 5).
If `max_singular_values` is set to an integer, the SVD is truncated to keep
at most this many singular values.
If `max_truncation_error > 0`, as many singular values will be truncated as
possible, so that the truncation error (the norm of discarded singular
values) is at most `max_truncation_error`.
If `relative` is set `True` then `max_truncation_err` is understood
relative to the largest singular value.
If both `max_singular_values` and `max_truncation_error` are specified, the
number of retained singular values will be
`min(max_singular_values, nsv_auto_trunc)`, where `nsv_auto_trunc` is the
number of singular values that must be kept to maintain a truncation error
smaller than `max_truncation_error`.
The output consists of three tensors `u, s, vh` such that:
```python
u[i1,...,iN, j] * s[j] * vh[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]
```
Note that the output ordering matches numpy.linalg.svd rather than tf.svd.
Args:
tensor: A tensor to be decomposed.
pivot_axis: Where to split the tensor's axes before flattening into a
matrix.
max_singular_values: The number of singular values to keep, or `None` to
keep them all.
max_truncation_error: The maximum allowed truncation error or `None` to
not do any truncation.
relative: Multiply `max_truncation_err` with the largest singular value.
Returns:
u: Left tensor factor.
s: Vector of ordered singular values from largest to smallest.
vh: Right tensor factor.
s_rest: Vector of discarded singular values (length zero if no
truncation). | def svd(
tensor: Tensor,
pivot_axis: int = -1,
max_singular_values: Optional[int] = None,
max_truncation_error: Optional[float] = None,
relative: Optional[bool] = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Computes the singular value decomposition (SVD) of a tensor.
The SVD is performed by treating the tensor as a matrix, with an effective
left (row) index resulting from combining the axes
`tensor.shape[:pivot_axis]` and an effective right (column) index resulting
from combining the axes `tensor.shape[pivot_axis:]`.
For example, if `tensor` had a shape (2, 3, 4, 5) and `pivot_axis` was 2,
then `u` would have shape (2, 3, 6), `s` would have shape (6), and `vh`
would have shape (6, 4, 5).
If `max_singular_values` is set to an integer, the SVD is truncated to keep
at most this many singular values.
If `max_truncation_error > 0`, as many singular values will be truncated as
possible, so that the truncation error (the norm of discarded singular
values) is at most `max_truncation_error`.
If `relative` is set `True` then `max_truncation_err` is understood
relative to the largest singular value.
If both `max_singular_values` and `max_truncation_error` are specified, the
number of retained singular values will be
`min(max_singular_values, nsv_auto_trunc)`, where `nsv_auto_trunc` is the
number of singular values that must be kept to maintain a truncation error
smaller than `max_truncation_error`.
The output consists of three tensors `u, s, vh` such that:
```python
u[i1,...,iN, j] * s[j] * vh[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]
```
Note that the output ordering matches numpy.linalg.svd rather than tf.svd.
Args:
tensor: A tensor to be decomposed.
pivot_axis: Where to split the tensor's axes before flattening into a
matrix.
max_singular_values: The number of singular values to keep, or `None` to
keep them all.
max_truncation_error: The maximum allowed truncation error or `None` to
not do any truncation.
relative: Multiply `max_truncation_err` with the largest singular value.
Returns:
u: Left tensor factor.
s: Vector of ordered singular values from largest to smallest.
vh: Right tensor factor.
s_rest: Vector of discarded singular values (length zero if no
truncation).
"""
backend = tensor.backend
out = backend.svd(tensor.array, pivot_axis,
max_singular_values=max_singular_values,
max_truncation_error=max_truncation_error,
relative=relative)
tensors = [Tensor(t, backend=backend) for t in out]
return tuple(tensors) |
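The shape example from the docstring can be reproduced directly; a minimal sketch assuming the numpy backend and the `randn` initializer defined above:
```python
# Minimal sketch, assuming the numpy backend.
t = randn((2, 3, 4, 5), seed=10, backend="numpy")
u, s, vh, s_rest = svd(t, pivot_axis=2)
# u.shape == (2, 3, 6), s.shape == (6,), vh.shape == (6, 4, 5); s_rest is empty
u, s, vh, s_rest = svd(t, pivot_axis=2, max_singular_values=4)
# now s.shape == (4,) and s_rest holds the two discarded singular values
```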
QR reshapes tensor into a matrix and then decomposes that matrix into the
product of unitary and upper triangular matrices Q and R. Q is reshaped
into a tensor depending on the input shape and the choice of pivot_axis.
Computes the reduced QR decomposition of the matrix formed by concatenating
tensor about pivot_axis, e.g.
``` shape = tensor.shape
columns = np.prod(shape[:pivot_axis])
rows = np.prod(shape[pivot_axis:])
matrix = tensor.reshape((columns, rows))
```
The output is then shaped as follows:
- Q has dimensions (*shape[:pivot_axis], np.prod(shape[pivot_axis:])).
- R is a square matrix with length np.prod(shape[pivot_axis:]).
  If the argument non_negative_diagonal is True (the default is False), a phase
  convention is enforced such that R has strictly non-negative entries on its
  main diagonal. This makes the QR decomposition unambiguous and unique, which
  allows it to be used in fixed point iterations. If False, the phase
  convention is set by the backend and thus undefined at the TN interface
  level, but this routine is slightly less expensive.
  By default pivot_axis is -1, which reproduces the usual behaviour in the
  matrix case.
  Args:
    tensor: The Tensor to be decomposed.
    pivot_axis: The axis of the Tensor about which to split into a matrix.
      Default: -1.
    non_negative_diagonal: Whether to enforce the phase convention that R has
      a non-negative main diagonal. Default: False.
Returns:
Q, R : The decomposed Tensor with dimensions as specified above. | def qr(
tensor: Tensor,
pivot_axis: int = -1,
non_negative_diagonal: bool = False
) -> Tuple[Tensor, Tensor]:
"""
QR reshapes tensor into a matrix and then decomposes that matrix into the
product of unitary and upper triangular matrices Q and R. Q is reshaped
into a tensor depending on the input shape and the choice of pivot_axis.
Computes the reduced QR decomposition of the matrix formed by concatenating
tensor about pivot_axis, e.g.
``` shape = tensor.shape
columns = np.prod(shape[:pivot_axis])
rows = np.prod(shape[pivot_axis:])
matrix = tensor.reshape((columns, rows))
```
The output is then shaped as follows:
- Q has dimensions (*shape[:pivot_axis], np.prod(shape[pivot_axis:])).
- R is a square matrix with length np.prod(shape[pivot_axis:]).
  If the argument non_negative_diagonal is True (the default is False), a phase
  convention is enforced such that R has strictly non-negative entries on its
  main diagonal. This makes the QR decomposition unambiguous and unique, which
  allows it to be used in fixed point iterations. If False, the phase
  convention is set by the backend and thus undefined at the TN interface
  level, but this routine is slightly less expensive.
  By default pivot_axis is -1, which reproduces the usual behaviour in the
  matrix case.
  Args:
    tensor: The Tensor to be decomposed.
    pivot_axis: The axis of the Tensor about which to split into a matrix.
      Default: -1.
    non_negative_diagonal: Whether to enforce the phase convention that R has
      a non-negative main diagonal. Default: False.
Returns:
Q, R : The decomposed Tensor with dimensions as specified above.
"""
backend = tensor.backend
out = backend.qr(tensor.array, pivot_axis=pivot_axis,
non_negative_diagonal=non_negative_diagonal)
Q, R = [Tensor(t, backend=backend) for t in out]
return Q, R |
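A minimal sketch of `qr`, assuming the numpy backend and the `randn` initializer defined above; with `pivot_axis=2` the matrix view is (2*3, 4):
```python
# Minimal sketch, assuming the numpy backend.
t = randn((2, 3, 4), seed=10, backend="numpy")
Q, R = qr(t, pivot_axis=2)
# Q.shape == (2, 3, 4) and R.shape == (4, 4); contracting the last axis of Q
# with the first axis of R recovers `t` up to numerical precision.
```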
RQ reshapes tensor into a matrix and then decomposes that matrix into the
product of upper triangular and unitary matrices R and Q. Q is reshaped
into a tensor depending on the input shape and the choice of pivot_axis.
Computes the reduced RQ decomposition of the matrix formed by concatenating
tensor about pivot_axis, e.g.
``` shape = tensor.shape
columns = np.prod(shape[:pivot_axis])
rows = np.prod(shape[pivot_axis:])
matrix = tensor.reshape((columns, rows))
```
The output is then shaped as follows:
- R is a square matrix with length np.prod(shape[:pivot_axis]).
- Q has dimensions (np.prod(shape[:pivot_axis]), *shape[pivot_axis:]).
  If the argument non_negative_diagonal is True (the default is False), a phase
  convention is enforced such that R has strictly non-negative entries on its
  main diagonal. This makes the RQ decomposition unambiguous and unique, which
  allows it to be used in fixed point iterations. If False, the phase
  convention is set by the backend and thus undefined at the TN interface
  level, but this routine is slightly less expensive.
  By default pivot_axis is -1, which reproduces the usual behaviour in the
  matrix case.
  Args:
    tensor: The Tensor to be decomposed.
    pivot_axis: The axis of the Tensor about which to split into a matrix.
      Default: -1.
    non_negative_diagonal: Whether to enforce the phase convention that R has
      a non-negative main diagonal. Default: False.
Returns:
R, Q : The decomposed Tensor with dimensions as specified above. | def rq(
tensor: Tensor,
pivot_axis: int = -1,
non_negative_diagonal: bool = False
) -> Tuple[Tensor, Tensor]:
"""
RQ reshapes tensor into a matrix and then decomposes that matrix into the
product of upper triangular and unitary matrices R and Q. Q is reshaped
into a tensor depending on the input shape and the choice of pivot_axis.
Computes the reduced RQ decomposition of the matrix formed by concatenating
tensor about pivot_axis, e.g.
``` shape = tensor.shape
columns = np.prod(shape[:pivot_axis])
rows = np.prod(shape[pivot_axis:])
matrix = tensor.reshape((columns, rows))
```
The output is then shaped as follows:
- R is a square matrix with length np.prod(shape[:pivot_axis]).
- Q has dimensions (np.prod(shape[:pivot_axis]), *shape[pivot_axis:]).
  If the argument non_negative_diagonal is True (the default is False), a phase
  convention is enforced such that R has strictly non-negative entries on its
  main diagonal. This makes the RQ decomposition unambiguous and unique, which
  allows it to be used in fixed point iterations. If False, the phase
  convention is set by the backend and thus undefined at the TN interface
  level, but this routine is slightly less expensive.
  By default pivot_axis is -1, which reproduces the usual behaviour in the
  matrix case.
  Args:
    tensor: The Tensor to be decomposed.
    pivot_axis: The axis of the Tensor about which to split into a matrix.
      Default: -1.
    non_negative_diagonal: Whether to enforce the phase convention that R has
      a non-negative main diagonal. Default: False.
Returns:
R, Q : The decomposed Tensor with dimensions as specified above.
"""
backend = tensor.backend
out = backend.rq(tensor.array, pivot_axis=pivot_axis,
non_negative_diagonal=non_negative_diagonal)
R, Q = [Tensor(t, backend=backend) for t in out]
return R, Q |
Compute eigenvectors and eigenvalues of a hermitian matrix.
Args:
    matrix: A symmetric (or Hermitian) matrix.
Returns:
Tensor: The eigenvalues in ascending order.
Tensor: The eigenvectors. | def eigh(matrix: Tensor) -> Tuple[Tensor, Tensor]:
"""Compute eigenvectors and eigenvalues of a hermitian matrix.
Args:
    matrix: A symmetric (or Hermitian) matrix.
Returns:
Tensor: The eigenvalues in ascending order.
Tensor: The eigenvectors.
"""
backend = matrix.backend
out = backend.eigh(matrix.array)
tensors = [Tensor(t, backend=backend) for t in out]
return tuple(tensors) |
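A minimal sketch of `eigh`, assuming the numpy backend:
```python
# Minimal sketch, assuming the numpy backend.
import numpy as np

m_array = np.random.rand(4, 4)
m = Tensor(0.5 * (m_array + m_array.T), backend="numpy")  # symmetrize
eigvals, eigvecs = eigh(m)
# eigvals.array is sorted in ascending order; the columns of eigvecs.array
# are the corresponding orthonormal eigenvectors.
```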
Calculate the L2-norm of the elements of `tensor`
| def norm(tensor: Tensor) -> Tensor:
"""Calculate the L2-norm of the elements of `tensor`
"""
backend = tensor.backend
out = backend.norm(tensor.array)
return out |
Compute the matrix inverse of `matrix`.
Args:
matrix: A matrix.
Returns:
Tensor: The inverse of `matrix` | def inv(matrix: Tensor) -> Tensor:
"""Compute the matrix inverse of `matrix`.
Args:
matrix: A matrix.
Returns:
Tensor: The inverse of `matrix`
"""
backend = matrix.backend
out = backend.inv(matrix.array)
tensor = Tensor(out, backend=backend)
return tensor |
  Return the matrix exponential of `matrix`.
Args:
matrix: A tensor.
Returns:
Tensor | def expm(matrix: Tensor) -> Tensor:
"""
  Return the matrix exponential of `matrix`.
Args:
matrix: A tensor.
Returns:
Tensor
"""
backend = matrix.backend
out = backend.expm(matrix.array)
tensor = Tensor(out, backend=backend)
return tensor |
Return a Node wrapping data obtained by an initialization function
implemented in a backend. The Node will have the same shape as the
underlying array that function generates, with all Edges dangling.
This function is not intended to be called directly, but doing so should
be safe enough.
Args:
fname: Name of the method of backend to call (a string).
*fargs: Positional arguments to the initialization method.
name: Optional name of the Node.
axis_names: Optional names of the Node's dangling edges.
backend: The backend or its name.
**fkwargs: Keyword arguments to the initialization method.
Returns:
node: A Node wrapping data generated by
(the_backend).fname(*fargs, **fkwargs), with one dangling edge per
axis of data. | def initialize_node(fname: Text,
*fargs: Any,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None,
**fkwargs: Any) -> Tensor:
"""Return a Node wrapping data obtained by an initialization function
implemented in a backend. The Node will have the same shape as the
underlying array that function generates, with all Edges dangling.
This function is not intended to be called directly, but doing so should
be safe enough.
Args:
fname: Name of the method of backend to call (a string).
*fargs: Positional arguments to the initialization method.
name: Optional name of the Node.
axis_names: Optional names of the Node's dangling edges.
backend: The backend or its name.
**fkwargs: Keyword arguments to the initialization method.
Returns:
node: A Node wrapping data generated by
(the_backend).fname(*fargs, **fkwargs), with one dangling edge per
axis of data.
"""
if backend is None:
backend_obj = backend_contextmanager.get_default_backend()
else:
backend_obj = backends.backend_factory.get_backend(backend)
func = getattr(backend_obj, fname)
data = func(*fargs, **fkwargs)
node = Node(data, name=name, axis_names=axis_names, backend=backend)
return node |
Return a Node representing a 2D array with ones on the diagonal and
zeros elsewhere. The Node has two dangling Edges.
Args:
N (int): The first dimension of the returned matrix.
dtype, optional: dtype of array (default np.float64).
M (int, optional): The second dimension of the returned matrix.
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
I : Node of shape (N, M)
      Represents an array of all zeros except for the main diagonal of all
      ones. | def eye(N: int,
dtype: Optional[Type[np.number]] = None,
M: Optional[int] = None,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node representing a 2D array with ones on the diagonal and
zeros elsewhere. The Node has two dangling Edges.
Args:
N (int): The first dimension of the returned matrix.
dtype, optional: dtype of array (default np.float64).
M (int, optional): The second dimension of the returned matrix.
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
I : Node of shape (N, M)
      Represents an array of all zeros except for the main diagonal of all
      ones.
"""
the_node = initialize_node(
"eye",
N,
name=name,
axis_names=axis_names,
backend=backend,
dtype=dtype,
M=M)
return the_node |
Return a Node of shape `shape` of all zeros.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape`. Represents an array of all zeros. | def zeros(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node of shape `shape` of all zeros.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape`. Represents an array of all zeros.
"""
the_node = initialize_node(
"zeros",
shape,
name=name,
axis_names=axis_names,
backend=backend,
dtype=dtype)
return the_node |
Return a Node of shape `shape` of all ones.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape`
Represents an array of all ones. | def ones(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node of shape `shape` of all ones.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape`
Represents an array of all ones.
"""
the_node = initialize_node(
"ones",
shape,
name=name,
axis_names=axis_names,
backend=backend,
dtype=dtype)
return the_node |
Return a Node of shape `shape` of Gaussian random floats.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape` filled with Gaussian random data. | def randn(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node of shape `shape` of Gaussian random floats.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape` filled with Gaussian random data.
"""
the_node = initialize_node(
"randn",
shape,
name=name,
axis_names=axis_names,
backend=backend,
seed=seed,
dtype=dtype)
return the_node |
Return a Node of shape `shape` of uniform random floats.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
boundaries : Values lie in [boundaries[0], boundaries[1]).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape` filled with uniform random data. | def random_uniform(
shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None,
boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node of shape `shape` of uniform random floats.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
boundaries : Values lie in [boundaries[0], boundaries[1]).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape` filled with uniform random data.
"""
the_node = initialize_node(
"random_uniform",
shape,
name=name,
axis_names=axis_names,
backend=backend,
seed=seed,
boundaries=boundaries,
dtype=dtype)
return the_node |
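A short sketch of the Node initializers defined in this module, assuming the numpy backend; the dangling edges of the returned Nodes can be connected like those of any other Node.
```python
# Minimal sketch of the Node initializers above (assumes the numpy backend).
a = randn((2, 3), seed=10, name="a", axis_names=["left", "right"],
          backend="numpy")
b = ones((3, 4), name="b", axis_names=["left", "right"], backend="numpy")
edge = a[1] ^ b[0]  # connect a's "right" edge to b's "left" edge
```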
The L2 norm of `node`
Args:
node: A `AbstractNode`.
Returns:
The L2 norm.
Raises:
AttributeError: If `node` has no `backend` attribute. | def norm(node: AbstractNode) -> Tensor:
"""The L2 norm of `node`
Args:
node: A `AbstractNode`.
Returns:
The L2 norm.
Raises:
AttributeError: If `node` has no `backend` attribute.
"""
if not hasattr(node, 'backend'):
raise AttributeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
return node.backend.norm(node.tensor) |
Conjugate a `node`.
Args:
node: A `AbstractNode`.
name: Optional name to give the new node.
axis_names: Optional list of names for the axis.
Returns:
A new node. The complex conjugate of `node`.
Raises:
AttributeError: If `node` has no `backend` attribute. | def conj(node: AbstractNode,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None) -> AbstractNode:
"""Conjugate a `node`.
Args:
node: A `AbstractNode`.
name: Optional name to give the new node.
axis_names: Optional list of names for the axis.
Returns:
A new node. The complex conjugate of `node`.
Raises:
AttributeError: If `node` has no `backend` attribute.
"""
if not hasattr(node, 'backend'):
raise AttributeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
backend = node.backend
if not axis_names:
axis_names = node.axis_names
return Node(
backend.conj(node.tensor),
name=name,
axis_names=axis_names,
backend=backend) |
Transpose `node`
Args:
node: A `AbstractNode`.
    permutation: A list of int or str. The permutation of the axes.
name: Optional name to give the new node.
axis_names: Optional list of names for the axis.
Returns:
A new node. The transpose of `node`.
Raises:
AttributeError: If `node` has no `backend` attribute, or if
`node` has no tensor.
ValueError: If either `permutation` is not the same as expected or
if you try to permute with a trace edge. | def transpose(node: AbstractNode,
permutation: Sequence[Union[Text, int]],
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None) -> AbstractNode:
"""Transpose `node`
Args:
node: A `AbstractNode`.
    permutation: A list of int or str. The permutation of the axes.
name: Optional name to give the new node.
axis_names: Optional list of names for the axis.
Returns:
A new node. The transpose of `node`.
Raises:
AttributeError: If `node` has no `backend` attribute, or if
`node` has no tensor.
ValueError: If either `permutation` is not the same as expected or
if you try to permute with a trace edge.
"""
if not hasattr(node, 'backend'):
raise AttributeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
perm = [node.get_axis_number(p) for p in permutation]
if not axis_names:
axis_names = node.axis_names
new_node = Node(
      node.tensor, name=name, axis_names=axis_names, backend=node.backend)
return new_node.reorder_axes(perm) |
Kronecker product of the given nodes.
  The Kronecker product of nodes is the same as the outer product, but the
  order of the axes is different. The first half of the edges of each node
  appears in the first half of the edges of the resulting node, and the
  second half of the edges of each node appears in the second half of the
  resulting node.
For example, if I had two nodes :math:`X_{ab}`, :math:`Y_{cdef}`, and
:math:`Z_{gh}`, then the resulting node would have the edges ordered
:math:`R_{acdgbefh}`.
The kronecker product is designed such that the kron of many operators is
itself an operator.
Args:
nodes: A sequence of `AbstractNode` objects.
Returns:
A `Node` that is the kronecker product of the given inputs. The first
half of the edges of this node would represent the "input" edges of the
operator and the last half of edges are the "output" edges of the
operator. | def kron(nodes: Sequence[AbstractNode]) -> AbstractNode:
"""Kronecker product of the given nodes.
  The Kronecker product of nodes is the same as the outer product, but the
  order of the axes is different. The first half of the edges of all of the
  nodes will appear in the first half of the edges of the resulting node, and
  the second half of the edges of each node will be in the second half of the
  resulting node.
  For example, if I had three nodes :math:`X_{ab}`, :math:`Y_{cdef}`, and
:math:`Z_{gh}`, then the resulting node would have the edges ordered
:math:`R_{acdgbefh}`.
The kronecker product is designed such that the kron of many operators is
itself an operator.
Args:
nodes: A sequence of `AbstractNode` objects.
Returns:
A `Node` that is the kronecker product of the given inputs. The first
half of the edges of this node would represent the "input" edges of the
operator and the last half of edges are the "output" edges of the
operator.
"""
input_edges = []
output_edges = []
for node in nodes:
order = len(node.shape)
if order % 2 != 0:
raise ValueError(f"All operator tensors must have an even order. "
f"Found tensor with order {order}")
input_edges += node.edges[:order // 2]
output_edges += node.edges[order // 2:]
result = outer_product_final_nodes(nodes, input_edges + output_edges)
return result |
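# Sketch of the edge ordering described above, written with plain numpy arrays
# instead of Nodes (illustrative only; the shapes are made up).
import numpy as np
X = np.random.rand(2, 3)          # X_{ab}
Y = np.random.rand(4, 5, 6, 7)    # Y_{cdef}
Z = np.random.rand(8, 9)          # Z_{gh}
outer = np.multiply.outer(np.multiply.outer(X, Y), Z)  # axes ordered a b c d e f g h
# Reorder to a c d g | b e f h: the first half of every factor comes first.
R = outer.transpose(0, 2, 3, 6, 1, 4, 5, 7)
assert R.shape == (2, 4, 5, 8, 3, 6, 7, 9)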
Checks that each of tensors has the same backend, returning True and an
empty string if so, or False and an error string if not.
Args:
tensors: The list of tensors whose backends to check.
fname: The name of the calling function, which will go into the errstring.
Returns:
(flag, errstr): Whether all backends agree, and an error message if not. | def _check_backends(tensors: Sequence[Tensor], fname: str) -> Tuple[bool, str]:
""" Checks that each of tensors has the same backend, returning True and an
empty string if so, or False and an error string if not.
Args:
tensors: The list of tensors whose backends to check.
fname: The name of the calling function, which will go into the errstring.
Returns:
(flag, errstr): Whether all backends agree, and an error message if not.
"""
backend_names = [tensor.backend.name for tensor in tensors]
backends_check = [backend_names[0] == name for name in backend_names[1:]]
all_backends_same = all(backends_check)
errstr = ""
if not all_backends_same:
errstr = "All Tensors fed to " + fname + "must have the same backend."
errstr += "Backends were: \n"
errstr += str([name + "\n" for name in backend_names])
return all_backends_same, errstr |
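# Tiny illustration of the helper above (a sketch; tensornetwork.ones is the
# same constructor used by the tests further below).
import numpy as np
import tensornetwork
same_a = tensornetwork.ones((2, 2), backend="numpy", dtype=np.float64)
same_b = tensornetwork.ones((2, 2), backend="numpy", dtype=np.float64)
ok, err = _check_backends([same_a, same_b], "example")
assert ok and err == ""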
Do a tensordot (contraction) of Tensors `a` and `b` over the given axes.
The behaviour of this function largely matches that of np.tensordot.
Args:
a: A Tensor.
b: Another Tensor.
axes: Two lists of integers. These values are the contraction
axes. A single integer may also be supplied, in which case both
tensors are contracted over this axis.
Raises:
ValueError, if a and b have different backends.
Returns:
The result of the tensordot, a Tensor. | def tensordot(a: Tensor, b: Tensor,
axes: Union[int, Sequence[Sequence[int]]]) -> Tensor:
"""Do a tensordot (contraction) of Tensors `a` and `b` over the given axes.
The behaviour of this function largely matches that of np.tensordot.
Args:
a: A Tensor.
b: Another Tensor.
axes: Two lists of integers. These values are the contraction
axes. A single integer may also be supplied, in which case both
tensors are contracted over this axis.
Raises:
ValueError, if a and b have different backends.
Returns:
The result of the tensordot, a Tensor.
"""
if a.backend.name != b.backend.name:
errstr = "Tried to Tensordot Tensors with differing backends \n"
    errstr += a.backend.name + " and " + b.backend.name + "."
raise ValueError(errstr)
out_array = a.backend.tensordot(a.array, b.array, axes)
out_tensor = Tensor(out_array, backend=a.backend)
return out_tensor |
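# Hedged usage sketch of tensordot on Tensor wrappers; the numpy backend and
# the tensornetwork.ones constructor used by the tests below are assumptions.
import numpy as np
import tensornetwork
t1 = tensornetwork.ones((2, 3, 4), backend="numpy", dtype=np.float64)
t2 = tensornetwork.ones((4, 3, 5), backend="numpy", dtype=np.float64)
t3 = tensordot(t1, t2, [[1, 2], [1, 0]])   # contract the size-3 and size-4 legs
assert t3.shape == (2, 5)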
Reshape Tensor to the given shape.
Args:
tensor: Tensor to reshape.
new_shape: The new shape.
Returns:
The reshaped Tensor. | def reshape(tensor: Tensor, new_shape: Sequence[int]) -> Tensor:
"""Reshape Tensor to the given shape.
Args:
tensor: Tensor to reshape.
new_shape: The new shape.
Returns:
The reshaped Tensor.
"""
return tensor.reshape(new_shape) |
Return a new `Tensor` transposed according to the permutation set
by `perm`. By default the axes are reversed.
Args:
perm: The permutation. If None (default) the index order is reversed.
Returns:
The transposed `Tensor`. | def transpose(tensor: Tensor, perm: Optional[Sequence[int]] = None) -> Tensor:
""" Return a new `Tensor` transposed according to the permutation set
by `axes`. By default the axes are reversed.
Args:
axes: The permutation. If None (default) the index order is reversed.
Returns:
The transposed `Tensor`.
"""
return tensor.transpose(perm=perm) |
Obtains a slice of a Tensor based on start_indices and slice_sizes.
Args:
Tensor: A Tensor.
start_indices: Tuple of integers denoting start indices of slice.
slice_sizes: Tuple of integers denoting size of slice along each axis.
Returns:
The slice, a Tensor. | def take_slice(tensor: Tensor, start_indices: Tuple[int, ...],
slice_sizes: Tuple[int, ...]) -> Tensor:
"""Obtains a slice of a Tensor based on start_indices and slice_sizes.
Args:
Tensor: A Tensor.
start_indices: Tuple of integers denoting start indices of slice.
slice_sizes: Tuple of integers denoting size of slice along each axis.
Returns:
The slice, a Tensor.
"""
sliced = tensor.backend.slice(tensor.array, start_indices, slice_sizes)
sliced_tensor = Tensor(sliced, backend=tensor.backend)
return sliced_tensor |
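# Hedged usage sketch of take_slice; the numpy backend and the
# tensornetwork.ones constructor used by the tests below are assumptions.
import numpy as np
import tensornetwork
big = tensornetwork.ones((4, 5), backend="numpy", dtype=np.float64)
piece = take_slice(big, start_indices=(1, 2), slice_sizes=(2, 3))
assert piece.shape == (2, 3)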
Get the shape of a Tensor as a tuple of integers.
Args:
Tensor: A Tensor.
Returns:
The shape of the input Tensor. | def shape(tensor: Tensor) -> Tuple[int, ...]:
"""Get the shape of a Tensor as a tuple of integers.
Args:
Tensor: A Tensor.
Returns:
The shape of the input Tensor.
"""
return tensor.shape |
Take the square root (element wise) of a given Tensor. | def sqrt(tensor: Tensor) -> Tensor:
"""Take the square root (element wise) of a given Tensor."""
out_array = tensor.backend.sqrt(tensor.array)
return Tensor(out_array, backend=tensor.backend) |
Calculate the outer product of the two given Tensors. | def outer(tensor1: Tensor, tensor2: Tensor) -> Tensor:
"""Calculate the outer product of the two given Tensors."""
tensors = [tensor1, tensor2]
all_backends_same, errstr = _check_backends(tensors, "outer")
if not all_backends_same:
raise ValueError(errstr)
out_data = tensor1.backend.outer_product(tensor1.array, tensor2.array)
return Tensor(out_data, backend=tensor1.backend) |
Calculate sum of products of Tensors according to expression. | def einsum(expression: Text, *tensors: Tensor, optimize: bool) -> Tensor:
"""Calculate sum of products of Tensors according to expression."""
all_backends_same, errstr = _check_backends(tensors, "einsum")
if not all_backends_same:
raise ValueError(errstr)
backend = tensors[0].backend
arrays = [tensor.array for tensor in tensors]
result_data = backend.einsum(expression, *arrays, optimize=optimize)
return Tensor(result_data, backend=backend) |
Return the complex conjugate of `Tensor`
Args:
Tensor: A Tensor.
Returns:
The complex conjugated Tensor. | def conj(tensor: Tensor) -> Tensor:
"""
Return the complex conjugate of `Tensor`
Args:
Tensor: A Tensor.
Returns:
The complex conjugated Tensor.
"""
return tensor.conj() |
The Hermitian conjugate of the tensor; i.e. the complex conjugate transposed
by the permutation set by `perm`. By default the axes are reversed.
Args:
tensor: The Tensor to conjugate.
perm: The permutation. If None (default) the index order is reversed.
Returns:
The Hermitian conjugated `Tensor`. | def hconj(tensor: Tensor, perm: Optional[Sequence[int]] = None) -> Tensor:
""" The Hermitian conjugated tensor; e.g. the complex conjugate tranposed
by the permutation set be `axes`. By default the axes are reversed.
Args:
tensor: The Tensor to conjugate.
axes: The permutation. If None (default) the index order is reversed.
Returns:
The Hermitian conjugated `Tensor`.
"""
return tensor.hconj(perm=perm) |
Return sin of `Tensor`.
Args:
Tensor: A Tensor.
Returns:
Tensor | def sin(tensor: Tensor) -> Tensor:
"""
Return sin of `Tensor`.
Args:
Tensor: A Tensor.
Returns:
Tensor
"""
out_array = tensor.backend.sin(tensor.array)
return Tensor(out_array, backend=tensor.backend) |
Return cos of `Tensor`.
Args:
Tensor: A Tensor.
Returns:
Tensor | def cos(tensor: Tensor) -> Tensor:
"""
Return cos of `Tensor`.
Args:
Tensor: A Tensor.
Returns:
Tensor
"""
out_array = tensor.backend.cos(tensor.array)
return Tensor(out_array, backend=tensor.backend) |
Return elementwise exp of `Tensor`.
Args:
Tensor: A Tensor.
Returns:
Tensor | def exp(tensor: Tensor) -> Tensor:
"""
Return elementwise exp of `Tensor`.
Args:
Tensor: A Tensor.
Returns:
Tensor
"""
out_array = tensor.backend.exp(tensor.array)
return Tensor(out_array, backend=tensor.backend) |
Return elementwise natural logarithm of `Tensor`.
Args:
Tensor: A Tensor.
Returns:
Tensor | def log(tensor: Tensor) -> Tensor:
"""
Return elementwise natural logarithm of `Tensor`.
Args:
Tensor: A Tensor.
Returns:
Tensor
"""
out_array = tensor.backend.log(tensor.array)
return Tensor(out_array, backend=tensor.backend) |
Extracts the offset'th diagonal from the matrix slice of tensor indexed
by (axis1, axis2).
Args:
tensor: A Tensor.
offset: Offset of the diagonal from the main diagonal.
axis1, axis2: Indices of the matrix slice to extract from.
Returns:
out : A 1D Tensor storing the elements of the selected diagonal. | def diagonal(tensor: Tensor, offset: int = 0, axis1: int = -2,
axis2: int = -1) -> Tensor:
"""
Extracts the offset'th diagonal from the matrix slice of tensor indexed
by (axis1, axis2).
Args:
tensor: A Tensor.
offset: Offset of the diagonal from the main diagonal.
axis1, axis2: Indices of the matrix slice to extract from.
Returns:
out : A 1D Tensor storing the elements of the selected diagonal.
"""
backend = tensor.backend
result = backend.diagonal(tensor.array, offset=offset, axis1=axis1,
axis2=axis2)
return Tensor(result, backend=backend) |
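# Semantics sketch with plain numpy; the backend's diagonal is assumed to
# follow np.diagonal for the default axes.
import numpy as np
t = np.arange(2 * 3 * 3).reshape(2, 3, 3)
d = np.diagonal(t, offset=1, axis1=-2, axis2=-1)
assert d.shape == (2, 2)           # one 2-element superdiagonal per leading slice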
Flattens tensor and places its elements at the k'th diagonal of a new
(tensor.size + k, tensor.size + k) `Tensor` of zeros.
Args:
tensor: A Tensor.
k : The elements of tensor will be stored at this diagonal.
Returns:
out : A (tensor.size + k, tensor.size + k) `Tensor` with the elements
of tensor on its kth diagonal. | def diagflat(tensor: Tensor, k: int = 0) -> Tensor:
"""
Flattens tensor and places its elements at the k'th diagonal of a new
(tensor.size + k, tensor.size + k) `Tensor` of zeros.
Args:
tensor: A Tensor.
k : The elements of tensor will be stored at this diagonal.
Returns:
out : A (tensor.size + k, tensor.size + k) `Tensor` with the elements
of tensor on its kth diagonal.
"""
backend = tensor.backend
result = backend.diagflat(tensor.array, k=k)
return Tensor(result, backend=backend) |
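# Semantics sketch using numpy's diagflat; the backend call above is assumed
# to behave analogously.
import numpy as np
v = np.array([1., 2., 3.])
m = np.diagflat(v, k=1)            # 4 x 4 matrix, v on the first superdiagonal
assert m.shape == (4, 4)
assert np.allclose(np.diag(m, k=1), v)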
Calculate the sum along diagonal entries of the given Tensor. The
entries of the `offset`'th diagonal of the matrix slice of tensor indexed by
(axis1, axis2) are summed.
Args:
tensor: A Tensor.
offset: Offset of the diagonal from the main diagonal.
axis1, axis2: Indices of the matrix slice to extract from.
Returns:
out: The trace. | def trace(tensor: Tensor, offset: int = 0, axis1: int = -2,
axis2: int = -1) -> Tensor:
"""Calculate the sum along diagonal entries of the given Tensor. The
  entries of the `offset`'th diagonal of the matrix slice of tensor indexed by
(axis1, axis2) are summed.
Args:
tensor: A Tensor.
offset: Offset of the diagonal from the main diagonal.
axis1, axis2: Indices of the matrix slice to extract from.
Returns:
out: The trace.
"""
backend = tensor.backend
result = backend.trace(tensor.array, offset=offset, axis1=axis1,
axis2=axis2)
return Tensor(result, backend=backend) |
Returns the sign of the elements of Tensor.
| def sign(tensor: Tensor) -> Tensor:
""" Returns the sign of the elements of Tensor.
"""
backend = tensor.backend
result = backend.sign(tensor.array)
return Tensor(result, backend=backend) |
Returns the absolute value of the elements of Tensor.
| def abs(tensor: Tensor) -> Tensor:
""" Returns the absolute value of the elements of Tensor.
"""
backend = tensor.backend
result = backend.abs(tensor.array)
return Tensor(result, backend=backend) |
Reshapes tensor into a matrix about the pivot_axis. Equivalent to
tensor.reshape(prod(tensor.shape[:pivot_axis]),
prod(tensor.shape[pivot_axis:])).
Args:
tensor: The input tensor.
pivot_axis: Axis to pivot around. | def pivot(tensor: Tensor, pivot_axis: int = -1) -> Tensor:
""" Reshapes tensor into a matrix about the pivot_axis. Equivalent to
tensor.reshape(prod(tensor.shape[:pivot_axis]),
prod(tensor.shape[pivot_axis:])).
Args:
tensor: The input tensor.
pivot_axis: Axis to pivot around.
"""
backend = tensor.backend
result = backend.pivot(tensor.array, pivot_axis=pivot_axis)
return Tensor(result, backend=backend) |
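# Illustrative numpy equivalent of the pivot reshape described above (the
# backend implementation is assumed to agree with this).
import numpy as np
t = np.arange(2 * 3 * 4).reshape(2, 3, 4)
pivot_axis = 2
mat = t.reshape(int(np.prod(t.shape[:pivot_axis])),
                int(np.prod(t.shape[pivot_axis:])))
assert mat.shape == (6, 4)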
Compute the (tensor) kronecker product between `tensorA` and
`tensorB`. `tensorA` and `tensorB` can be tensors of any
even order (i.e. `tensorA.ndim % 2 == 0`, `tensorB.ndim % 2 == 0`).
The returned tensor has index ordering such that when reshaped into
a matrix with `pivot = tensorA.ndim//2 + tensorB.ndim//2`,
the resulting matrix is identical to the result of numpy's
`np.kron(matrixA, matrixB)`, with `matrixA, matrixB` matrices
obtained from reshaping `tensorA` and `tensorB` into matrices with
`pivotA = tensorA.ndim//2`, `pivotB = tensorB.ndim//2`
Example:
`tensorA.shape = (2,3,4,5)`, `tensorB.shape = (6,7)` ->
`kron(tensorA, tensorB).shape = (2, 3, 6, 4, 5, 7)`
Args:
tensorA: A `Tensor`.
tensorB: A `Tensor`.
Returns:
Tensor: The kronecker product.
Raises:
ValueError: - If the backends of `tensorA` and `tensorB` do not match.
- If ndims of the input tensors are not even. | def kron(tensorA: Tensor, tensorB: Tensor) -> Tensor:
"""
Compute the (tensor) kronecker product between `tensorA` and
`tensorB`. `tensorA` and `tensorB` can be tensors of any
even order (i.e. `tensorA.ndim % 2 == 0`, `tensorB.ndim % 2 == 0`).
The returned tensor has index ordering such that when reshaped into
  a matrix with `pivot = tensorA.ndim//2 + tensorB.ndim//2`,
the resulting matrix is identical to the result of numpy's
`np.kron(matrixA, matrixB)`, with `matrixA, matrixB` matrices
obtained from reshaping `tensorA` and `tensorB` into matrices with
`pivotA = tensorA.ndim//2`, `pivotB = tensorB.ndim//2`
Example:
    `tensorA.shape = (2,3,4,5)`, `tensorB.shape = (6,7)` ->
`kron(tensorA, tensorB).shape = (2, 3, 6, 4, 5, 7)`
Args:
tensorA: A `Tensor`.
tensorB: A `Tensor`.
Returns:
Tensor: The kronecker product.
Raises:
    ValueError: - If the backends of `tensorA` and `tensorB` do not match.
- If ndims of the input tensors are not even.
"""
  tensors = [tensorA, tensorB]
all_backends_same, errstr = _check_backends(tensors, "kron")
if not all_backends_same:
raise ValueError(errstr)
ndimA, ndimB = tensorA.ndim, tensorB.ndim
if ndimA % 2 != 0:
raise ValueError(f"kron only supports tensors with even number of legs."
f"found tensorA.ndim = {ndimA}")
if ndimB % 2 != 0:
raise ValueError(f"kron only supports tensors with even number of legs."
f"found tensorB.ndim = {ndimB}")
backend = tensorA.backend
incoming = list(range(ndimA // 2)) + list(range(ndimA, ndimA + ndimB // 2))
outgoing = list(range(ndimA // 2, ndimA)) + list(
range(ndimA + ndimB // 2, ndimA + ndimB))
arr = backend.transpose(
backend.outer_product(tensorA.array, tensorB.array), incoming + outgoing)
return Tensor(arr, backend=backend) |
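# Numerical check of the ordering claim above, using plain numpy (a sketch,
# not the library's own test; shapes are arbitrary).
import numpy as np
A = np.random.rand(2, 3, 4, 5)                   # even order, pivotA = 2
B = np.random.rand(6, 7)                         # even order, pivotB = 1
matA = A.reshape(2 * 3, 4 * 5)
matB = B.reshape(6, 7)
out = np.multiply.outer(A, B).transpose(0, 1, 4, 2, 3, 5)   # shape (2, 3, 6, 4, 5, 7)
assert np.allclose(out.reshape(2 * 3 * 6, 4 * 5 * 7), np.kron(matA, matB))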
Tests tensornetwork.eye against np.eye. | def test_eye(backend):
"""
Tests tensornetwork.eye against np.eye.
"""
N = 4
M = 6
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = tensornetwork.eye(N, dtype=dtype, M=M, backend=backend)
npI = backend_obj.eye(N, dtype=dtype, M=M)
np.testing.assert_allclose(tnI.array, npI) |
Tests tensornetwork.zeros against np.zeros. | def test_zeros(backend):
"""
Tests tensornetwork.zeros against np.zeros.
"""
shape = (5, 10, 3)
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = tensornetwork.zeros(shape, dtype=dtype, backend=backend)
npI = backend_obj.zeros(shape, dtype=dtype)
np.testing.assert_allclose(tnI.array, npI) |
Tests tensornetwork.ones against np.ones. | def test_ones(backend):
"""
Tests tensornetwork.ones against np.ones.
"""
shape = (5, 10, 3)
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = tensornetwork.ones(shape, dtype=dtype, backend=backend)
npI = backend_obj.ones(shape, dtype=dtype)
np.testing.assert_allclose(tnI.array, npI) |
Tests tensornetwork.randn against the backend code. | def test_randn(backend):
"""
Tests tensornetwork.randn against the backend code.
"""
shape = (5, 10, 3, 2)
seed = int(time.time())
np.random.seed(seed=seed)
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["rand"]:
tnI = tensornetwork.randn(
shape,
dtype=dtype,
seed=seed,
backend=backend)
npI = backend_obj.randn(shape, dtype=dtype, seed=seed)
np.testing.assert_allclose(tnI.array, npI) |
Tests tensornetwork.random_uniform against the backend code. | def test_random_uniform(backend):
"""
  Tests tensornetwork.random_uniform against the backend code.
"""
shape = (5, 10, 3, 2)
seed = int(time.time())
np.random.seed(seed=seed)
boundaries = (-0.3, 10.5)
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["rand"]:
tnI = tensornetwork.random_uniform(
shape,
dtype=dtype,
seed=seed,
boundaries=boundaries,
backend=backend)
npI = backend_obj.random_uniform(
shape, dtype=dtype, seed=seed, boundaries=boundaries)
np.testing.assert_allclose(tnI.array, npI) |
Tests tensornetwork.ones_like against np.ones_like | def test_ones_like(backend, shape, n):
"""Tests tensornetwork.ones_like against np.zeros_like"""
backend_obj = backends.backend_factory.get_backend(backend)
@pytest.mark.parametrize("dtype,expected", (dtypes[backend]["all"]))
def inner_ones_test(dtype):
objTensor = tensornetwork.ones(shape, dtype=dtype,
backend=backend)
tensor = tensornetwork.ones_like(objTensor, dtype=dtype,
backend=backend)
numpyT = tensornetwork.ones_like(n, dtype=dtype,
backend=backend)
tensorCheck = backend_obj.ones(shape, dtype=dtype)
numpyCheck = backend_obj.ones(n.shape, dtype=dtype)
np.testing.assert_allclose(tensor.array, tensorCheck)
np.testing.assert_allclose(numpyT.array, numpyCheck) |
Tests tensornetwork.zeros_like against np.zeros_like | def test_zeros_like(backend, shape, n):
"""Tests tensornetwork.zeros_like against np.zeros_like"""
  backend_obj = backends.backend_factory.get_backend(backend)
  for dtype in dtypes[backend]["all"]:
    objTensor = tensornetwork.zeros(shape, dtype=dtype, backend=backend)
    tensor = tensornetwork.zeros_like(objTensor, dtype=dtype, backend=backend)
    numpyT = tensornetwork.zeros_like(n, dtype=dtype, backend=backend)
    tensorCheck = backend_obj.zeros(shape, dtype=dtype)
    numpyCheck = backend_obj.zeros(n.shape, dtype=dtype)
    np.testing.assert_allclose(tensor.array, tensorCheck)
    np.testing.assert_allclose(numpyT.array, numpyCheck) |
Tests node_linalg.eye against np.eye. | def test_eye(backend):
"""
Tests node_linalg.eye against np.eye.
"""
N = 4
M = 6
name = "Jeffrey"
axis_names = ["Sam", "Blinkey"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = node_linalg.eye(
N, dtype=dtype, M=M, name=name, axis_names=axis_names, backend=backend)
npI = backend_obj.eye(N, dtype=dtype, M=M)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend |
Tests node_linalg.zeros against np.zeros. | def test_zeros(backend):
"""
Tests node_linalg.zeros against np.zeros.
"""
shape = (5, 10, 3)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = node_linalg.zeros(
shape, dtype=dtype, name=name, axis_names=axis_names, backend=backend)
npI = backend_obj.zeros(shape, dtype=dtype)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend |
Tests node_linalg.ones against np.ones. | def test_ones(backend):
"""
Tests node_linalg.ones against np.ones.
"""
shape = (5, 10, 3)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["all"]:
tnI = node_linalg.ones(
shape, dtype=dtype, name=name, axis_names=axis_names, backend=backend)
npI = backend_obj.ones(shape, dtype=dtype)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend |
Tests node_linalg.randn against the backend code. | def test_randn(backend):
"""
Tests node_linalg.randn against the backend code.
"""
shape = (5, 10, 3, 2)
seed = int(time.time())
np.random.seed(seed=seed)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["rand"]:
tnI = node_linalg.randn(
shape,
dtype=dtype,
name=name,
axis_names=axis_names,
backend=backend,
seed=seed)
npI = backend_obj.randn(shape, dtype=dtype, seed=seed)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend |
Tests node_linalg.random_uniform against the backend code. | def test_random_uniform(backend):
"""
  Tests node_linalg.random_uniform against the backend code.
"""
shape = (5, 10, 3, 2)
seed = int(time.time())
np.random.seed(seed=seed)
boundaries = (-0.3, 10.5)
name = "Jeffrey"
axis_names = ["Sam", "Blinkey", "Renaldo", "Jarvis"]
backend_obj = backends.backend_factory.get_backend(backend)
for dtype in dtypes[backend]["rand"]:
tnI = node_linalg.random_uniform(
shape,
dtype=dtype,
name=name,
axis_names=axis_names,
backend=backend,
seed=seed,
boundaries=boundaries)
npI = backend_obj.random_uniform(
shape, dtype=dtype, seed=seed, boundaries=boundaries)
np.testing.assert_allclose(tnI.tensor, npI)
assert tnI.name == name
edges = tnI.get_all_dangling()
for edge, expected_name in zip(edges, axis_names):
assert edge.name == expected_name
assert tnI.backend.name == backend |
Compares linalg.krylov.eigsh_lanczos with backend.eigsh_lanczos. | def test_eigsh_lanczos(sparse_backend, dtype):
"""
Compares linalg.krylov.eigsh_lanczos with backend.eigsh_lanczos.
"""
n = 2
shape = (n, n)
dtype = testing_utils.np_dtype_to_backend(sparse_backend, dtype)
A = tensornetwork.linalg.initialization.ones(shape,
backend=sparse_backend,
dtype=dtype)
x0 = tensornetwork.linalg.initialization.ones((n, 1), backend=sparse_backend,
dtype=dtype)
def matvec(B):
return A @ B
result = tensornetwork.linalg.krylov.eigsh_lanczos(matvec, backend=A.backend,
x0=x0, num_krylov_vecs=n-1)
def array_matvec(B):
return A.array @ B
rev, reV = result
test_result = A.backend.eigsh_lanczos(array_matvec, initial_state=x0.array,
num_krylov_vecs=n-1)
tev, teV = test_result
assert np.all(np.isfinite(np.ravel(np.array(rev))))
np.testing.assert_allclose(np.array(rev), np.array(tev))
for r, t in zip(reV, teV):
assert np.all(np.isfinite(np.ravel(r.array)))
np.testing.assert_allclose(r.array, t) |
Compares linalg.krylov.eigsh_lanczos with backend.eigsh_lanczos. | def test_eigsh_lanczos_with_args(sparse_backend, dtype):
"""
Compares linalg.krylov.eigsh_lanczos with backend.eigsh_lanczos.
"""
n = 2
shape = (n, n)
dtype = testing_utils.np_dtype_to_backend(sparse_backend, dtype)
A = tensornetwork.linalg.initialization.ones(shape,
backend=sparse_backend,
dtype=dtype)
x0 = tensornetwork.linalg.initialization.ones((n, 1), backend=sparse_backend,
dtype=dtype)
def matvec(B):
return A @ B
def matvec_args(B, A):
return A @ B
result = tensornetwork.linalg.krylov.eigsh_lanczos(matvec, backend=A.backend,
x0=x0, num_krylov_vecs=n-1)
test = tensornetwork.linalg.krylov.eigsh_lanczos(matvec_args,
backend=A.backend,
x0=x0, num_krylov_vecs=n-1,
args=[A, ])
rev, reV = result
tev, teV = test
assert np.all(np.isfinite(np.ravel(np.array(rev))))
np.testing.assert_allclose(np.array(rev), np.array(tev))
for r, t in zip(reV, teV):
assert np.all(np.isfinite(np.ravel(r.array)))
np.testing.assert_allclose(r.array, t.array) |
Tests that tensordot raises ValueError when fed Tensors with different
backends. Other failure modes are tested at the backend level. | def test_tensordot_invalid_backend_raises_value_error(backend, dtype):
"""
Tests that tensordot raises ValueError when fed Tensors with different
backends. Other failure modes are tested at the backend level.
"""
backend_names = set(["jax", "numpy", "tensorflow", "pytorch"])
this_name = set([backend])
other_backend_names = list(backend_names - this_name)
shape = (4, 4, 4)
dtype1 = testing_utils.np_dtype_to_backend(backend, dtype)
testing_utils.check_contraction_dtype(backend, dtype1)
tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype1)
for other_backend in other_backend_names:
dtype2 = testing_utils.np_dtype_to_backend(other_backend, dtype)
testing_utils.check_contraction_dtype(other_backend, dtype2)
tensor2 = tensornetwork.ones(shape, backend=other_backend, dtype=dtype2)
with pytest.raises(ValueError):
_ = tensornetwork.tensordot(tensor1, tensor2, [[2, 0, 1], [1, 2, 0]]) |