Tests that tensordot yields the same result as the backend equivalent.
def test_tensordot_vs_backend(backend, dtype):
  """
  Tests that tensordot yields the same result as the backend equivalent.
  """
  shape = (4, 4, 4)
  dtype = testing_utils.np_dtype_to_backend(backend, dtype)
  testing_utils.check_contraction_dtype(backend, dtype)
  tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
  tensor2 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
  tensors = [tensor1, tensor2]
  dims = [[2, 0, 1], [1, 2, 0]]
  result = tensornetwork.tensordot(*tensors, dims)
  backend_obj = backends.backend_factory.get_backend(backend)
  arrays = [t.array for t in tensors]
  backend_result = backend_obj.tensordot(*arrays, axes=dims)
  np.testing.assert_allclose(backend_result, result.array)
Tests that tensordot with an integer axes argument yields the same result as the backend equivalent.
def test_tensordot_int_vs_backend(backend, dtype):
  """
  Tests that tensordot with an integer axes argument yields the same result
  as the backend equivalent.
  """
  shape = (4, 4, 4)
  dtype = testing_utils.np_dtype_to_backend(backend, dtype)
  testing_utils.check_contraction_dtype(backend, dtype)
  tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
  tensor2 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
  tensors = [tensor1, tensor2]
  dim = 1
  result = tensornetwork.tensordot(*tensors, dim)
  backend_obj = backends.backend_factory.get_backend(backend)
  arrays = [t.array for t in tensors]
  backend_result = backend_obj.tensordot(*arrays, axes=dim)
  np.testing.assert_allclose(backend_result, result.array)
Tests that reshape yields the same result as the backend equivalent.
def test_reshape_vs_backend(backend, dtype):
  """
  Tests that reshape yields the same result as the backend equivalent.
  """
  shape = (3, 2, 4)
  dtype = testing_utils.np_dtype_to_backend(backend, dtype)
  tensor = tensornetwork.ones(shape, backend=backend, dtype=dtype)
  result = tensornetwork.reshape(tensor, (6, 4))
  backend_obj = backends.backend_factory.get_backend(backend)
  backend_result = backend_obj.reshape(tensor.array, (6, 4))
  assert result.shape == backend_result.shape
Tests that transpose yields the same result as the backend equivalent.
def test_transpose_vs_backend(backend, dtype):
  """
  Tests that transpose yields the same result as the backend equivalent.
  """
  shape = (3, 2, 4)
  permutation = (1, 2, 0)
  tensor, array = testing_utils.safe_randn(shape, backend, dtype)
  if tensor is not None:
    backend_obj = backends.backend_factory.get_backend(backend)
    test = backend_obj.convert_to_tensor(array)
    test = backend_obj.transpose(test, perm=permutation)
    tensor_test = tensornetwork.transpose(tensor, perm=permutation)
    np.testing.assert_allclose(test, tensor_test.array)
Tests that hconj yields the same result as the equivalent backend sequence.
def test_hconj_vs_backend(backend, dtype):
  """
  Tests that hconj yields the same result as the equivalent backend sequence.
  """
  shape = (3, 2, 4)
  permutation = (1, 2, 0)
  tensor, array = testing_utils.safe_randn(shape, backend, dtype)
  if tensor is not None:
    backend_obj = backends.backend_factory.get_backend(backend)
    test = backend_obj.convert_to_tensor(array)
    test = backend_obj.transpose(test, perm=permutation)
    test = backend_obj.conj(test)
    tensor_test = tensornetwork.hconj(tensor, perm=permutation)
    np.testing.assert_allclose(test, tensor_test.array)
Tests that take_slice yields the same result as the backend equivalent.
def test_take_slice_vs_backend(backend, dtype):
  """
  Tests that take_slice yields the same result as the backend equivalent.
  """
  shape = (5, 6, 7)
  dtype = testing_utils.np_dtype_to_backend(backend, dtype)
  tensor = tensornetwork.ones(shape, backend=backend, dtype=dtype)
  start_indices = (1, 2, 3)
  slice_sizes = (2, 3, 3)
  result = tensornetwork.take_slice(tensor, start_indices, slice_sizes)
  backend_obj = backends.backend_factory.get_backend(backend)
  backend_result = backend_obj.slice(tensor.array, start_indices, slice_sizes)
  assert result.shape == backend_result.shape
Checks that Tensor.diagonal() works.
def test_diagonal(backend, dtype):
  """ Checks that Tensor.diagonal() works. """
  shape = (2, 3, 3)
  A, _ = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(tensornetwork.diagonal(A).array,
                               A.backend.diagonal(A.array))
Checks that Tensor.diagflat() works.
def test_diagflat(backend, dtype):
  """ Checks that Tensor.diagflat() works. """
  shape = (2, 3, 3)
  A, _ = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(tensornetwork.diagflat(A).array,
                               A.backend.diagflat(A.array))
Checks that Tensor.trace() works.
def test_trace(backend, dtype):
  """ Checks that Tensor.trace() works. """
  shape = (2, 3, 3)
  A, _ = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(tensornetwork.trace(A).array,
                               A.backend.trace(A.array))
Checks that Tensor.pivot() works.
def test_pivot(backend, dtype, pivotA):
  """ Checks that Tensor.pivot() works. """
  shapeA = (2, 3, 4, 2)
  A, _ = testing_utils.safe_randn(shapeA, backend, dtype)
  if A is not None:
    if pivotA is None:
      matrixA = tensornetwork.pivot(A)
      tA = A.backend.pivot(A.array, pivot_axis=-1)
    else:
      matrixA = tensornetwork.pivot(A, pivot_axis=pivotA)
      tA = A.backend.pivot(A.array, pivot_axis=pivotA)
    np.testing.assert_allclose(matrixA.array, tA)
Checks that Tensor.kron() works.
def test_kron(backend, dtype):
  """ Checks that Tensor.kron() works. """
  if backend == "pytorch" and dtype in (np.complex64, np.complex128):
    pytest.skip("pytorch support for complex dtypes is currently poor.")
  np.random.seed(10)
  t1 = Tensor(np.random.rand(2, 2).astype(dtype), backend=backend)
  t2 = Tensor(np.random.rand(3, 3).astype(dtype), backend=backend)
  res_kron = kron(t1, t2)
  res_ncon = ncon([t1.array, t2.array], [[-1, -3], [-2, -4]], backend=backend)
  np.testing.assert_allclose(res_kron.array, res_ncon)
  mat1 = res_kron.reshape((6, 6))
  mat2 = np.kron(t1.array, t2.array)
  np.testing.assert_allclose(mat1.array, mat2)

  t1 = Tensor(np.random.rand(2, 2, 2, 2).astype(dtype), backend=backend)
  t2 = Tensor(np.random.rand(3, 3, 3, 3).astype(dtype), backend=backend)
  res_kron = kron(t1, t2)
  res_ncon = ncon([t1.array, t2.array], [[-1, -2, -5, -6], [-3, -4, -7, -8]],
                  backend=backend)
  np.testing.assert_allclose(res_kron.array, res_ncon)
  mat1 = res_kron.reshape((36, 36))
  mat2 = np.kron(
      np.array(t1.array).reshape(4, 4), np.array(t2.array).reshape(9, 9))
  np.testing.assert_allclose(mat1.array, mat2)
Constructs an appropriately specialized QuOperator. If there are no edges, creates a QuScalar. If there are only output (input) edges, creates a QuVector (QuAdjointVector). Otherwise creates a QuOperator. Args: out_edges: output edges. in_edges: in edges. ref_nodes: reference nodes for the tensor network (needed if there is a scalar component). ignore_edges: edges to ignore when checking the dimensionality of the tensor network. Returns: The object.
def quantum_constructor(
    out_edges: Sequence[Edge],
    in_edges: Sequence[Edge],
    ref_nodes: Optional[Collection[AbstractNode]] = None,
    ignore_edges: Optional[Collection[Edge]] = None) -> "QuOperator":
  """Constructs an appropriately specialized QuOperator.

  If there are no edges, creates a QuScalar. If there are only output
  (input) edges, creates a QuVector (QuAdjointVector). Otherwise creates
  a QuOperator.

  Args:
    out_edges: output edges.
    in_edges: in edges.
    ref_nodes: reference nodes for the tensor network (needed if there is a
      scalar component).
    ignore_edges: edges to ignore when checking the dimensionality of the
      tensor network.

  Returns:
    The object.
  """
  if len(out_edges) == 0 and len(in_edges) == 0:
    return QuScalar(ref_nodes, ignore_edges)
  if len(out_edges) == 0:
    return QuAdjointVector(in_edges, ref_nodes, ignore_edges)
  if len(in_edges) == 0:
    return QuVector(out_edges, ref_nodes, ignore_edges)
  return QuOperator(out_edges, in_edges, ref_nodes, ignore_edges)
Construct a `QuOperator` representing the identity on a given space. Internally, this is done by constructing `CopyNode`s for each edge, with dimension according to `space`. Args: space: A sequence of integers for the dimensions of the tensor product factors of the space (the edges in the tensor network). backend: Optionally specify the backend to use for computations. dtype: The data type (for conversion to dense). Returns: The desired identity operator.
def identity(space: Sequence[int],
             backend: Optional[Text] = None,
             dtype: Type[np.number] = np.float64) -> "QuOperator":
  """Construct a `QuOperator` representing the identity on a given space.

  Internally, this is done by constructing `CopyNode`s for each edge, with
  dimension according to `space`.

  Args:
    space: A sequence of integers for the dimensions of the tensor product
      factors of the space (the edges in the tensor network).
    backend: Optionally specify the backend to use for computations.
    dtype: The data type (for conversion to dense).

  Returns:
    The desired identity operator.
  """
  nodes = [CopyNode(2, d, backend=backend, dtype=dtype) for d in space]
  out_edges = [n[0] for n in nodes]
  in_edges = [n[1] for n in nodes]
  return quantum_constructor(out_edges, in_edges)
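A minimal usage sketch (assuming the quantum-module definitions above are in scope): because `identity` wires up both output and input edges, `quantum_constructor` dispatches to the full `QuOperator` case.

op = identity((2, 3), backend="numpy")
# out_edges and in_edges are both non-empty, so quantum_constructor returns
# a QuOperator rather than a QuVector, QuAdjointVector, or QuScalar.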
Check the vector spaces represented by two lists of edges are compatible. The number of edges must be the same and the dimensions of each pair of edges must match. Otherwise, an exception is raised. Args: edges_1: List of edges representing a many-body Hilbert space. edges_2: List of edges representing a many-body Hilbert space.
def check_spaces(edges_1: Sequence[Edge], edges_2: Sequence[Edge]) -> None:
  """Check the vector spaces represented by two lists of edges are compatible.

  The number of edges must be the same and the dimensions of each pair of
  edges must match. Otherwise, an exception is raised.

  Args:
    edges_1: List of edges representing a many-body Hilbert space.
    edges_2: List of edges representing a many-body Hilbert space.
  """
  if len(edges_1) != len(edges_2):
    raise ValueError("Hilbert-space mismatch: Cannot connect {} subsystems "
                     "with {} subsystems.".format(len(edges_1), len(edges_2)))
  for (i, (e1, e2)) in enumerate(zip(edges_1, edges_2)):
    if e1.dimension != e2.dimension:
      raise ValueError("Hilbert-space mismatch on subsystems {}: Input "
                       "dimension {} != output dimension {}.".format(
                           i, e1.dimension, e2.dimension))
Eliminates any connected CopyNodes that are identity matrices. This will modify the network represented by `nodes`. Only identities that are connected to other nodes are eliminated. Args: nodes: Collection of nodes to search. Returns: nodes_dict: Dictionary mapping remaining Nodes to any replacements. dangling_edges_dict: Dictionary specifying all dangling-edge replacements.
def eliminate_identities(nodes: Collection[AbstractNode]) -> Tuple[dict, dict]:
  """Eliminates any connected CopyNodes that are identity matrices.

  This will modify the network represented by `nodes`. Only identities that
  are connected to other nodes are eliminated.

  Args:
    nodes: Collection of nodes to search.

  Returns:
    nodes_dict: Dictionary mapping remaining Nodes to any replacements.
    dangling_edges_dict: Dictionary specifying all dangling-edge replacements.
  """
  nodes_dict = {}
  dangling_edges_dict = {}
  for n in nodes:
    if isinstance(n, CopyNode) and n.get_rank() == 2 and not (
        n[0].is_dangling() and n[1].is_dangling()):
      old_edges = [n[0], n[1]]
      _, new_edges = remove_node(n)
      if 0 in new_edges and 1 in new_edges:
        e = connect(new_edges[0], new_edges[1])
      elif 0 in new_edges:  # 1 was dangling
        dangling_edges_dict[old_edges[1]] = new_edges[0]
      elif 1 in new_edges:  # 0 was dangling
        dangling_edges_dict[old_edges[0]] = new_edges[1]
      else:
        # Trace of identity, so replace with a scalar node!
        d = n.get_dimension(0)
        # NOTE: Assume CopyNodes have numpy dtypes.
        nodes_dict[n] = Node(np.array(d, dtype=n.dtype), backend=n.backend)
    else:
      for e in n.get_all_dangling():
        dangling_edges_dict[e] = e
      nodes_dict[n] = n
  return nodes_dict, dangling_edges_dict
Creates a numpy array, initializes a Tensor from it, and checks that all its members have been correctly initialized.
def test_init_tensor_from_numpy_array(backend, dtype):
  """
  Creates a numpy array, initializes a Tensor from it, and checks that all
  its members have been correctly initialized.
  """
  A, init = testing_utils.safe_zeros((2, 3, 1), backend, dtype)
  if A is None:
    return
  assert A.backend.name == backend
  np.testing.assert_allclose(A.array, init)
  assert A.shape == init.shape
  assert A.size == init.size
  assert A.ndim == init.ndim
Creates a numpy array, initializes a Tensor from it, and checks that all its members have been correctly initialized.
def test_init_tensor_default_backend(dtype):
  """
  Creates a numpy array, initializes a Tensor from it, and checks that all
  its members have been correctly initialized.
  """
  backend = backend_contextmanager.get_default_backend()
  backend_obj = backends.backend_factory.get_backend(backend)
  shape = (3, 5, 2)
  testA = backend_obj.zeros(shape, dtype=dtype)
  init = np.zeros(shape, dtype=dtype)
  A = tensornetwork.Tensor(init)
  assert A.backend.name == backend
  np.testing.assert_allclose(A.array, testA)
  assert A.shape == testA.shape
  assert A.size == testA.size
  assert A.ndim == testA.ndim
Creates an instance of the backend's array class, initializes a Tensor from it, and checks that all its members have been correctly initialized.
def test_init_tensor_from_backend_array(backend, dtype):
  """
  Creates an instance of the backend's array class, initializes a Tensor
  from it, and checks that all its members have been correctly initialized.
  """
  shape = (2, 3, 1)
  if backend == "pytorch":
    if dtype not in testing_utils.torch_supported_dtypes:
      with pytest.raises(TypeError):
        dtype = testing_utils.np_dtype_to_backend(backend, dtype)
      return
    dtype = testing_utils.np_dtype_to_backend(backend, dtype)
    init = torch.zeros(shape, dtype=dtype)
  elif backend == "numpy":
    dtype = testing_utils.np_dtype_to_backend(backend, dtype)
    init = np.zeros(shape, dtype=dtype)
  elif backend == "jax":
    dtype = testing_utils.np_dtype_to_backend(backend, dtype)
    init = jnp.zeros(shape, dtype=dtype)
  elif backend == "tensorflow":
    dtype = testing_utils.np_dtype_to_backend(backend, dtype)
    init = tf.zeros(shape, dtype=dtype)
  else:
    raise ValueError("Unexpected backend ", backend)
  A = tensornetwork.Tensor(init, backend=backend)
  assert A.backend.name == backend
  np.testing.assert_allclose(A.array, init)
  assert A.shape == init.shape
  assert A.size == np.prod(init.shape)
  assert A.ndim == init.ndim
Checks that Tensor.dtype works.
def test_tensor_dtype(backend, dtype):
  """ Checks that Tensor.dtype works. """
  shape = (2, 3, 1)
  A, init = testing_utils.safe_zeros(shape, backend, dtype)
  if A is None:
    return
  if backend != "pytorch":
    assert A.dtype == init.dtype
  else:
    assert A.dtype == torch.tensor(init).dtype
Checks that Tensor.T works.
def test_tensor_T(backend, dtype):
  """ Checks that Tensor.T works. """
  shape = (2, 3, 1)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(A.T.array, init.T)
Checks that Tensor.H works.
def test_tensor_H(backend, dtype):
  """ Checks that Tensor.H works. """
  shape = (2, 3, 1)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(A.H.array, init.conj().T)
Checks that Tensor.conj() works.
def test_tensor_conj(backend, dtype):
  """ Checks that Tensor.conj() works. """
  shape = (2, 3, 1)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(A.conj().array, A.backend.conj(init))
Checks that Tensor.conjugate() works.
def test_tensor_conjugate(backend, dtype):
  """ Checks that Tensor.conjugate() works. """
  shape = (2, 3, 1)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(A.conjugate().array, A.backend.conj(init))
Checks that Tensor.copy() works.
def test_tensor_copy(backend, dtype):
  """ Checks that Tensor.copy() works. """
  shape = (2, 3, 1)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(A.copy().array, init.copy())
Checks that Tensor.reshape() works.
def test_tensor_reshape(backend, dtype):
  """ Checks that Tensor.reshape() works. """
  shape = (2, 3, 1)
  newshape = (6, 1)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(A.reshape(newshape).array,
                               init.reshape(newshape))
Checks that Tensor.transpose() works.
def test_tensor_transpose(backend, dtype):
  """ Checks that Tensor.transpose() works. """
  shape = (2, 3, 1)
  permutation = (1, 2, 0)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    test = A.backend.convert_to_tensor(init)
    test = A.backend.transpose(test, perm=permutation)
    np.testing.assert_allclose(A.transpose(perm=permutation).array, test)
Checks that Tensor.squeeze() works.
def test_tensor_squeeze(backend, dtype):
  """ Checks that Tensor.squeeze() works. """
  shape = (2, 3, 1)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(A.squeeze().array, init.squeeze())
Checks that Tensor.ravel() works.
def test_tensor_ravel(backend, dtype):
  """ Checks that Tensor.ravel() works. """
  shape = (2, 3, 1)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(A.ravel().array, init.ravel())
Checks that Tensor.flatten() works.
def test_tensor_flatten(backend, dtype):
  """ Checks that Tensor.flatten() works. """
  shape = (2, 3, 1)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    np.testing.assert_allclose(A.flatten().array, init.flatten())
Checks that Tensor.hconj() works.
def test_tensor_hconj(backend, dtype):
  """ Checks that Tensor.hconj() works. """
  shape = (2, 3, 1)
  permutation = (1, 2, 0)
  A, init = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    test = A.backend.convert_to_tensor(init)
    test = A.backend.transpose(A.backend.conj(test), perm=permutation)
    np.testing.assert_allclose(A.hconj(perm=permutation).array, test)
Checks that Tensor*Tensor works.
def test_tensor_multiply(backend, dtype):
  """ Checks that Tensor*Tensor works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B, initB = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    testA = A.backend.convert_to_tensor(initA)
    testB = B.backend.convert_to_tensor(initB)
    result = A * B
    result2 = A.backend.multiply(testA, testB)
    np.testing.assert_allclose(result.array, result2)
Checks that Tensor*scalar works.
def test_tensor_scalar_multiply(backend, dtype):
  """ Checks that Tensor*scalar works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B = 2.
  if A is not None:
    testA = A.backend.convert_to_tensor(initA)
    result = A * B
    result2 = A.backend.multiply(testA, B)
    np.testing.assert_allclose(result.array, result2)
Checks that scalar*Tensor works.
def test_tensor_scalar_rmultiply(backend, dtype):
  """ Checks that scalar*Tensor works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B = 2.
  if A is not None:
    testA = A.backend.convert_to_tensor(initA)
    result = B * A
    result2 = A.backend.multiply(B, testA)
    np.testing.assert_allclose(result.array, result2)
Checks that Tensor/Tensor works.
def test_tensor_divide(backend, dtype):
  """ Checks that Tensor/Tensor works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B, _ = testing_utils.safe_zeros(shape, backend, dtype)
  if A is not None:
    B = B + 1
    testA = A.backend.convert_to_tensor(initA)
    result = A / B
    result2 = A.backend.divide(testA, B.array)
    np.testing.assert_allclose(result.array, result2)
Checks that Tensor/scalar works.
def test_tensor_scalar_divide(backend, dtype):
  """ Checks that Tensor/scalar works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B = 2.
  if A is not None:
    testA = A.backend.convert_to_tensor(initA)
    result = A / B
    result2 = A.backend.divide(testA, B)
    np.testing.assert_allclose(result.array, result2)
Checks that Tensor+Tensor works.
def test_tensor_addition(backend, dtype):
  """ Checks that Tensor+Tensor works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B, initB = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    testA = A.backend.convert_to_tensor(initA)
    testB = B.backend.convert_to_tensor(initB)
    result = A + B
    result2 = A.backend.addition(testA, testB)
    np.testing.assert_allclose(result.array, result2)
Checks that Tensor+scalar works.
def test_tensor_scalar_addition(backend, dtype):
  """ Checks that Tensor+scalar works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B = 2.
  if A is not None:
    testA = A.backend.convert_to_tensor(initA)
    result = A + B
    result2 = A.backend.addition(testA, B)
    np.testing.assert_allclose(result.array, result2)
Checks that scalar+Tensor works.
def test_tensor_scalar_raddition(backend, dtype):
  """ Checks that scalar+Tensor works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B = 2.
  if A is not None:
    testA = A.backend.convert_to_tensor(initA)
    result = B + A
    result2 = A.backend.addition(B, testA)
    np.testing.assert_allclose(result.array, result2)
Checks that Tensor-Tensor works.
def test_tensor_subtraction(backend, dtype):
  """ Checks that Tensor-Tensor works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B, initB = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None:
    testA = A.backend.convert_to_tensor(initA)
    testB = B.backend.convert_to_tensor(initB)
    result = A - B
    result2 = A.backend.subtraction(testA, testB)
    np.testing.assert_allclose(result.array, result2)
Checks that Tensor-scalar works.
def test_tensor_scalar_subtraction(backend, dtype):
  """ Checks that Tensor-scalar works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B = 2.
  if A is not None:
    testA = A.backend.convert_to_tensor(initA)
    result = A - B
    result2 = A.backend.subtraction(testA, B)
    np.testing.assert_allclose(result.array, result2)
Checks that scalar-Tensor works.
def test_tensor_scalar_rsubtraction(backend, dtype):
  """ Checks that scalar-Tensor works. """
  shape = (2, 3, 1)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B = 2.
  if A is not None:
    testA = A.backend.convert_to_tensor(initA)
    result = B - A
    result2 = A.backend.subtraction(B, testA)
    np.testing.assert_allclose(result.array, result2)
Checks that Tensor@Tensor works.
def test_tensor_matmul(backend, dtype):
  """ Checks that Tensor@Tensor works. """
  shape = (3, 3)
  A, initA = testing_utils.safe_randn(shape, backend, dtype)
  B, initB = testing_utils.safe_randn(shape, backend, dtype)
  if A is not None and B is not None:
    testA = A.backend.convert_to_tensor(initA)
    testB = B.backend.convert_to_tensor(initB)
    result = A @ B
    result2 = A.backend.matmul(testA, testB)
    np.testing.assert_allclose(result.array, result2)
Checks that tensor operators raise the right error.
def test_tensor_ops_raise(dtype):
  """ Checks that tensor operators raise the right error. """
  shape = (2, 3, 1)
  A, _ = testing_utils.safe_randn(shape, "numpy", dtype)
  B, _ = testing_utils.safe_randn(shape, "jax", dtype)
  with pytest.raises(ValueError):
    _ = A * B
  with pytest.raises(ValueError):
    _ = A + B
  with pytest.raises(ValueError):
    _ = A - B
  with pytest.raises(ValueError):
    _ = A / B
  with pytest.raises(ValueError):
    _ = A @ B
Creates a random tensor, catching errors that occur when the dtype is not supported by the backend. Returns the Tensor and the backend array, which are both None if the dtype and backend did not match.
def safe_randn(shape, backend, dtype):
  """
  Creates a random tensor, catching errors that occur when the dtype is
  not supported by the backend. Returns the Tensor and the backend array,
  which are both None if the dtype and backend did not match.
  """
  np.random.seed(seed=10)
  init = np.random.randn(*shape)
  if dtype == np.bool_:  # np.bool_ replaces the long-deprecated np.bool alias
    init = np.round(init)
  init = init.astype(dtype)
  if dtype in np_complex:
    init_i = np.random.randn(*shape)
    init = init + 1.0j * init_i.astype(dtype)
  if backend == "pytorch" and dtype not in torch_supported_dtypes:
    pytest.skip("dtype unsupported by PyTorch")
  else:
    A = tensornetwork.Tensor(init, backend=backend)
  return (A, init)
Creates a tensor of zeros, catching errors that occur when the dtype is not supported by the backend. Returns the Tensor and the backend array, both of which are None if the dtype and backend did not match.
def safe_zeros(shape, backend, dtype):
  """
  Creates a tensor of zeros, catching errors that occur when the dtype is
  not supported by the backend. Returns the Tensor and the backend array,
  both of which are None if the dtype and backend did not match.
  """
  init = np.zeros(shape, dtype=dtype)
  if backend == "pytorch" and dtype not in torch_supported_dtypes:
    pytest.skip("dtype unsupported by PyTorch")
  else:
    A = tensornetwork.Tensor(init, backend=backend)
  return (A, init)
Converts a given np dtype to the equivalent in the given backend. Skips the present test if the dtype is not supported in the backend.
def np_dtype_to_backend(backend, dtype):
  """
  Converts a given np dtype to the equivalent in the given backend. Skips
  the present test if the dtype is not supported in the backend.
  """
  backend_obj = backends.backend_factory.get_backend(backend)
  if backend_obj.name in ("numpy", "symmetric"):
    return dtype
  A_np = np.ones([1], dtype=dtype)
  if backend_obj.name == "jax":
    A = jnp.array(A_np)
  elif backend_obj.name == "tensorflow":
    A = tf.convert_to_tensor(A_np, dtype=dtype)
  elif backend_obj.name == "pytorch":
    if dtype not in torch_supported_dtypes:
      pytest.skip("dtype unsupported by PyTorch")
    A = torch.tensor(A_np)
  else:
    raise ValueError("Invalid backend ", backend)
  return A.dtype
Skips the test if the backend cannot perform multiply-add with the given dtype.
def check_contraction_dtype(backend, dtype):
  """
  Skips the test if the backend cannot perform multiply-add with the given
  dtype.
  """
  skip = False
  if backend == "tensorflow":
    if dtype in [np.uint8, tf.uint8, np.uint16, tf.uint16, np.int8, tf.int8,
                 np.int16, tf.int16, np.uint32, tf.uint32, np.uint64,
                 tf.uint64]:
      skip = True
  if backend == "pytorch":
    if dtype in [np.float16, torch.float16]:
      skip = True
  if skip:
    pytest.skip("backend does not support multiply-add with this dtype.")
Create a graphviz Graph that is isomorphic to the given TensorNetwork. Args: nodes: a collection of nodes graph: An optional `graphviz.Graph` object to write to. Use this only if you wish to set custom attributes for the graph. include_all_names: Whether to include all of the names in the graph. If False, all names starting with '__' (which are almost always just the default generated names) will be dropped to reduce clutter. engine: The graphviz engine to use. Only applicable if `graph` is None. Returns: The `graphviz.Graph` object.
def to_graphviz(nodes: Iterable[AbstractNode],
                graph: Optional[graphviz.Graph] = None,
                include_all_names: bool = False,
                engine: Text = "neato") -> graphviz.Graph:
  """Create a graphviz Graph that is isomorphic to the given TensorNetwork.

  Args:
    nodes: a collection of nodes
    graph: An optional `graphviz.Graph` object to write to. Use this only
      if you wish to set custom attributes for the graph.
    include_all_names: Whether to include all of the names in the graph.
      If False, all names starting with '__' (which are almost always just
      the default generated names) will be dropped to reduce clutter.
    engine: The graphviz engine to use. Only applicable if `graph` is None.

  Returns:
    The `graphviz.Graph` object.
  """
  if graph is None:
    #pylint: disable=no-member
    graph = graphviz.Graph('G', engine=engine)
  for node in nodes:
    if not node.name.startswith("__") or include_all_names:
      label = node.name
    else:
      label = ""
    graph.node(str(id(node)), label=label)
  seen_edges = set()
  for node in nodes:
    for i, edge in enumerate(node.edges):
      if edge in seen_edges:
        continue
      seen_edges.add(edge)
      if not edge.name.startswith("__") or include_all_names:
        edge_label = edge.name
      else:
        edge_label = ""
      if edge.is_dangling():
        # We need to create an invisible node for the dangling edge
        # to connect to.
        graph.node(
            "{}_{}".format(id(node), i),
            label="",
            _attributes={"style": "invis"})
        graph.edge(
            "{}_{}".format(id(node), i), str(id(node)), label=edge_label)
      else:
        graph.edge(str(id(edge.node1)), str(id(edge.node2)), label=edge_label)
  return graph
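A small usage sketch (assuming the standard `tensornetwork` node API): connect two nodes, leave two edges dangling, and print the DOT source. The dangling edges show up via the invisible helper nodes created above.

import numpy as np
import tensornetwork as tn

a = tn.Node(np.ones((2, 2)), name="a")
b = tn.Node(np.ones((2, 2)), name="b")
a[1] ^ b[0]  # connect one edge; a[0] and b[1] remain dangling
print(to_graphviz([a, b]).source)  # DOT text for the two-node network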
Parse an error given an HTTP status code and a JSON payload Args: status_code (`int`): HTTP status code payload (`Dict[str, str]`): JSON payload Returns: Exception: parsed exception
def parse_error(status_code: int, payload: Dict[str, str]) -> Exception:
    """
    Parse an error given an HTTP status code and a JSON payload

    Args:
        status_code (`int`):
            HTTP status code
        payload (`Dict[str, str]`):
            JSON payload

    Returns:
        Exception: parsed exception
    """
    # Try to parse a Text Generation Inference error
    message = payload["error"]
    if "error_type" in payload:
        error_type = payload["error_type"]
        if error_type == "generation":
            return GenerationError(message)
        if error_type == "incomplete_generation":
            return IncompleteGenerationError(message)
        if error_type == "overloaded":
            return OverloadedError(message)
        if error_type == "validation":
            return ValidationError(message)

    # Try to parse an API Inference error
    if status_code == 400:
        return BadRequestError(message)
    if status_code == 403 or status_code == 424:
        return ShardNotReadyError(message)
    if status_code == 504:
        return ShardTimeoutError(message)
    if status_code == 404:
        return NotFoundError(message)
    if status_code == 429:
        return RateLimitExceededError(message)

    # Fallback to an unknown error
    return UnknownError(message)
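A quick sanity check, grounded directly in the branches above: `error_type`, when present, wins over the HTTP status code, and anything unrecognized falls through to UnknownError.

err = parse_error(400, {"error": "overloaded", "error_type": "overloaded"})
assert isinstance(err, OverloadedError)  # error_type takes precedence over 400
assert isinstance(parse_error(429, {"error": "slow down"}), RateLimitExceededError)
assert isinstance(parse_error(418, {"error": "teapot"}), UnknownError)  # fallback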
Get all currently deployed models with text-generation-inference support Returns: List[DeployedModel]: list of all currently deployed models
def deployed_models(headers: Optional[Dict] = None) -> List[DeployedModel]:
    """
    Get all currently deployed models with text-generation-inference support

    Returns:
        List[DeployedModel]: list of all currently deployed models
    """
    resp = requests.get(
        "https://api-inference.huggingface.co/framework/text-generation-inference",
        headers=headers,
        timeout=5,
    )

    payload = resp.json()
    if resp.status_code != 200:
        raise parse_error(resp.status_code, payload)

    models = [DeployedModel(**raw_deployed_model) for raw_deployed_model in payload]
    return models
Check if a given model is supported by text-generation-inference Returns: bool: whether the model is supported by this client
def check_model_support(repo_id: str, headers: Optional[Dict] = None) -> bool:
    """
    Check if a given model is supported by text-generation-inference

    Returns:
        bool: whether the model is supported by this client
    """
    resp = requests.get(
        f"https://api-inference.huggingface.co/status/{repo_id}",
        headers=headers,
        timeout=5,
    )

    payload = resp.json()
    if resp.status_code != 200:
        raise parse_error(resp.status_code, payload)

    framework = payload["framework"]
    supported = framework == "text-generation-inference"
    return supported
Applies split marker based on a regex match of special tokens such as [START_DNA]. Parameters ---------- m : re.Match Regex match of the special-token sequence to split Returns ---------- str - the text with the split token added
def _insert_split_marker(m: re.Match):
    """
    Applies split marker based on a regex match of special tokens such as
    [START_DNA].

    Parameters
    ----------
    m : re.Match
        Regex match of the special-token sequence to split

    Returns
    ----------
    str - the text with the split token added
    """
    start_token, _, sequence, end_token = m.groups()
    sequence = re.sub(r"(.)", rf"{SPLIT_MARKER}\1", sequence, flags=re.DOTALL)
    return f"{start_token}{sequence}{SPLIT_MARKER}{end_token}"
Applies custom splitting to the text for GALILEO's tokenization Parameters ---------- text : str Input text to split Returns ---------- str - the text with the split token added
def escape_custom_split_sequence(text):
    """
    Applies custom splitting to the text for GALILEO's tokenization

    Parameters
    ----------
    text : str
        Input text to split

    Returns
    ----------
    str - the text with the split token added
    """
    return CUSTOM_SEQ_RE.sub(_insert_split_marker, text)
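To see the two helpers in action, here is a self-contained sketch. `SPLIT_MARKER` and `CUSTOM_SEQ_RE` below are illustrative stand-ins for the module-level constants, not their actual values; the marker-insertion logic mirrors `_insert_split_marker` above.

import re

SPLIT_MARKER = "<SPL>"  # hypothetical marker; the real module defines its own
CUSTOM_SEQ_RE = re.compile(r"(\[START_(DNA|SMILES)\])(.*?)(\[END_\2\])")  # assumed pattern

def _insert_split_marker(m: re.Match):
    start_token, _, sequence, end_token = m.groups()
    # Prefix every character of the special sequence with the split marker.
    sequence = re.sub(r"(.)", rf"{SPLIT_MARKER}\1", sequence, flags=re.DOTALL)
    return f"{start_token}{sequence}{SPLIT_MARKER}{end_token}"

print(CUSTOM_SEQ_RE.sub(_insert_split_marker, "[START_DNA]ACGT[END_DNA]"))
# [START_DNA]<SPL>A<SPL>C<SPL>G<SPL>T<SPL>[END_DNA]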
Calculate the shape of the image patch grid after the preprocessing for images of any resolution. Args: image_size (`tuple`): The size of the input image in the format (width, height). grid_pinpoints (`List`): A list containing possible resolutions. Each item in the list should be a tuple or list of the form `(height, width)`. patch_size (`int`): The size of each image patch. Returns: tuple: The shape of the image patch grid in the format (height, width).
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
    """
    Calculate the shape of the image patch grid after the preprocessing for
    images of any resolution.

    Args:
        image_size (`tuple`):
            The size of the input image in the format (width, height).
        grid_pinpoints (`List`):
            A list containing possible resolutions. Each item in the list
            should be a tuple or list of the form `(height, width)`.
        patch_size (`int`):
            The size of each image patch.

    Returns:
        tuple: The shape of the image patch grid in the format
        (height, width).
    """
    if not isinstance(grid_pinpoints, list):
        raise ValueError("grid_pinpoints should be a list of tuples or lists")

    height, width = select_best_resolution(image_size, grid_pinpoints)
    return height // patch_size, width // patch_size
Make causal mask used for self-attention.
def _make_causal_mask(
    input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int
) -> torch.BoolTensor:
    """
    Make causal mask used for self-attention.
    """
    batch_size, target_length = input_ids_shape
    mask = torch.ones(
        (target_length, target_length + past_key_values_length),
        dtype=torch.bool,
        device=device,
    )
    mask = mask.triu(1 + past_key_values_length)

    expanded_mask = mask.unsqueeze(0).expand(
        batch_size, target_length, target_length + past_key_values_length
    )
    return expanded_mask
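A worked example of the layout, under the assumption that `True` marks positions to be masked out: three new tokens attending over one cached token.

import torch

mask = _make_causal_mask(torch.Size((1, 3)), torch.device("cpu"), past_key_values_length=1)
print(mask[0].int())
# tensor([[0, 0, 1, 1],
#         [0, 0, 0, 1],
#         [0, 0, 0, 0]], dtype=torch.int32)
# Row i may attend to the cached column 0 and to columns <= i + 1;
# triu(2) leaves True only at strictly-future positions.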
Expands attention_mask from `[batch_size, src_length]` to `[batch_size, tgt_length, src_length]`.
def _expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor:
    """
    Expands attention_mask from `[batch_size, src_length]` to
    `[batch_size, tgt_length, src_length]`.
    """
    batch_size, src_length = mask.shape
    tgt_length = tgt_length if tgt_length is not None else src_length

    expanded_mask = ~(mask[:, None, :].to(torch.bool))
    return expanded_mask.expand(batch_size, tgt_length, src_length)
Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value `softmax(l+a) = softmax(l)`. Based on https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742 TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly. Args: attention_mask (`torch.Tensor`): Token-wise attention mask, this should be of shape (batch_size, max_seq_len). num_heads (`int`, *required*): number of heads Returns: Tensor shaped (batch_size, num_heads, max_seq_len).
def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int) -> torch.Tensor:
    """
    Link to paper: https://arxiv.org/abs/2108.12409
    Alibi tensor is not causal as the original paper mentions, it relies on a
    translation invariance of softmax for quick implementation: with l being
    a tensor, and a fixed value `softmax(l+a) = softmax(l)`. Based on
    https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
    TODO @thomasw21 this doesn't work as nicely due to the masking strategy,
    and so masking varies slightly.

    Args:
        attention_mask (`torch.Tensor`):
            Token-wise attention mask, this should be of shape
            (batch_size, max_seq_len).
        num_heads (`int`, *required*):
            number of heads

    Returns:
        Tensor shaped (batch_size, num_heads, max_seq_len).
    """
    batch_size, seq_length = attention_mask.shape
    closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
    base = torch.tensor(
        2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))),
        device=attention_mask.device,
        dtype=torch.float32,
    )
    powers = torch.arange(
        1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32
    )
    slopes = torch.pow(base, powers)

    if closest_power_of_2 != num_heads:
        extra_base = torch.tensor(
            2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))),
            device=attention_mask.device,
            dtype=torch.float32,
        )
        num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
        extra_powers = torch.arange(
            1,
            1 + 2 * num_remaining_heads,
            2,
            device=attention_mask.device,
            dtype=torch.int32,
        )
        slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)

    # Note: alibi will be added to the attention bias that will be applied to
    # the query, key product of attention
    # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
    # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
    # => the query_length dimension will then be broadcasted correctly
    # This is more or less identical to T5's relative position bias:
    # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
    arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
    alibi = slopes[..., None] * arange_tensor
    return alibi
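A worked check of the slope formula above: for num_heads = 4, base = 2**-(2**-(log2(4) - 3)) = 2**-2, so the slopes are the geometric sequence 2**-2, 2**-4, 2**-6, 2**-8 from the ALiBi paper. Note that the broadcast `slopes[..., None] * arange_tensor` yields a (batch_size, num_heads, seq_len) tensor here; presumably a caller reshapes it into the per-head bias layout before adding it to attention scores.

import torch

attention_mask = torch.ones(1, 5, dtype=torch.int64)
alibi = build_alibi_tensor(attention_mask, num_heads=4)
print(alibi.shape)      # torch.Size([1, 4, 5])
print(alibi[0, :, -1])  # tensor([1.0000, 0.2500, 0.0625, 0.0156]) -- slopes * position 4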
Dropout add function Args: x (`torch.tensor`, *required*): input tensor residual (`torch.tensor`, *required*): residual tensor prob (`float`, *required*): dropout probability training (`bool`, *required*): training mode
def dropout_add(
    x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool
) -> torch.Tensor:
    """
    Dropout add function

    Args:
        x (`torch.tensor`, *required*):
            input tensor
        residual (`torch.tensor`, *required*):
            residual tensor
        prob (`float`, *required*):
            dropout probability
        training (`bool`, *required*):
            training mode
    """
    out = F.dropout(x, p=prob, training=training)
    out = residual + out
    return out
Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory storage as `fused_qkv` Args: fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim] Returns: query: [batch_size * num_heads, seq_length, head_dim] key: [batch_size * num_heads, head_dim, seq_length] value: [batch_size * num_heads, seq_length, head_dim]
def _split_heads(
    fused_qkv: torch.Tensor, num_heads: int, head_dim: int
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Split the last dimension into (num_heads, head_dim) without making any
    copies, results share same memory storage as `fused_qkv`

    Args:
        fused_qkv (`torch.tensor`, *required*):
            [batch_size, seq_length, num_heads * 3 * head_dim]

    Returns:
        query: [batch_size * num_heads, seq_length, head_dim]
        key: [batch_size * num_heads, head_dim, seq_length]
        value: [batch_size * num_heads, seq_length, head_dim]
    """
    batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
    fused_qkv = fused_qkv.view(batch_size, seq_length, num_heads, 3 * head_dim)
    query_layer, key_layer, value_layer = fused_qkv.split(head_dim, dim=-1)

    query_layer = query_layer.transpose(1, 2).reshape(
        batch_size * num_heads, seq_length, head_dim
    )
    # The key is laid out as (head_dim, seq_length) so it is ready for Q @ K.
    # NOTE: reshape after transpose/permute may copy when non-contiguous.
    key_layer = key_layer.permute(0, 2, 3, 1).reshape(
        batch_size * num_heads, head_dim, seq_length
    )
    value_layer = value_layer.transpose(1, 2).reshape(
        batch_size * num_heads, seq_length, head_dim
    )

    return query_layer, key_layer, value_layer
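A quick shape check (assuming the `_split_heads` above is in scope); the key comes back pre-transposed so attention scores can be computed directly with a batched matmul.

import torch

batch, seq, heads, hd = 2, 5, 4, 8
fused = torch.randn(batch, seq, heads * 3 * hd)  # (2, 5, 96)
q, k, v = _split_heads(fused, num_heads=heads, head_dim=hd)
print(q.shape, k.shape, v.shape)
# torch.Size([8, 5, 8]) torch.Size([8, 8, 5]) torch.Size([8, 5, 8])
scores = torch.bmm(q, k)  # (batch * heads, seq, seq), no extra transpose needed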
Merge heads together over the last dimension Args: x: (`torch.tensor`, *required*): [batch_size * num_heads, seq_length, head_dim] Returns: torch.tensor: [batch_size, seq_length, num_heads * head_dim]
def _merge_heads(x: torch.Tensor, num_heads: int, head_dim: int) -> torch.Tensor:
    """
    Merge heads together over the last dimension

    Args:
        x: (`torch.tensor`, *required*):
            [batch_size * num_heads, seq_length, head_dim]

    Returns:
        torch.tensor: [batch_size, seq_length, num_heads * head_dim]
    """
    # What we want to achieve is:
    # batch_size * num_heads, seq_length, head_dim ->
    #     batch_size, seq_length, num_heads * head_dim
    batch_size_and_num_heads, seq_length, _ = x.shape
    batch_size = batch_size_and_num_heads // num_heads

    # First view to decompose the batch size
    # batch_size * num_heads, seq_length, head_dim ->
    #     batch_size, num_heads, seq_length, head_dim
    x = x.view(batch_size, num_heads, seq_length, head_dim)

    # batch_size, num_heads, seq_length, head_dim ->
    #     batch_size, seq_length, num_heads, head_dim
    x = x.permute(0, 2, 1, 3)

    # batch_size, seq_length, num_heads, head_dim ->
    #     batch_size, seq_length, num_heads * head_dim
    return x.reshape(batch_size, seq_length, num_heads * head_dim)
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
    The hidden states go from (batch, num_key_value_heads, seqlen, head_dim)
    to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(
        batch, num_key_value_heads, n_rep, slen, head_dim
    )
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
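A sanity check of the equivalence stated in the docstring (the expand-and-reshape produces exactly the interleaved head order):

import torch

x = torch.randn(2, 3, 4, 5)  # (batch, num_key_value_heads, seqlen, head_dim)
assert torch.equal(repeat_kv(x, 2), torch.repeat_interleave(x, repeats=2, dim=1))
print(repeat_kv(x, 2).shape)  # torch.Size([2, 6, 4, 5])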
Make causal mask used for bi-directional self-attention.
def _make_causal_mask(
    input_ids_shape: torch.Size,
    dtype: torch.dtype,
    device: torch.device,
    past_key_values_length: int = 0,
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat(
            [
                torch.zeros(
                    tgt_len, past_key_values_length, dtype=dtype, device=device
                ),
                mask,
            ],
            dim=-1,
        )
    return mask[None, None, :, :].expand(
        bsz, 1, tgt_len, tgt_len + past_key_values_length
    )
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to
    `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(
        inverted_mask.to(torch.bool), torch.finfo(dtype).min
    )
Checks if the passed string contains a valid URL and nothing else; e.g. if a space is included, the URL is immediately invalidated.
def is_url(string):
    """Checks if the passed string contains a valid URL and nothing else;
    e.g. if a space is included, the URL is immediately invalidated."""
    if " " in string:
        return False
    result = urlparse(string)
    return all([result.scheme, result.netloc])
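Behavior check, straight from the logic above:

assert is_url("https://example.com/image.png")
assert not is_url("https://example.com and more")  # a space rejects immediately
assert not is_url("example.com/image.png")         # urlparse finds no scheme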
Checks if the passed string is an image reference: either a valid URL (per is_url) or an inline `data:` URI.
def is_image(string):
    """Checks if the passed string is an image reference: either a valid URL
    (per is_url) or an inline `data:` URI."""
    return is_url(string) or string.startswith("data:")
Calculate the shape of the image patch grid after the preprocessing for images of any resolution. Args: image_size (`tuple`): The size of the input image in the format (width, height). grid_pinpoints (`List`): A list containing possible resolutions. Each item in the list should be a tuple or list of the form `(height, width)`. patch_size (`int`): The size of each image patch. Returns: tuple: The shape of the image patch grid in the format (height, width).
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
    """
    Calculate the shape of the image patch grid after the preprocessing for
    images of any resolution.

    Args:
        image_size (`tuple`):
            The size of the input image in the format (width, height).
        grid_pinpoints (`List`):
            A list containing possible resolutions. Each item in the list
            should be a tuple or list of the form `(height, width)`.
        patch_size (`int`):
            The size of each image patch.

    Returns:
        tuple: The shape of the image patch grid in the format
        (height, width).
    """
    if not isinstance(grid_pinpoints, list):
        raise ValueError("grid_pinpoints should be a list of tuples or lists")

    height, width = select_best_resolution(image_size, grid_pinpoints)
    return height // patch_size, width // patch_size
Unpads a PyTorch tensor of a padded and resized image. Args: tensor (`torch.Tensor`): The image tensor, assumed to be of shape (num_channels, height, width). original_size (`tuple`): The original size of the image (height, width). Returns: `torch.Tensor`: The unpadded image tensor.
def unpad_image(tensor, original_size):
    """
    Unpads a PyTorch tensor of a padded and resized image.

    Args:
        tensor (`torch.Tensor`):
            The image tensor, assumed to be of shape
            (num_channels, height, width).
        original_size (`tuple`):
            The original size of the image (height, width).

    Returns:
        `torch.Tensor`: The unpadded image tensor.
    """
    original_height, original_width = original_size
    current_height, current_width = tensor.shape[1:]

    original_aspect_ratio = original_width / original_height
    current_aspect_ratio = current_width / current_height

    if original_aspect_ratio > current_aspect_ratio:
        scale_factor = current_width / original_width
        new_height = int(original_height * scale_factor)
        padding = (current_height - new_height) // 2
        unpadded_tensor = tensor[:, padding : current_height - padding, :]
    else:
        scale_factor = current_height / original_height
        new_width = int(original_width * scale_factor)
        padding = (current_width - new_width) // 2
        unpadded_tensor = tensor[:, :, padding : current_width - padding]

    return unpadded_tensor
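A worked example of the wide-image branch: a 400x600 (height x width) original resized-and-padded into a 336x336 square gets its vertical padding stripped.

import torch

# original aspect 600/400 = 1.5 > current 1.0, so the first branch runs:
#   scale_factor = 336 / 600 = 0.56
#   new_height   = int(400 * 0.56) = 224
#   padding      = (336 - 224) // 2 = 56  -> keep rows 56:280
t = torch.zeros(3, 336, 336)
print(unpad_image(t, (400, 600)).shape)  # torch.Size([3, 224, 336])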
Make causal mask used for self-attention.
def make_causal_mask(
    input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int
) -> torch.BoolTensor:
    """
    Make causal mask used for self-attention.
    """
    batch_size, target_length = input_ids_shape
    mask = torch.ones(
        (target_length, target_length + past_key_values_length),
        dtype=torch.bool,
        device=device,
    )
    mask = mask.triu(1 + past_key_values_length)

    expanded_mask = mask.unsqueeze(0).expand(
        batch_size, target_length, target_length + past_key_values_length
    )
    return expanded_mask
Expands attention_mask from `[batch_size, src_length]` to `[batch_size, tgt_length, src_length]`.
def expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor:
    """
    Expands attention_mask from `[batch_size, src_length]` to
    `[batch_size, tgt_length, src_length]`.
    """
    batch_size, src_length = mask.shape
    tgt_length = tgt_length if tgt_length is not None else src_length

    expanded_mask = ~(mask[:, None, :].to(torch.bool))
    return expanded_mask.expand(batch_size, tgt_length, src_length)
Make causal mask used for bi-directional self-attention.
def _make_causal_mask(
    input_ids_shape: torch.Size,
    dtype: torch.dtype,
    device: torch.device,
    past_key_values_length: int = 0,
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full(
        (tgt_len, tgt_len),
        torch.tensor(torch.finfo(dtype).min, device=device),
        device=device,
    )
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat(
            [
                torch.zeros(
                    tgt_len, past_key_values_length, dtype=dtype, device=device
                ),
                mask,
            ],
            dim=-1,
        )
    return mask[None, None, :, :].expand(
        bsz, 1, tgt_len, tgt_len + past_key_values_length
    )
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to
    `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(
        inverted_mask.to(torch.bool), torch.finfo(dtype).min
    )
Convert a pytorch file to a safetensors file. This will remove duplicate tensors from the file. Unfortunately, this might not respect *transformers* convention, forcing us to check for potentially different keys during load when looking for specific tensors (making tensor sharing explicit).
def convert_file(pt_file: Path, sf_file: Path, discard_names: List[str]):
    """
    Convert a pytorch file to a safetensors file.

    This will remove duplicate tensors from the file. Unfortunately, this
    might not respect *transformers* convention, forcing us to check for
    potentially different keys during load when looking for specific tensors
    (making tensor sharing explicit).
    """
    loaded = torch.load(pt_file, map_location="cpu", weights_only=True)
    if "state_dict" in loaded:
        loaded = loaded["state_dict"]
    to_removes = _remove_duplicate_names(loaded, discard_names=discard_names)

    metadata = {"format": "pt"}
    for kept_name, to_remove_group in to_removes.items():
        for to_remove in to_remove_group:
            if to_remove not in metadata:
                metadata[to_remove] = kept_name
            del loaded[to_remove]
    # Force tensors to be contiguous
    loaded = {k: v.contiguous() for k, v in loaded.items()}

    dirname = os.path.dirname(sf_file)
    os.makedirs(dirname, exist_ok=True)
    save_file(loaded, sf_file, metadata=metadata)
    reloaded = load_file(sf_file)
    for k in loaded:
        pt_tensor = loaded[k]
        sf_tensor = reloaded[k]
        if not torch.equal(pt_tensor, sf_tensor):
            raise RuntimeError(f"The output tensors do not match for key {k}")
Guess weight files from the cached revision snapshot directory
def _cached_weight_files(
    model_id: str, revision: Optional[str], extension: str
) -> List[str]:
    """Guess weight files from the cached revision snapshot directory"""
    d = _get_cached_revision_directory(model_id, revision)
    if not d:
        return []
    filenames = _weight_files_from_dir(d, extension)
    return filenames
Get the weights filenames on the hub
def weight_hub_files(
    model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[str]:
    """Get the weights filenames on the hub"""
    api = HfApi()

    if HF_HUB_OFFLINE:
        filenames = _cached_weight_files(model_id, revision, extension)
    else:
        # Online case, fetch model info from the Hub
        info = api.model_info(model_id, revision=revision)
        filenames = _weight_hub_files_from_model_info(info, extension)

    if not filenames:
        raise EntryNotFoundError(
            f"No {extension} weights found for model {model_id} and revision {revision}.",
            None,
        )

    return filenames
Try to load a file from the Hugging Face cache
def try_to_load_from_cache(
    model_id: str, revision: Optional[str], filename: str
) -> Optional[Path]:
    """Try to load a file from the Hugging Face cache"""
    d = _get_cached_revision_directory(model_id, revision)
    if not d:
        return None

    # Check if file exists in cache
    cached_file = d / filename
    return cached_file if cached_file.is_file() else None
Get the local files
def weight_files(
    model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[Path]:
    """Get the local files"""
    # Local model
    d = Path(model_id)
    if d.exists() and d.is_dir():
        local_files = _weight_files_from_dir(d, extension)
        if not local_files:
            raise FileNotFoundError(
                f"No local weights found in {model_id} with extension {extension}"
            )
        return [Path(f) for f in local_files]

    try:
        filenames = weight_hub_files(model_id, revision, extension)
    except EntryNotFoundError as e:
        if extension != ".safetensors":
            raise e
        # Try to see if there are pytorch weights
        pt_filenames = weight_hub_files(model_id, revision, extension=".bin")
        # Change pytorch extension to safetensors extension
        # It is possible that we have safetensors weights locally even though
        # they are not on the hub if we converted weights locally without
        # pushing them
        filenames = [
            # removeprefix avoids lstrip's character-set semantics, which
            # would also strip leading letters of the stem itself
            f"{Path(f).stem.removeprefix('pytorch_')}.safetensors"
            for f in pt_filenames
        ]

    if WEIGHTS_CACHE_OVERRIDE is not None:
        files = []
        for filename in filenames:
            p = Path(WEIGHTS_CACHE_OVERRIDE) / filename
            if not p.exists():
                raise FileNotFoundError(
                    f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}."
                )
            files.append(p)
        return files

    files = []
    for filename in filenames:
        cache_file = try_to_load_from_cache(
            model_id, revision=revision, filename=filename
        )
        if cache_file is None:
            raise LocalEntryNotFoundError(
                f"File {filename} of model {model_id} not found in "
                f"{os.getenv('HUGGINGFACE_HUB_CACHE', 'the local cache')}. "
                f"Please run `text-generation-server download-weights {model_id}` first."
            )
        files.append(cache_file)

    return files
Download the safetensors files from the hub
def download_weights(
    filenames: List[str], model_id: str, revision: Optional[str] = None
) -> List[Path]:
    """Download the safetensors files from the hub"""

    def download_file(fname, tries=5, backoff: int = 5):
        local_file = try_to_load_from_cache(model_id, revision, fname)
        if local_file is not None:
            logger.info(f"File {fname} already present in cache.")
            return Path(local_file)

        for idx in range(tries):
            try:
                logger.info(f"Download file: {fname}")
                stime = time.time()
                local_file = hf_hub_download(
                    filename=fname,
                    repo_id=model_id,
                    revision=revision,
                    local_files_only=HF_HUB_OFFLINE,
                )
                logger.info(
                    f"Downloaded {local_file} in {timedelta(seconds=int(time.time() - stime))}."
                )
                return Path(local_file)
            except Exception as e:
                if idx + 1 == tries:
                    raise e
                logger.error(e)
                logger.info(f"Retrying in {backoff} seconds")
                time.sleep(backoff)
                logger.info(f"Retry {idx + 1}/{tries - 1}")

    # We do this instead of using tqdm because we want to parse the logs with
    # the launcher
    start_time = time.time()
    files = []
    for i, filename in enumerate(filenames):
        file = download_file(filename)

        elapsed = timedelta(seconds=int(time.time() - start_time))
        remaining = len(filenames) - (i + 1)
        eta = (elapsed / (i + 1)) * remaining if remaining > 0 else 0
        logger.info(f"Download: [{i + 1}/{len(filenames)}] -- ETA: {eta}")
        files.append(file)

    return files
Find the top n most likely tokens for a batch of generations. When multiple tokens have equal probabilities and they don't all fit, the remaining tokens are also returned.
def batch_top_tokens(
    top_n_tokens: List[int],
    top_n_tokens_tensor: torch.Tensor,
    logprobs: torch.Tensor,
    accepted_ids: torch.Tensor,
) -> Tuple[List[List[List[int]]], List[List[List[float]]]]:
    """Find the top n most likely tokens for a batch of generations.

    When multiple tokens have equal probabilities and they don't all fit,
    the remaining tokens are also returned.
    """
    max_top_n = max(top_n_tokens)
    # Early exit when top_n_tokens is not used
    if max_top_n == 0:
        return [[[]]] * len(top_n_tokens), [[[]]] * len(top_n_tokens)

    batch_size = accepted_ids.shape[0]
    speculate_size = logprobs.shape[0] // batch_size
    top_n_tokens_tensor = top_n_tokens_tensor.repeat_interleave(speculate_size)
    # Ensure top_n doesn't exceed vocab size
    top_n_tokens = [
        min(tok, logprobs.size(-1))
        for tok in top_n_tokens
        for _ in range(speculate_size)
    ]

    # Parallel kthvalue adapted from https://discuss.pytorch.org/t/how-to-efficiently-get-the-k-th-largest-values-in-parallel/160529/2
    # Sorted topk is faster than torch.sort() since we only need a small subset
    sorted_top_k = torch.topk(logprobs, k=max_top_n, dim=-1, sorted=True).values
    nth_highest = torch.gather(
        sorted_top_k, 1, (top_n_tokens_tensor - 1).clip(min=0).unsqueeze(1)
    )
    nth_highest[nth_highest == -float("inf")] = torch.finfo(logprobs.dtype).min

    # Find the new "fuzzy" top n values
    top_n_indices = (logprobs >= nth_highest).nonzero()
    _, top_n_ishes = torch.unique_consecutive(top_n_indices[:, 0], return_counts=True)

    k = 1 if top_n_ishes.numel() == 0 else top_n_ishes.max()
    # Take a new topk for these new max n values
    top_k = torch.topk(logprobs, k=k, dim=1, sorted=True)

    top_n_ishes = top_n_ishes.tolist()
    top_indices = top_k.indices.tolist()
    top_values = top_k.values.tolist()

    batch_top_token_ids = []
    batch_top_token_logprobs = []
    accepted_ids_list = accepted_ids.tolist()
    for i, n_accepted_ids in enumerate(accepted_ids_list):
        start = speculate_size * i
        stop = speculate_size * (i + 1)

        _top_indices = top_indices[start:stop]
        _top_values = top_values[start:stop]
        _top_n_ishes = top_n_ishes[start:stop]
        _top_n_tokens = top_n_tokens[start:stop]

        _top_indices = _top_indices[:n_accepted_ids]
        _top_values = _top_values[:n_accepted_ids]
        _top_n_ishes = _top_n_ishes[:n_accepted_ids]
        _top_n_tokens = _top_n_tokens[:n_accepted_ids]

        row_top_token_ids = []
        row_top_token_logprobs = []

        for idxs, vals, n, req_n in zip(
            _top_indices, _top_values, _top_n_ishes, _top_n_tokens
        ):
            indices = idxs[:n] if req_n > 0 else []
            values = vals[:n] if req_n > 0 else []
            row_top_token_ids.append(indices)
            row_top_token_logprobs.append(values)

        batch_top_token_ids.append(row_top_token_ids)
        batch_top_token_logprobs.append(row_top_token_logprobs)

    return batch_top_token_ids, batch_top_token_logprobs
Packs a 4-bit integer matrix into a packed 32-bit integer matrix. Args: imatrix (torch.Tensor): matrix of integers direction (str): direction of packing, either "column" or "row" Returns: qmatrix (torch.Tensor): packed matrix of integers
def pack(imatrix: torch.Tensor, direction: str = "column"):
    """
    Packs a 4-bit integer matrix into a packed 32-bit integer matrix.
    Args:
        imatrix (torch.Tensor): matrix of integers
        direction (str): direction of packing, either "column" or "row"
    Returns:
        qmatrix (torch.Tensor): packed matrix of integers
    """
    shifts = torch.arange(0, 32, 4, dtype=torch.int32, device=imatrix.device)

    imatrix = imatrix.to(torch.int8) & 0x0F  # mask to the low 4 bits to correct any overflow

    if direction == "column":
        imatrix = imatrix.view(-1, imatrix.shape[1] // (32 // 4), (32 // 4))
        qmatrix = torch.bitwise_left_shift(imatrix, shifts[None, None, :]).sum(dim=-1)

    elif direction == "row":
        imatrix = imatrix.view(imatrix.shape[0] // (32 // 4), (32 // 4), -1)
        qmatrix = torch.bitwise_left_shift(imatrix, shifts[None, :, None]).sum(dim=1)

    qmatrix = qmatrix.to(torch.int32)

    return qmatrix
Unpacks a 32-bit packed integer matrix into a 4-bit integer matrix. Args: qmatrix (torch.Tensor): matrix of packed integers direction (str): direction of unpacking, either "column" or "row" Returns: imatrix (torch.Tensor): matrix of integers
def unpack(qmatrix: torch.Tensor, direction: str = "column"):
    """
    Unpacks a 32-bit packed integer matrix into a 4-bit integer matrix.
    Args:
        qmatrix (torch.Tensor): matrix of packed integers
        direction (str): direction of unpacking, either "column" or "row"
    Returns:
        imatrix (torch.Tensor): matrix of integers
    """
    shifts = torch.arange(0, 32, 4, device=qmatrix.device)

    if direction == "column":
        imatrix = torch.bitwise_right_shift(
            qmatrix[:, :, None], shifts[None, None, :]
        ).view(qmatrix.shape[0], -1)

    elif direction == "row":
        imatrix = torch.bitwise_right_shift(
            qmatrix[:, None, :], shifts[None, :, None]
        ).view(-1, qmatrix.shape[-1])

    imatrix = imatrix.to(torch.int8) & 0x0F  # mask to the low 4 bits to discard sign-extension bits

    return imatrix
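A round-trip sanity check for pack/unpack; the column direction packs eight 4-bit values per int32 along dim 1, so the column count must be a multiple of 8.

import torch

m = torch.randint(0, 16, (2, 8), dtype=torch.int32)  # values already fit in 4 bits
q = pack(m, direction="column")    # shape (2, 1): eight nibbles per int32
r = unpack(q, direction="column")  # shape (2, 8), dtype int8
assert (r == m.to(torch.int8)).all()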
Applies the order to a 4-bit integer matrix. Args: imatrix (torch.Tensor): matrix of integers direction (str): direction of applying order, either "column" or "row" order (List[int]): order to apply, default is AWQ_PACK_ORDER Returns: imatrix (torch.Tensor): matrix of integers
def apply_order( imatrix: torch.Tensor, direction: str = "column", order: List[int] = AWQ_PACK_ORDER, ): """ Applies the order to a 4-bit integer matrix. Args: imatrix (torch.Tensor): matrix of integers direction (str): direction of applying order, either "column" or "row" order (List[int]): order to apply, default is AWQ_PACK_ORDER Returns: imatrix (torch.Tensor): matrix of integers """ if direction == "column": imatrix = imatrix.view(-1, (32 // 4))[:, order].view(imatrix.shape) elif direction == "row": imatrix = imatrix.view((32 // 4), -1)[order, :].view(imatrix.shape) return imatrix
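For reference, AWQ_PACK_ORDER is assumed to be defined earlier in this module; the conventional AWQ interleaving is [0, 2, 4, 6, 1, 3, 5, 7], and applying the inverse permutation undoes the reordering:

import torch

order = [0, 2, 4, 6, 1, 3, 5, 7]              # assumed AWQ_PACK_ORDER
inverse = [order.index(i) for i in range(8)]  # [0, 4, 1, 5, 2, 6, 3, 7]

m = torch.arange(16, dtype=torch.int32).view(2, 8)
assert (apply_order(apply_order(m, "column", order), "column", inverse) == m).all()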
Decorator for auto-tuning a :code:`triton.jit`'d function.

.. highlight:: python
.. code-block:: python

    @triton.autotune(configs=[
        triton.Config(meta={'BLOCK_SIZE': 128}, num_warps=4),
        triton.Config(meta={'BLOCK_SIZE': 1024}, num_warps=8),
      ],
      key=['x_size'] # the two above configs will be evaluated anytime
                     # the value of x_size changes
    )
    @triton.jit
    def kernel(x_ptr, x_size, **META):
        BLOCK_SIZE = META['BLOCK_SIZE']

:note: When all the configurations are evaluated, the kernel will run multiple times. This means that whatever value the kernel updates will be updated multiple times. To avoid this undesired behavior, you can use the `reset_to_zero` argument, which resets the value of the provided tensors to zero before running any configuration.
:param configs: a list of :code:`triton.Config` objects
:type configs: list[triton.Config]
:param key: a list of argument names whose change in value will trigger the evaluation of all provided configs.
:type key: list[str]
:param prune_configs_by: a dict of functions used to prune configs. Fields: 'perf_model': performance model used to predict the running time of different configs; returns the running time. 'top_k': number of configs to bench. 'early_config_prune' (optional): a function used to prune configs early (e.g. by num_stages); it takes configs: List[Config] as input and returns the pruned configs.
:param reset_to_zero: a list of argument names whose values will be reset to zero before evaluating any configs.
:type reset_to_zero: list[str]
def autotune(
    configs, key, prune_configs_by=None, reset_to_zero=None, nearest_power_of_two=False
):
    """
    Decorator for auto-tuning a :code:`triton.jit`'d function.

    .. highlight:: python
    .. code-block:: python

        @triton.autotune(configs=[
            triton.Config(meta={'BLOCK_SIZE': 128}, num_warps=4),
            triton.Config(meta={'BLOCK_SIZE': 1024}, num_warps=8),
          ],
          key=['x_size'] # the two above configs will be evaluated anytime
                         # the value of x_size changes
        )
        @triton.jit
        def kernel(x_ptr, x_size, **META):
            BLOCK_SIZE = META['BLOCK_SIZE']

    :note: When all the configurations are evaluated, the kernel will run
        multiple times. This means that whatever value the kernel updates
        will be updated multiple times. To avoid this undesired behavior,
        you can use the `reset_to_zero` argument, which resets the value of
        the provided tensors to zero before running any configuration.
    :param configs: a list of :code:`triton.Config` objects
    :type configs: list[triton.Config]
    :param key: a list of argument names whose change in value will trigger
        the evaluation of all provided configs.
    :type key: list[str]
    :param prune_configs_by: a dict of functions used to prune configs. Fields:
        'perf_model': performance model used to predict the running time of
        different configs; returns the running time.
        'top_k': number of configs to bench.
        'early_config_prune' (optional): a function used to prune configs
        early (e.g. by num_stages); it takes configs: List[Config] as input
        and returns the pruned configs.
    :param reset_to_zero: a list of argument names whose values will be reset
        to zero before evaluating any configs.
    :type reset_to_zero: list[str]
    """

    def decorator(fn):
        return Autotuner(
            fn,
            fn.arg_names,
            configs,
            key,
            reset_to_zero,
            prune_configs_by,
            nearest_power_of_two,
        )

    return decorator
Shrink BLOCK_SIZE_* when the corresponding matmul dimension (M, N, or K) is smaller, and drop the duplicate configs that the clamping produces.
def matmul248_kernel_config_pruner(configs, nargs):
    """
    Shrink BLOCK_SIZE_* when the corresponding matmul dimension (M, N, or K)
    is smaller, and drop the duplicate configs that the clamping produces.
    """
    m = max(2 ** int(math.ceil(math.log2(nargs["M"]))), 16)
    n = max(2 ** int(math.ceil(math.log2(nargs["N"]))), 16)
    k = max(2 ** int(math.ceil(math.log2(nargs["K"]))), 16)

    used = set()
    for config in configs:
        block_size_m = min(m, config.kwargs["BLOCK_SIZE_M"])
        block_size_n = min(n, config.kwargs["BLOCK_SIZE_N"])
        block_size_k = min(k, config.kwargs["BLOCK_SIZE_K"])
        group_size_m = config.kwargs["GROUP_SIZE_M"]

        # Clamping can make several configs identical; keep only the first.
        if (
            block_size_m,
            block_size_n,
            block_size_k,
            group_size_m,
            config.num_stages,
            config.num_warps,
        ) in used:
            continue

        used.add(
            (
                block_size_m,
                block_size_n,
                block_size_k,
                group_size_m,
                config.num_stages,
                config.num_warps,
            )
        )
        yield triton.Config(
            {
                "BLOCK_SIZE_M": block_size_m,
                "BLOCK_SIZE_N": block_size_n,
                "BLOCK_SIZE_K": block_size_k,
                "GROUP_SIZE_M": group_size_m,
            },
            num_stages=config.num_stages,
            num_warps=config.num_warps,
        )
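For context, a pruner like this is typically wired into the autotune decorator above through prune_configs_by; a sketch, with illustrative config values and an elided kernel body:

import triton
import triton.language as tl

@autotune(
    configs=[
        triton.Config(
            {"BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 256, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8},
            num_stages=4,
            num_warps=4,
        ),
        triton.Config(
            {"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8},
            num_stages=4,
            num_warps=4,
        ),
    ],
    key=["M", "N", "K"],
    nearest_power_of_two=True,
    prune_configs_by={
        "early_config_prune": matmul248_kernel_config_pruner,
        "perf_model": None,
        "top_k": None,
    },
)
@triton.jit
def matmul_248_kernel(a_ptr, b_ptr, c_ptr, M, N, K,
                      BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
                      BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr):
    ...  # kernel body elided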
Construct Q4Matrix, return handle
def ext_make_q4(qweight, qzeros, scales, g_idx, device): """Construct Q4Matrix, return handle""" return make_q4( qweight, qzeros, scales, g_idx if g_idx is not None else none_tensor, device )
Matrix multiplication, returns x @ q4
def ext_q4_matmul(x, q4, q4_width): """Matrix multiplication, returns x @ q4""" outshape = x.shape[:-1] + (q4_width,) x = x.view(-1, x.shape[-1]) output = torch.empty((x.shape[0], q4_width), dtype=torch.float16, device=x.device) q4_matmul(x, q4, output) return output.view(outshape)
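A hypothetical wiring of the two helpers above; the tensor shapes follow the usual GPTQ 4-bit layout and are assumptions, not guarantees:

# For an (in_features x out_features) layer with group size 128:
#   qweight: int32 [in_features // 8,   out_features]
#   qzeros:  int32 [in_features // 128, out_features // 8]
#   scales:  fp16  [in_features // 128, out_features]
# x is a half-precision activation tensor on the same GPU.
q4 = ext_make_q4(qweight, qzeros, scales, None, qweight.device.index)
y = ext_q4_matmul(x.half(), q4, q4_width=qweight.shape[1])  # y ~= x @ dequant(W)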
Matrix multiplication, returns x @ q4
def ext_gemm_half_q_half(x, q_handle, q4_width, force_cuda): """Matrix multiplication, returns x @ q4""" output_shape = x.shape[:-1] + (q4_width,) x = x.view(-1, x.shape[-1]) output = torch.empty((x.shape[0], q4_width), dtype=torch.half, device=x.device) gemm_half_q_half(x, q_handle, output, force_cuda) return output.view(output_shape)
Create Q matrix
def ext_make_q_matrix(w: dict, temp_dq, key: str = None):
    """
    Create Q matrix
    """
    # EXL2
    # won't work at the moment because the tensors are not the same.
    if "q_weight" in w:
        w["q_scale_max"] /= 256
        w["q_perm"] = w["q_perm"].short()
        w["q_invperm"] = w["q_invperm"].short()

        if "q_group_map" not in w:
            w["q_group_map"] = make_group_map(w["q_groups"], w["q_weight"].shape[0])

        return make_q_matrix(
            w["q_weight"],
            w["q_perm"],
            w["q_invperm"],
            w["q_scale"],
            w["q_scale_max"],
            w["q_groups"],
            w["q_group_map"],
            none_tensor,
            none_tensor,
            none_tensor,
            temp_dq,
        )
    # GPTQ
    elif "qweight" in w:
        if w["scales"].dtype == torch.float:
            w["scales"] = w["scales"].half()

        # GPTQ with g_idx (act_order)
        if w.get("g_idx", None) is not None and not (w["g_idx"] == 0).all().item():
            w["q_perm"] = torch.empty(
                (w["qweight"].shape[0] * 8,),
                dtype=torch.short,
                device=w["qweight"].device,
            )
            w["q_invperm"] = torch.empty_like(w["q_perm"])
            # make_q_matrix segfaults if g_idx is not on cpu in the act-order
            # case. In the non act-order case, none_tensor is passed for
            # g_idx instead.
            return make_q_matrix(
                w["qweight"],
                w["q_perm"],
                w["q_invperm"],
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                w["qzeros"],
                w["scales"],
                w["g_idx"].cpu(),
                temp_dq,
            )
        # GPTQ without g_idx
        else:
            return make_q_matrix(
                w["qweight"],
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                none_tensor,
                w["qzeros"],
                w["scales"],
                none_tensor,
                temp_dq,
            )
This function is applied to your text inputs before they are fed into the model.
def input_modifier(string): """ This function is applied to your text inputs before they are fed into the model. """ return string
This function is applied to the model outputs.
def output_modifier(string): """ This function is applied to the model outputs. """ return string
This function is only applied in chat mode. It modifies the prefix text for the Bot and can be used to bias its behavior.
def bot_prefix_modifier(string): """ This function is only applied in chat mode. It modifies the prefix text for the Bot and can be used to bias its behavior. """ if params['activate']: if params['custom string'].strip() != '': return f'{string} {params["custom string"].strip()} ' else: return f'{string} {params["bias string"].strip()} ' else: return string
Modifies the chat history. Only used in chat mode.
def history_modifier(history): """ Modifies the chat history. Only used in chat mode. """ return history
Modifies the state variable, which is a dictionary containing the input values in the UI like sliders and checkboxes.
def state_modifier(state): """ Modifies the state variable, which is a dictionary containing the input values in the UI like sliders and checkboxes. """ return state
Modifies the user input string in chat mode (visible_text). You can also modify the internal representation of the user input (text) to change how it will appear in the prompt.
def chat_input_modifier(text, visible_text, state): """ Modifies the user input string in chat mode (visible_text). You can also modify the internal representation of the user input (text) to change how it will appear in the prompt. """ return text, visible_text
In default/notebook modes, modifies the whole prompt. In chat mode, it is the same as chat_input_modifier but only applied to "text", here called "string", and not to "visible_text".
def input_modifier(string, state, is_chat=False): """ In default/notebook modes, modifies the whole prompt. In chat mode, it is the same as chat_input_modifier but only applied to "text", here called "string", and not to "visible_text". """ return string
Modifies the prefix for the next bot reply in chat mode. By default, the prefix will be something like "Bot Name:".
def bot_prefix_modifier(string, state): """ Modifies the prefix for the next bot reply in chat mode. By default, the prefix will be something like "Bot Name:". """ return string
Modifies the input ids and embeds. Used by the multimodal extension to put image embeddings in the prompt. Only used by loaders that use the transformers library for sampling.
def tokenizer_modifier(state, prompt, input_ids, input_embeds): """ Modifies the input ids and embeds. Used by the multimodal extension to put image embeddings in the prompt. Only used by loaders that use the transformers library for sampling. """ return prompt, input_ids, input_embeds
Adds logits processors to the list, allowing you to access and modify the next token probabilities. Only used by loaders that use the transformers library for sampling.
def logits_processor_modifier(processor_list, input_ids): """ Adds logits processors to the list, allowing you to access and modify the next token probabilities. Only used by loaders that use the transformers library for sampling. """ processor_list.append(MyLogits()) return processor_list
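MyLogits is assumed to be defined elsewhere in the script; a minimal sketch of such a processor against the transformers API:

import torch
from transformers import LogitsProcessor

class MyLogits(LogitsProcessor):
    # Minimal sketch: rescale the next-token logits; replace with real logic.
    def __call__(
        self, input_ids: torch.LongTensor, scores: torch.FloatTensor
    ) -> torch.FloatTensor:
        return scores * 1.1  # e.g. mildly sharpen the distribution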
Modifies the LLM output before it gets presented. In chat mode, the modified version goes into history['visible'], and the original version goes into history['internal'].
def output_modifier(string, state, is_chat=False): """ Modifies the LLM output before it gets presented. In chat mode, the modified version goes into history['visible'], and the original version goes into history['internal']. """ return string
Replaces the function that generates the prompt from the chat history. Only used in chat mode.
def custom_generate_chat_prompt(user_input, state, **kwargs): """ Replaces the function that generates the prompt from the chat history. Only used in chat mode. """ result = chat.generate_chat_prompt(user_input, state, **kwargs) return result
Returns a CSS string that gets appended to the CSS for the webui.
def custom_css(): """ Returns a CSS string that gets appended to the CSS for the webui. """ return ''