# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Union, Tuple, List, Any
import torch
import inspect
from functools import partial, wraps
import contextlib
from torch.utils._pytree import tree_flatten, tree_unflatten, tree_map
from .pytree_hacks import tree_map_, treespec_pprint
import torch.autograd.forward_ad as fwAD
from .vmap import vmap
from .decompositions import decomposition_table, decomposition_table_for_jvp
from functorch._C import (
_wrap_for_grad,
_unwrap_for_grad,
_grad_increment_nesting,
_grad_decrement_nesting,
_jvp_increment_nesting,
_jvp_decrement_nesting,
set_fwd_grad_enabled,
get_fwd_grad_enabled,
_wrap_functional_tensor,
_unwrap_functional_tensor,
_func_decrement_nesting,
_func_increment_nesting,
_assert_wrapped_functional,
_propagate_functional_input_mutation,
set_inplace_requires_grad_allowed,
get_inplace_requires_grad_allowed,
)
argnums_t = Union[int, Tuple[int, ...]]
@contextlib.contextmanager
def enable_inplace_requires_grad(enabled=True):
prev_state = get_inplace_requires_grad_allowed()
set_inplace_requires_grad_allowed(enabled)
try:
yield
finally:
set_inplace_requires_grad_allowed(prev_state)
def _create_differentiable(inps, level=None):
def create_differentiable(x):
if isinstance(x, torch.Tensor):
with enable_inplace_requires_grad():
return x.requires_grad_()
raise ValueError(f'Thing passed to transform API must be Tensor, '
f'got {type(x)}')
return tree_map(create_differentiable, inps)
def _undo_create_differentiable(inps, level=None):
def unwrap_tensors(x):
if isinstance(x, torch.Tensor):
return _unwrap_for_grad(x, level)
# TODO: Remove the following hack for namedtuples
if isinstance(x, tuple):
return tree_map(unwrap_tensors, tuple(x))
raise RuntimeError(f"Expected tensors, got unsupported type {type(x)}")
return tree_map(unwrap_tensors, inps)
def _is_differentiable(maybe_tensor):
if not isinstance(maybe_tensor, torch.Tensor):
return False
return maybe_tensor.requires_grad
def _any_differentiable(tensor_or_tuple_of_tensors):
flat_args, _ = tree_flatten(tensor_or_tuple_of_tensors)
return any(tuple(map(_is_differentiable, flat_args)))
def _wrap_tensor_for_grad(maybe_tensor, level):
if not isinstance(maybe_tensor, torch.Tensor):
return maybe_tensor
return _wrap_for_grad(maybe_tensor, level)
def _wrap_all_tensors(tensor_pytree, level):
return tree_map(partial(_wrap_tensor_for_grad, level=level), tensor_pytree)
def _as_tuple(val):
if isinstance(val, tuple):
return val
return (val,)
# Version of autograd.grad that handles outputs that don't depend on inputs
def _autograd_grad(outputs, inputs, grad_outputs=None, retain_graph=False, create_graph=True):
if grad_outputs is None:
diff_outputs = tuple(out for out in outputs if out.requires_grad)
else:
result = tuple((out, go) for out, go in zip(outputs, grad_outputs) if out.requires_grad)
if len(result) == 0:
diff_outputs, grad_outputs = (), ()
else:
diff_outputs, grad_outputs = zip(*result)
if len(diff_outputs) == 0:
return tuple(torch.zeros_like(inp) for inp in inputs)
grad_inputs = torch.autograd.grad(diff_outputs, inputs, grad_outputs,
retain_graph=retain_graph,
create_graph=create_graph,
allow_unused=True)
grad_inputs = tuple(torch.zeros_like(inp) if gi is None else gi
for gi, inp in zip(grad_inputs, inputs))
return grad_inputs
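# A minimal sketch of why the wrapper above exists (illustrative, not part of
# the API): plain torch.autograd.grad raises when an output does not require
# grad, whereas _autograd_grad filters such outputs out and returns zeros.
#
#   x = torch.randn(3, requires_grad=True)
#   out = x.detach() * 2  # does not require grad
#   grads = _autograd_grad((out,), (x,), (torch.ones_like(out),))
#   # grads == (torch.zeros_like(x),) rather than an error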
# NOTE [grad and vjp interaction with no_grad]
#
# def f(x):
# with torch.no_grad():
# c = x ** 2
# return x - c
#
# The thing to consider is if enable_grad is on/off before grad gets called.
#
# Case 1: enable_grad is on.
# grad(f)(x)
# In this case, `grad` should respect the inner torch.no_grad.
#
# Case 2: enable_grad is off
# with torch.no_grad():
# grad(f)(x)
# In this case, `grad` should respect the inner torch.no_grad, but not the
# outer one. This is because `grad` is a "function transform": its result
# should not depend on the result of a context manager outside of `f`.
#
# This gives us the following desired behavior:
# - (nested) grad transforms must obey torch.no_grad inside them
# - (nested) grad transforms should not obey torch.no_grad outside them
#
# To achieve this behavior, upon entering grad/vjp:
# - we save the current ("previous") is_grad_enabled (*)
# - we unconditionally enable grad.
#
# Inside DynamicLayerBackFallback, when we're temporarily popping `grad` layer
# off the stack:
# - if grad_mode is disabled, then we do nothing. (there is a torch.no_grad
# active, all subsequent grad transforms must obey it).
# - if grad_mode is enabled, and the previous is_grad_enabled (*) is False,
# then we temporarily restore the previous `is_grad_enabled`. This is
# because we're crossing the boundary from a `grad` outside the
# no_grad to a `grad` inside the no_grad.
#
# NB: vjp has some interesting behavior because the vjp's callable can be called
# under a different grad_mode than the forward computation...
#
# NB: forward-mode AD: forward-mode AD doesn't respect torch.no_grad, but
# it respects c10::AutoFwGradMode. We've implemented the same logic for
# our jvp transform (it will have special handling if FwGradMode is disabled).
# How do we increment and decrement the nesting? I don't think we can.
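# A minimal sketch of the two cases above (illustrative; assumes the `grad`
# transform defined later in this module):
#
#   def f(x):
#       with torch.no_grad():
#           c = x ** 2
#       return x - c
#
#   x = torch.randn([])
#   # Case 1: the inner no_grad is respected, so c contributes no gradient.
#   assert torch.allclose(grad(f)(x), torch.ones_like(x))
#   # Case 2: an outer no_grad does not disable the transform.
#   with torch.no_grad():
#       assert torch.allclose(grad(f)(x), torch.ones_like(x))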
def vjp(func: Callable, *primals, has_aux: bool = False):
"""
Standing for the vector-Jacobian product, returns a tuple containing the
results of :attr:`func` applied to :attr:`primals` and a function that, when
given ``cotangents``, computes the reverse-mode Jacobian of :attr:`func` with
respect to :attr:`primals` times ``cotangents``.
Args:
func (Callable): A Python function that takes one or more arguments. Must
return one or more Tensors.
primals (Tensors): Positional arguments to :attr:`func` that must all be
Tensors. The returned function will also be computing the
derivative with respect to these arguments
has_aux (bool): Flag indicating that :attr:`func` returns a
``(output, aux)`` tuple where the first element is the output of
the function to be differentiated and the second element is
other auxiliary objects that will not be differentiated.
Default: False.
Returns:
Returns a ``(output, vjp_fn)`` tuple containing the output of :attr:`func`
applied to :attr:`primals` and a function that computes the vjp of
:attr:`func` with respect to all :attr:`primals` using the cotangents passed
to the returned function. If ``has_aux is True``, then instead returns a
``(output, vjp_fn, aux)`` tuple.
The returned ``vjp_fn`` function will return a tuple of each VJP.
When used in simple cases, :func:`vjp` behaves the same as :func:`grad`
>>> x = torch.randn([5])
>>> f = lambda x: x.sin().sum()
>>> (_, vjpfunc) = functorch.vjp(f, x)
>>> grad = vjpfunc(torch.tensor(1.))[0]
>>> assert torch.allclose(grad, functorch.grad(f)(x))
However, :func:`vjp` can support functions with multiple outputs by
passing in the cotangents for each of the outputs
>>> x = torch.randn([5])
>>> f = lambda x: (x.sin(), x.cos())
>>> (_, vjpfunc) = functorch.vjp(f, x)
>>> vjps = vjpfunc((torch.ones([5]), torch.ones([5])))
>>> assert torch.allclose(vjps[0], x.cos() + -x.sin())
:func:`vjp` can even support outputs being Python structs
>>> x = torch.randn([5])
>>> f = lambda x: {'first': x.sin(), 'second': x.cos()}
>>> (_, vjpfunc) = functorch.vjp(f, x)
>>> cotangents = {'first': torch.ones([5]), 'second': torch.ones([5])}
>>> vjps = vjpfunc(cotangents)
>>> assert torch.allclose(vjps[0], x.cos() + -x.sin())
The function returned by :func:`vjp` will compute the partials with
respect to each of the :attr:`primals`
>>> x, y = torch.randn([5, 4]), torch.randn([4, 5])
>>> (_, vjpfunc) = functorch.vjp(torch.matmul, x, y)
>>> cotangents = torch.randn([5, 5])
>>> vjps = vjpfunc(cotangents)
>>> assert len(vjps) == 2
>>> assert torch.allclose(vjps[0], torch.matmul(cotangents, y.transpose(0, 1)))
>>> assert torch.allclose(vjps[1], torch.matmul(x.transpose(0, 1), cotangents))
:attr:`primals` are the positional arguments for :attr:`f`. All kwargs use their
default value
>>> x = torch.randn([5])
>>> def f(x, scale=4.):
>>> return x * scale
>>>
>>> (_, vjpfunc) = functorch.vjp(f, x)
>>> vjps = vjpfunc(torch.ones_like(x))
>>> assert torch.allclose(vjps[0], torch.full(x.shape, 4.))
.. note::
Using PyTorch ``torch.no_grad`` together with ``vjp``.
Case 1: Using ``torch.no_grad`` inside a function:
>>> def f(x):
>>> with torch.no_grad():
>>> c = x ** 2
>>> return x - c
In this case, ``vjp(f)(x)`` will respect the inner ``torch.no_grad``.
Case 2: Using ``vjp`` inside ``torch.no_grad`` context manager:
>>> with torch.no_grad():
>>> vjp(f)(x)
In this case, ``vjp`` will respect the inner ``torch.no_grad``, but not the
outer one. This is because ``vjp`` is a "function transform": its result
should not depend on the result of a context manager outside of ``f``.
"""
level = _grad_increment_nesting()
try:
# See NOTE [grad and vjp interaction with no_grad]
with torch.enable_grad():
primals = _wrap_all_tensors(primals, level)
diff_primals = _create_differentiable(primals, level)
primals_out = func(*diff_primals)
if has_aux:
if not (isinstance(primals_out, tuple) and len(primals_out) == 2):
raise RuntimeError(
"vjp(f, *primals): output of function f should be a tuple: (output, aux) "
"if has_aux is True"
)
primals_out, aux = primals_out
aux = _undo_create_differentiable(aux, level)
flat_primals_out, primals_out_spec = tree_flatten(primals_out)
assert_non_empty_tensor_output(flat_primals_out, 'vjp(f, *primals)')
flat_diff_primals, primals_spec = tree_flatten(diff_primals)
results = _undo_create_differentiable(primals_out, level)
for primal_out in flat_primals_out:
assert isinstance(primal_out, torch.Tensor)
if primal_out.is_floating_point() or primal_out.is_complex():
continue
raise RuntimeError("vjp(f, ...): All outputs of f must be "
"floating-point or complex Tensors, got Tensor "
f"with dtype {primal_out.dtype}")
def wrapper(cotangents, retain_graph=True, create_graph=None):
if create_graph is None:
create_graph = torch.is_grad_enabled()
flat_cotangents, cotangents_spec = tree_flatten(cotangents)
if primals_out_spec != cotangents_spec:
raise RuntimeError(
f'Expected pytree structure of cotangents to be the same '
f'as pytree structure of outputs to the function. '
f'cotangents: {treespec_pprint(cotangents_spec)}, '
f'primal output: {treespec_pprint(primals_out_spec)}')
result = _autograd_grad(flat_primals_out, flat_diff_primals, flat_cotangents,
retain_graph=retain_graph, create_graph=create_graph)
return tree_unflatten(result, primals_spec)
finally:
_grad_decrement_nesting()
if has_aux:
return results, wrapper, aux
else:
return results, wrapper
def _safe_zero_index(x):
assert len(x) == 1
return x[0]
def jacrev(func: Callable, argnums: Union[int, Tuple[int]] = 0, *, has_aux=False):
"""
Computes the Jacobian of :attr:`func` with respect to the arg(s) at index
:attr:`argnums` using reverse-mode autodiff
Args:
func (function): A Python function that takes one or more arguments,
one of which must be a Tensor, and returns one or more Tensors
argnums (int or Tuple[int]): Optional, integer or tuple of integers,
saying which arguments to get the Jacobian with respect to.
Default: 0.
has_aux (bool): Flag indicating that :attr:`func` returns a
``(output, aux)`` tuple where the first element is the output of
the function to be differentiated and the second element is
auxiliary objects that will not be differentiated.
Default: False.
Returns:
Returns a function that takes in the same inputs as :attr:`func` and
returns the Jacobian of :attr:`func` with respect to the arg(s) at
:attr:`argnums`. If ``has_aux is True``, then the returned function
instead returns a ``(jacobian, aux)`` tuple where ``jacobian``
is the Jacobian and ``aux`` is auxiliary objects returned by :attr:`func`.
A basic usage with a pointwise, unary operation will give a diagonal array
as the Jacobian
>>> from functorch import jacrev
>>> x = torch.randn(5)
>>> jacobian = jacrev(torch.sin)(x)
>>> expected = torch.diag(torch.cos(x))
>>> assert torch.allclose(jacobian, expected)
If you would like to compute the output of the function as well as the
jacobian of the function, use the ``has_aux`` flag to return the output
as an auxiliary object:
>>> from functorch import jacrev
>>> x = torch.randn(5)
>>>
>>> def f(x):
>>> return x.sin()
>>>
>>> def g(x):
>>> result = f(x)
>>> return result, result
>>>
>>> jacobian_f, f_x = jacrev(g, has_aux=True)(x)
>>> assert torch.allclose(f_x, f(x))
:func:`jacrev` can be composed with vmap to produce batched
Jacobians:
>>> from functorch import jacrev, vmap
>>> x = torch.randn(64, 5)
>>> jacobian = vmap(jacrev(torch.sin))(x)
>>> assert jacobian.shape == (64, 5, 5)
Additionally, :func:`jacrev` can be composed with itself to produce
Hessians
>>> from functorch import jacrev
>>> def f(x):
>>> return x.sin().sum()
>>>
>>> x = torch.randn(5)
>>> hessian = jacrev(jacrev(f))(x)
>>> assert torch.allclose(hessian, torch.diag(-x.sin()))
By default, :func:`jacrev` computes the Jacobian with respect to the first
input. However, it can compute the Jacobian with respect to a different
argument by using :attr:`argnums`:
>>> from functorch import jacrev
>>> def f(x, y):
>>> return x + y ** 2
>>>
>>> x, y = torch.randn(5), torch.randn(5)
>>> jacobian = jacrev(f, argnums=1)(x, y)
>>> expected = torch.diag(2 * y)
>>> assert torch.allclose(jacobian, expected)
Additionally, passing a tuple to :attr:`argnums` will compute the Jacobian
with respect to multiple arguments
>>> from functorch import jacrev
>>> def f(x, y):
>>> return x + y ** 2
>>>
>>> x, y = torch.randn(5), torch.randn(5)
>>> jacobian = jacrev(f, argnums=(0, 1))(x, y)
>>> expectedX = torch.diag(torch.ones_like(x))
>>> expectedY = torch.diag(2 * y)
>>> assert torch.allclose(jacobian[0], expectedX)
>>> assert torch.allclose(jacobian[1], expectedY)
.. note::
Using PyTorch ``torch.no_grad`` together with ``jacrev``.
Case 1: Using ``torch.no_grad`` inside a function:
>>> def f(x):
>>> with torch.no_grad():
>>> c = x ** 2
>>> return x - c
In this case, ``jacrev(f)(x)`` will respect the inner ``torch.no_grad``.
Case 2: Using ``jacrev`` inside ``torch.no_grad`` context manager:
>>> with torch.no_grad():
>>> jacrev(f)(x)
In this case, ``jacrev`` will respect the inner ``torch.no_grad``, but not the
outer one. This is because ``jacrev`` is a "function transform": its result
should not depend on the result of a context manager outside of ``f``.
"""
@wraps(func)
def wrapper_fn(*args):
f_wrapper, primals = _argnums_partial(func, args, argnums)
vjp_out = vjp(f_wrapper, *primals, has_aux=has_aux)
if has_aux:
output, vjp_fn, aux = vjp_out
else:
output, vjp_fn = vjp_out
# See NOTE: [Computing jacobian with vmap and vjp for multiple outputs]
flat_output, output_spec = tree_flatten(output)
# NB: vjp already checks that all outputs are tensors
# Step 1: Construct grad_outputs by splitting the standard basis
flat_output_numels = tuple(out.numel() for out in flat_output)
flat_basis = _construct_standard_basis_for(flat_output, flat_output_numels)
basis = tree_unflatten(flat_basis, output_spec)
results = vmap(vjp_fn)(basis)
flat_primals, primals_spec = tree_flatten(primals)
flat_results, results_spec = tree_flatten(results)
# Step 2: The returned jacobian is one big tensor per input. In this step,
# we split each Tensor by output.
flat_results = [result.split(flat_output_numels, dim=0) for result in flat_results]
flat_input_flat_output = [
tuple(split.view(out.shape + primal.shape)
for split, out in zip(splits, flat_output))
for splits, primal in zip(flat_results, flat_primals)
]
# Step 3: Right now, `jacobian` is a List[List[Tensor]].
# The outer List corresponds to the number of primals,
# the inner List corresponds to the number of outputs.
# We need to:
# a. Exchange the order of the outer List and inner List
# b. tree_unflatten the inner Lists (which correspond to the primals)
# c. handle the argnums=int case
# d. tree_unflatten the outer List (which corresponds to the outputs)
flat_output_flat_input = tuple(zip(*flat_input_flat_output))
flat_output_input = tuple(tree_unflatten(flat_input, primals_spec)
for flat_input in flat_output_flat_input)
if isinstance(argnums, int):
flat_output_input = tuple(_safe_zero_index(flat_input)
for flat_input in flat_output_input)
output_input = tree_unflatten(flat_output_input, output_spec)
if has_aux:
return output_input, aux
return output_input
return wrapper_fn
# NOTE: [Computing jacobian with vmap and vjp for multiple outputs]
#
# Let's consider f(x) = (x**2, x.sum()) and let x = torch.randn(3).
# It turns out we can compute the jacobian of this function with a single
# call to autograd.grad by using vmap over the correct grad_outputs.
#
# Firstly, one way to compute the jacobian is to stack x**2 and x.sum()
# into a 4D vector. E.g., use g(x) = torch.stack([x**2, x.sum()])
#
# To get the first row of the jacobian, we call
# >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([1, 0, 0, 0]))
# To get the 2nd row of the jacobian, we call
# >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([0, 1, 0, 0]))
# and so on.
#
# Using vmap, we can vectorize all 4 of these computations into one by
# passing the standard basis for R^4 as the grad_output.
# vmap(partial(autograd.grad, g(x), x))(torch.eye(4)).
#
# Now, how do we compute the jacobian *without stacking the output*?
# We can just split the standard basis across the outputs. So to
# compute the jacobian of f(x), we'd use
# >>> autograd.grad(f(x), x, grad_outputs=_construct_standard_basis_for(...))
# The grad_outputs looks like the following:
# ( torch.tensor([[1, 0, 0],
# [0, 1, 0],
# [0, 0, 1],
# [0, 0, 0]]),
# torch.tensor([[0],
# [0],
# [0],
# [1]]) )
#
# But we're not done yet!
# >>> vmap(partial(autograd.grad, f(x), x))(grad_outputs)
# returns a Tensor of shape [4, 3]. We have to remember to split the
# jacobian of shape [4, 3] into two:
# - one of shape [3, 3] for the first output
# - one of shape [ 3] for the second output
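# A concrete sketch of the trick above using this module's vjp (illustrative;
# this mirrors what jacrev does internally):
#
#   x = torch.randn(3)
#   f = lambda x: (x ** 2, x.sum())
#   out, vjp_fn = vjp(f, x)
#   basis = _construct_standard_basis_for(out, (3, 1))
#   # basis[0] has shape [4, 3]; basis[1] has shape [4].
#   rows = vmap(vjp_fn)(basis)
#   # rows[0] has shape [4, 3]: all 4 rows of the jacobian, computed at once;
#   # split it into [3, 3] and [1, 3] chunks, one per output.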
def _construct_standard_basis_for(tensors, tensor_numels):
# This function:
# - constructs a N=sum(tensor_numels) standard basis. i.e. an NxN identity matrix.
# - Splits the identity matrix into chunks with each chunk size determined by `tensor_numels`.
# - Each chunk corresponds to one tensor. The chunk has the same dtype and
# device as the tensor
#
# For example, with tensor_numels = [1, 2, 1], this function returns:
# ( tensor([[1], tensor([[0, 0], tensor([[0],
# [0], [1, 0], [0],
# [0], [0, 1], [0],
# [0]]) , [0, 0]]) , [1]]) )
#
# Precondition: tensor_numels == tuple(tensor.numel() for tensor in tensors)
# Precondition: tensors always has at least one element.
#
# See NOTE: [Computing jacobian with vmap and grad for multiple tensors]
# for context behind this function.
assert len(tensors) == len(tensor_numels)
assert len(tensors) > 0
total_numel = sum(tensor_numels)
diag_start_indices = (0, *torch.tensor(tensor_numels).cumsum(dim=0)[:-1].neg().unbind())
chunks = tuple(tensor.new_zeros(total_numel, tensor_numel)
for tensor, tensor_numel in zip(tensors, tensor_numels))
for chunk, diag_start_idx in zip(chunks, diag_start_indices):
chunk.diagonal(diag_start_idx).fill_(1)
chunks = tuple(chunk.view(total_numel, *tensor.shape)
for chunk, tensor in zip(chunks, tensors))
return chunks
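# For example (illustrative): given two tensors with numels 3 and 1,
#
#   a, b = torch.randn(3), torch.randn(1)
#   chunks = _construct_standard_basis_for((a, b), (3, 1))
#   # chunks[0].shape == (4, 3) and chunks[1].shape == (4, 1);
#   # concatenated along dim 1 they form the 4x4 identity matrix.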
def _validate_and_wrap_argnum(argnum, num_args):
if not isinstance(argnum, int):
raise RuntimeError(f'argnum must be int, got: {type(argnum)}')
if argnum >= 0 and argnum < num_args:
return argnum
if argnum < 0 and argnum >= -num_args:
return argnum + num_args
raise RuntimeError(f'Got argnum={argnum}, but only {num_args} positional inputs')
def _check_unique_non_empty(argnums):
if isinstance(argnums, tuple):
if len(argnums) == 0:
raise RuntimeError("argnums must be non-empty")
if len(set(argnums)) != len(argnums):
raise RuntimeError(f"argnums elements must be unique, got {argnums}")
def _replace_args(old_args, new_args, argnums):
if isinstance(argnums, int):
if len(new_args) != 1:
raise RuntimeError(f'new_args should be of size 1, was of size {len(new_args)}')
return tuple(new_args[0] if i == argnums else old_args[i] for i in range(len(old_args)))
if isinstance(argnums, tuple):
if len(new_args) != len(argnums):
raise RuntimeError(
"new_args should have the same size as argnums. "
f"Argnums size {len(argnums)}, new_args size {len(new_args)}")
def get_right_elem(i):
return new_args[argnums.index(i)] if i in argnums else old_args[i]
return tuple(get_right_elem(i) for i in range(len(old_args)))
raise RuntimeError(f'argnums must be int or Tuple[int, ...], got: {type(argnums)}')
def _validate_and_wrap_argnums(argnums, num_args):
if isinstance(argnums, int):
return _validate_and_wrap_argnum(argnums, num_args)
if isinstance(argnums, tuple):
return tuple(_validate_and_wrap_argnum(argnum, num_args) for argnum in argnums)
raise AssertionError("Should never get here")
def _slice_argnums(args, argnums, as_tuple=True):
if not isinstance(argnums, int) and not isinstance(argnums, tuple):
raise RuntimeError(f'argnums must be int or Tuple[int, ...], got: {type(argnums)}')
argnums = _validate_and_wrap_argnums(argnums, len(args))
_check_unique_non_empty(argnums)
if isinstance(argnums, int):
if as_tuple:
return (args[argnums],)
else:
return args[argnums]
return tuple(args[i] for i in argnums)
def _argnums_partial(f, args, argnums):
def f_wrapper(*wrapper_args):
replaced_args = _replace_args(args, wrapper_args, argnums)
return f(*replaced_args)
wrapper_args = _slice_argnums(args, argnums)
wrapper_args = wrapper_args if isinstance(wrapper_args, tuple) else (wrapper_args, )
return (f_wrapper, wrapper_args)
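# For example (illustrative): with args (x, y) and argnums=1,
#
#   f_wrapper, primals = _argnums_partial(f, (x, y), 1)
#   # primals == (y,), and f_wrapper(new_y) computes f(x, new_y), so a
#   # transform applied to f_wrapper differentiates only with respect to y.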
JVP_NESTING = 0
@contextlib.contextmanager
def noop():
yield
@contextlib.contextmanager
def enable_fwd_grad(enabled=True):
prev_state = get_fwd_grad_enabled()
set_fwd_grad_enabled(enabled)
try:
yield
finally:
set_fwd_grad_enabled(prev_state)
def assert_flat_tuple_of_tensors(elts: Any, api: str, argname: str) -> None:
if not isinstance(elts, tuple):
raise RuntimeError(
f'{api}: Expected {argname} to be a tuple of Tensors, got {type(elts)}')
for elt in elts:
if isinstance(elt, torch.Tensor):
continue
raise RuntimeError(
f'{api}: Expected {argname} to be a tuple of Tensors, got '
f'a tuple with an element of type {type(elt)}')
if len(elts) == 0:
raise RuntimeError(
f'{api}: Expected {argname} to be a non-empty tuple of Tensors.')
def assert_non_empty_tensor_output(output: List[Any], api: str) -> None:
if output == [None] or len(output) < 1:
raise RuntimeError(
f'{api}: Expected f to be a function that has non-empty output (got output = {output})'
)
for o in output:
if not isinstance(o, torch.Tensor):
raise RuntimeError(
f'{api}: expected f(*primals) to return only tensors'
f', got unsupported type {type(o)}'
)
def assert_output_is_tensor_or_tensors(output: Any, api: str) -> None:
if isinstance(output, torch.Tensor):
return
if not isinstance(output, tuple):
raise RuntimeError(
f'{api}: Expected output of f to be a Tensor or Tensors, got '
f'{type(output)}')
if len(output) == 0:
raise RuntimeError(
f'{api}: Expected output of f to be a non-empty tuple of Tensors.')
for out in output:
if isinstance(out, torch.Tensor):
continue
raise RuntimeError(
f'{api}: Expected output of f to be a Tensor or Tensors, got '
f'{type(out)} as an output')
def assert_non_empty_list_of_tensors(output: List[torch.Tensor], api: str, argname: str) -> None:
if len(output) == 0:
raise RuntimeError(
f'{api}: Expected {argname} to contain at least one Tensor.')
for out in output:
if isinstance(out, torch.Tensor):
continue
raise RuntimeError(
f'{api}: Expected {argname} to only contain Tensors, got '
f'{type(out)}')
jvp_str = 'jvp(f, primals, tangents)'
def safe_unpack_dual(dual, strict):
if not isinstance(dual, torch.Tensor):
raise RuntimeError(
f'{jvp_str}: expected f(*args) to return only tensors'
f', got unsupported type {type(dual)}'
)
primal, tangent = fwAD.unpack_dual(dual)
if tangent is None:
if strict:
raise RuntimeError(
'jvp(f, primals, tangents, strict=True): '
'The output of f is independent of '
'the inputs. This is not allowed with strict=True.')
tangent = torch.zeros_like(primal)
return primal, tangent
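# A minimal sketch of the dual-number machinery jvp builds on (illustrative;
# these are public torch.autograd.forward_ad APIs):
#
#   with fwAD.dual_level():
#       dual = fwAD.make_dual(torch.tensor(3.0), torch.tensor(1.0))
#       out = dual * dual  # primal 9.0, tangent d(x*x)/dx * 1.0 = 6.0
#       primal, tangent = fwAD.unpack_dual(out)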
def jvp(func: Callable, primals: Any, tangents: Any, *, strict: bool = False, has_aux: bool = False):
"""
Standing for the Jacobian-vector product, returns a tuple containing
the output of `func(*primals)` and the "Jacobian of ``func`` evaluated at
``primals``" times ``tangents``. This is also known as forward-mode autodiff.
Args:
func (function): A Python function that takes one or more arguments,
one of which must be a Tensor, and returns one or more Tensors
primals (Tensors): Positional arguments to :attr:`func` that must all be
Tensors. The returned function will also be computing the
derivative with respect to these arguments
tangents (Tensors): The "vector" for which Jacobian-vector-product is
computed. Must be the same structure and sizes as the inputs to
``func``.
has_aux (bool): Flag indicating that :attr:`func` returns a
``(output, aux)`` tuple where the first element is the output of
the function to be differentiated and the second element is
other auxiliary objects that will not be differentiated.
Default: False.
Returns:
Returns a ``(output, jvp_out)`` tuple containing the output of ``func``
evaluated at ``primals`` and the Jacobian-vector product.
If ``has_aux is True``, then instead returns a ``(output, jvp_out, aux)`` tuple.
.. warning::
PyTorch's forward-mode AD coverage on operators is not very good at the
moment. You may see this API error out with "forward-mode AD not
implemented for operator X". If so, please file us a bug report and we
will prioritize it.
jvp is useful when you wish to compute gradients of a function R^1 -> R^N
>>> from functorch import jvp
>>> x = torch.randn([])
>>> f = lambda x: x * torch.tensor([1., 2., 3])
>>> value, grad = jvp(f, (x,), (torch.tensor(1.),))
>>> assert torch.allclose(value, f(x))
>>> assert torch.allclose(grad, torch.tensor([1., 2, 3]))
:func:`jvp` can support functions with multiple inputs by passing in the
tangents for each of the inputs
>>> from functorch import jvp
>>> x = torch.randn(5)
>>> y = torch.randn(5)
>>> f = lambda x, y: (x * y)
>>> _, output = jvp(f, (x, y), (torch.ones(5), torch.ones(5)))
>>> assert torch.allclose(output, x + y)
"""
if not isinstance(primals, tuple):
raise RuntimeError(
f'{jvp_str}: Expected primals to be a tuple. '
f'E.g. it should be valid to call f(*primals).')
flat_primals, primals_spec = tree_flatten(primals)
flat_tangents, tangents_spec = tree_flatten(tangents)
if primals_spec != tangents_spec:
raise RuntimeError(
f'{jvp_str}: Expected primals and tangents to have the same python '
f'structure. For example, if primals is a tuple of 3 tensors, '
f'tangents also must be. Got primals with structure {primals_spec} '
f'and tangents with structure {tangents_spec}')
assert_non_empty_list_of_tensors(flat_primals, jvp_str, 'primals')
assert_non_empty_list_of_tensors(flat_tangents, jvp_str, 'tangents')
level = _jvp_increment_nesting()
try:
global JVP_NESTING
JVP_NESTING += 1
with enable_fwd_grad():
ctx = fwAD.dual_level if JVP_NESTING == 1 else noop
with ctx():
flat_duals = tuple(fwAD.make_dual(p, t)
for p, t in zip(flat_primals, flat_tangents))
duals = tree_unflatten(flat_duals, primals_spec)
result_duals = func(*duals)
if has_aux:
if not (isinstance(result_duals, tuple) and len(result_duals) == 2):
raise RuntimeError(
f"{jvp_str}: output of function f should be a tuple: (output, aux) "
"if has_aux is True"
)
result_duals, aux = result_duals
aux = _undo_create_differentiable(aux, level)
result_duals, spec = tree_flatten(result_duals)
assert_non_empty_tensor_output(result_duals, jvp_str)
primals_out, tangents_out = \
zip(*[safe_unpack_dual(dual, strict) for dual in result_duals])
primals_out = tree_map(
partial(_undo_create_differentiable, level=level), primals_out)
tangents_out = tree_map(
partial(_undo_create_differentiable, level=level), tangents_out)
primals_out_unflatten = tree_unflatten(primals_out, spec)
tangents_out_unflatten = tree_unflatten(tangents_out, spec)
if has_aux:
return primals_out_unflatten, tangents_out_unflatten, aux
return primals_out_unflatten, tangents_out_unflatten
finally:
_jvp_decrement_nesting()
JVP_NESTING -= 1
def safe_unflatten(tensor, dim, shape):
if len(shape) == 0:
assert tensor.shape[dim] == 1
return tensor.squeeze(dim)
return tensor.unflatten(dim, shape)
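# For example (illustrative): safe_unflatten handles the zero-dim (scalar)
# target shape, which plain Tensor.unflatten does not accept,
#
#   t = torch.randn(4, 1)
#   safe_unflatten(t, 1, ())    # shape [4]  (scalar primal)
#   safe_unflatten(t, 1, (1,))  # shape [4, 1]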
def jacfwd(func: Callable, argnums: argnums_t = 0, has_aux: bool = False):
"""
Computes the Jacobian of :attr:`func` with respect to the arg(s) at index
:attr:`argnums` using forward-mode autodiff
Args:
func (function): A Python function that takes one or more arguments,
one of which must be a Tensor, and returns one or more Tensors
argnums (int or Tuple[int]): Optional, integer or tuple of integers,
saying which arguments to get the Jacobian with respect to.
Default: 0.
has_aux (bool): Flag indicating that :attr:`func` returns a
``(output, aux)`` tuple where the first element is the output of
the function to be differentiated and the second element is
auxiliary objects that will not be differentiated.
Default: False.
Returns:
Returns a function that takes in the same inputs as :attr:`func` and
returns the Jacobian of :attr:`func` with respect to the arg(s) at
:attr:`argnums`. If ``has_aux is True``, then the returned function
instead returns a ``(jacobian, aux)`` tuple where ``jacobian``
is the Jacobian and ``aux`` is auxiliary objects returned by :attr:`func`.
.. warning::
PyTorch's forward-mode AD coverage on operators is not very good at the
moment. You may see this API error out with "forward-mode AD not
implemented for operator X". If so, please file us a bug report and we
will prioritize it.
A basic usage with a pointwise, unary operation will give a diagonal array
as the Jacobian
>>> from functorch import jacfwd
>>> x = torch.randn(5)
>>> jacobian = jacfwd(torch.sin)(x)
>>> expected = torch.diag(torch.cos(x))
>>> assert torch.allclose(jacobian, expected)
:func:`jacfwd` can be composed with vmap to produce batched
Jacobians:
>>> from functorch import jacfwd, vmap
>>> x = torch.randn(64, 5)
>>> jacobian = vmap(jacfwd(torch.sin))(x)
>>> assert jacobian.shape == (64, 5, 5)
If you would like to compute the output of the function as well as the
jacobian of the function, use the ``has_aux`` flag to return the output
as an auxiliary object:
>>> from functorch import jacfwd
>>> x = torch.randn(5)
>>>
>>> def f(x):
>>> return x.sin()
>>>
>>> def g(x):
>>> result = f(x)
>>> return result, result
>>>
>>> jacobian_f, f_x = jacfwd(g, has_aux=True)(x)
>>> assert torch.allclose(f_x, f(x))
Additionally, :func:`jacfwd` can be composed with itself or :func:`jacrev`
to produce Hessians
>>> from functorch import jacfwd, jacrev
>>> def f(x):
>>> return x.sin().sum()
>>>
>>> x = torch.randn(5)
>>> hessian = jacfwd(jacrev(f))(x)
>>> assert torch.allclose(hessian, torch.diag(-x.sin()))
By default, :func:`jacfwd` computes the Jacobian with respect to the first
input. However, it can compute the Jacobian with respect to a different
argument by using :attr:`argnums`:
>>> from functorch import jacfwd
>>> def f(x, y):
>>> return x + y ** 2
>>>
>>> x, y = torch.randn(5), torch.randn(5)
>>> jacobian = jacfwd(f, argnums=1)(x, y)
>>> expected = torch.diag(2 * y)
>>> assert torch.allclose(jacobian, expected)
Additionally, passing a tuple to :attr:`argnums` will compute the Jacobian
with respect to multiple arguments
>>> from functorch import jacfwd
>>> def f(x, y):
>>> return x + y ** 2
>>>
>>> x, y = torch.randn(5), torch.randn(5)
>>> jacobian = jacfwd(f, argnums=(0, 1))(x, y)
>>> expectedX = torch.diag(torch.ones_like(x))
>>> expectedY = torch.diag(2 * y)
>>> assert torch.allclose(jacobian[0], expectedX)
>>> assert torch.allclose(jacobian[1], expectedY)
"""
@wraps(func)
def wrapper_fn(*args):
f_wrapper, primals = _argnums_partial(func, args, argnums)
flat_primals, primals_spec = tree_flatten(primals)
flat_primals_numels = tuple(p.numel() for p in flat_primals)
flat_basis = _construct_standard_basis_for(flat_primals, flat_primals_numels)
basis = tree_unflatten(flat_basis, primals_spec)
def push_jvp(basis):
output = jvp(f_wrapper, primals, basis, has_aux=has_aux)
if has_aux:
_, jvp_out, aux = output
return jvp_out, aux
_, jvp_out = output
return jvp_out
results = vmap(push_jvp)(basis)
if has_aux:
results, aux = results
# aux is in the standard basis format, e.g. NxN matrix
# We need to fetch the first element as original `func` output
flat_aux, aux_spec = tree_flatten(aux)
flat_aux = [value[0] for value in flat_aux]
aux = tree_unflatten(flat_aux, aux_spec)
jac_outs, spec = tree_flatten(results)
# The output check below most likely can never raise an error,
# because jvp should have already validated the output.
# assert_non_empty_output(jac_outs, 'jacfwd(f, ...)(*args)')
jac_outs_ins = tuple(
tuple(
safe_unflatten(jac_out_in, -1, primal.shape)
for primal, jac_out_in in
zip(flat_primals, jac_out.movedim(0, -1).split(flat_primals_numels, dim=-1))
)
for jac_out in jac_outs
)
jac_outs_ins = tuple(tree_unflatten(jac_ins, primals_spec) for jac_ins in jac_outs_ins)
if isinstance(argnums, int):
jac_outs_ins = tuple(jac_ins[0] for jac_ins in jac_outs_ins)
if has_aux:
return tree_unflatten(jac_outs_ins, spec), aux
return tree_unflatten(jac_outs_ins, spec)
return wrapper_fn
def hessian(func, argnums=0):
"""
Computes the Hessian of :attr:`func` with respect to the arg(s) at index
:attr:`argnums` via a forward-over-reverse strategy.
The forward-over-reverse strategy (composing ``jacfwd(jacrev(func))``) is
a good default for good performance. It is possible to compute Hessians
through other compositions of :func:`jacfwd` and :func:`jacrev` like
``jacfwd(jacfwd(func))`` or ``jacrev(jacrev(func))``.
Args:
func (function): A Python function that takes one or more arguments,
one of which must be a Tensor, and returns one or more Tensors
argnums (int or Tuple[int]): Optional, integer or tuple of integers,
saying which arguments to get the Hessian with respect to.
Default: 0.
Returns:
Returns a function that takes in the same inputs as :attr:`func` and
returns the Hessian of :attr:`func` with respect to the arg(s) at
:attr:`argnums`.
.. warning::
PyTorch's forward-mode AD coverage on operators is not very good at the
moment. You may see this API error out with "forward-mode AD not
implemented for operator X". If so, please file us a bug report and we
will prioritize it.
A basic usage with an R^N -> R^1 function gives an N x N Hessian:
>>> from functorch import hessian
>>> def f(x):
>>> return x.sin().sum()
>>>
>>> x = torch.randn(5)
>>> hess = hessian(f)(x)
>>> assert torch.allclose(hess, torch.diag(-x.sin()))
"""
return jacfwd(jacrev(func, argnums), argnums)
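# Other compositions mentioned in the docstring (illustrative, for a suitable
# scalar-valued f and input x):
#
#   hess_fr = jacfwd(jacrev(f))(x)  # forward-over-reverse (what hessian uses)
#   hess_rr = jacrev(jacrev(f))(x)  # reverse-over-reverse
#   hess_ff = jacfwd(jacfwd(f))(x)  # forward-over-forward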
def grad_and_value(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable:
"""
Returns a function to compute a tuple of the gradient and primal, or
forward, computation.
Args:
func (Callable): A Python function that takes one or more arguments.
Must return a single-element Tensor. If specified :attr:`has_aux`
equals ``True``, function can return a tuple of single-element
Tensor and other auxiliary objects: ``(output, aux)``.
argnums (int or Tuple[int]): Specifies arguments to compute gradients
with respect to. :attr:`argnums` can be single integer or tuple of
integers. Default: 0.
has_aux (bool): Flag indicating that :attr:`func` returns a tensor and
other auxiliary objects: ``(output, aux)``. Default: False.
Returns:
Function to compute a tuple of gradients with respect to its inputs
and the forward computation. By default, the output of the function is
a tuple of the gradient tensor(s) with respect to the first argument
and the primal computation. If specified :attr:`has_aux` equals
``True``, tuple of gradients and tuple of the forward computation with
output auxiliary objects is returned. If :attr:`argnums` is a tuple of
integers, a tuple of a tuple of the output gradients with respect to
each :attr:`argnums` value and the forward computation is returned.
See :func:`grad` for examples
"""
@wraps(func)
def wrapper(*args, **kwargs):
level = _grad_increment_nesting()
try:
output, aux, grad_input = None, None, None
# See NOTE [grad and vjp interaction with no_grad]
with torch.enable_grad():
args = _wrap_all_tensors(args, level)
kwargs = _wrap_all_tensors(kwargs, level)
diff_args = _slice_argnums(args, argnums, as_tuple=False)
tree_map_(partial(_create_differentiable, level=level), diff_args)
output = func(*args, **kwargs)
if has_aux:
if not (isinstance(output, tuple) and len(output) == 2):
raise RuntimeError(
"grad_and_value(f)(*args): output of function f should be a tuple: (output, aux) "
"if has_aux is True"
)
output, aux = output
if not isinstance(output, torch.Tensor):
raise RuntimeError('grad_and_value(f)(*args): Expected f(*args) '
f'to return a Tensor, got {type(output)}')
if output.dim() != 0:
raise RuntimeError('grad_and_value(f)(*args): Expected f(*args) '
'to return a scalar Tensor, got tensor with '
f'{output.dim()} dims. Maybe you wanted to '
'use the vjp or jacrev APIs instead?')
flat_diff_args, spec = tree_flatten(diff_args)
# NB: need create_graph so that backward pass isn't run in no_grad mode
flat_outputs = _as_tuple(output)
flat_grad_input = _autograd_grad(flat_outputs, flat_diff_args, create_graph=True)
grad_input = tree_unflatten(flat_grad_input, spec)
grad_input = _undo_create_differentiable(grad_input, level)
output = _undo_create_differentiable(output, level)
if aux is not None:
aux = _undo_create_differentiable(aux, level)
if has_aux:
return grad_input, (output, aux)
return grad_input, output
finally:
_grad_decrement_nesting()
return wrapper
def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Callable:
"""``grad`` operator helps computing gradients of :attr:`func` with respect to the
input(s) specified by :attr:`argnums`. This operator can be nested to
compute higher-order gradients.
Args:
func (Callable): A Python function that takes one or more arguments.
Must return a single-element Tensor. If specified :attr:`has_aux` equals ``True``,
function can return a tuple of single-element Tensor and other auxiliary objects:
``(output, aux)``.
argnums (int or Tuple[int]): Specifies arguments to compute gradients with respect to.
:attr:`argnums` can be single integer or tuple of integers. Default: 0.
has_aux (bool): Flag indicating that :attr:`func` returns a tensor and other
auxiliary objects: ``(output, aux)``. Default: False.
Returns:
Function to compute gradients with respect to its inputs. By default, the output of
the function is the gradient tensor(s) with respect to the first argument.
If specified :attr:`has_aux` equals ``True``, tuple of gradients and output auxiliary objects
is returned. If :attr:`argnums` is a tuple of integers, a tuple of output gradients with
respect to each :attr:`argnums` value is returned.
Example of using ``grad``:
>>> from functorch import grad
>>> x = torch.randn([])
>>> cos_x = grad(lambda x: torch.sin(x))(x)
>>> assert torch.allclose(cos_x, x.cos())
>>>
>>> # Second-order gradients
>>> neg_sin_x = grad(grad(lambda x: torch.sin(x)))(x)
>>> assert torch.allclose(neg_sin_x, -x.sin())
When composed with ``vmap``, ``grad`` can be used to compute per-sample-gradients:
>>> from functorch import grad
>>> from functorch import vmap
>>> batch_size, feature_size = 3, 5
>>>
>>> def model(weights, feature_vec):
>>> # Very simple linear model with activation
>>> assert feature_vec.dim() == 1
>>> return feature_vec.dot(weights).relu()
>>>
>>> def compute_loss(weights, example, target):
>>> y = model(weights, example)
>>> return ((y - target) ** 2).mean() # MSELoss
>>>
>>> weights = torch.randn(feature_size, requires_grad=True)
>>> examples = torch.randn(batch_size, feature_size)
>>> targets = torch.randn(batch_size)
>>> inputs = (weights, examples, targets)
>>> grad_weight_per_example = vmap(grad(compute_loss), in_dims=(None, 0, 0))(*inputs)
Example of using ``grad`` with :attr:`has_aux` and :attr:`argnums`:
>>> from functorch import grad
>>> def my_loss_func(y, y_pred):
>>> loss_per_sample = (0.5 * y_pred - y) ** 2
>>> loss = loss_per_sample.mean()
>>> return loss, (y_pred, loss_per_sample)
>>>
>>> fn = grad(my_loss_func, argnums=(0, 1), has_aux=True)
>>> y_true = torch.rand(4)
>>> y_preds = torch.rand(4, requires_grad=True)
>>> out = fn(y_true, y_preds)
>>> # output is ((grads w.r.t y_true, grads w.r.t y_preds), (y_pred, loss_per_sample))
.. note::
Using PyTorch ``torch.no_grad`` together with ``grad``.
Case 1: Using ``torch.no_grad`` inside a function:
>>> def f(x):
>>> with torch.no_grad():
>>> c = x ** 2
>>> return x - c
In this case, ``grad(f)(x)`` will respect the inner ``torch.no_grad``.
Case 2: Using ``grad`` inside ``torch.no_grad`` context manager:
>>> with torch.no_grad():
>>> grad(f)(x)
In this case, ``grad`` will respect the inner ``torch.no_grad``, but not the
outer one. This is because ``grad`` is a "function transform": its result
should not depend on the result of a context manager outside of ``f``.
"""
@wraps(func)
def wrapper(*args, **kwargs):
results = grad_and_value(func, argnums, has_aux=has_aux)(*args, **kwargs)
if has_aux:
grad, (_, aux) = results
return grad, aux
grad, _ = results
return grad
return wrapper
def _maybe_wrap_functional_tensor(maybe_tensor, level):
if not isinstance(maybe_tensor, torch.Tensor):
return maybe_tensor
wrapped = _wrap_functional_tensor(maybe_tensor, level)
_assert_wrapped_functional(maybe_tensor, wrapped)
return wrapped
def _wrap_all_tensors_to_functional(tensor_pytree, level):
return tree_map(partial(_maybe_wrap_functional_tensor, level=level), tensor_pytree)
def _maybe_unwrap_functional_tensor(maybe_tensor, *, reapply_views: bool):
if not isinstance(maybe_tensor, torch.Tensor):
return maybe_tensor
if not torch._is_functional_tensor(maybe_tensor):
# If it's not a functional tensor, just return it.
# This can happen if we functionalize a fn that returns a global,
# which was never wrapped properly.
return maybe_tensor
return _unwrap_functional_tensor(maybe_tensor, reapply_views)
def _unwrap_all_tensors_from_functional(tensor_pytree, *, reapply_views: bool):
return tree_map(lambda t: _maybe_unwrap_functional_tensor(t, reapply_views=reapply_views), tensor_pytree)
def functionalize(func: Callable, *, remove: str = 'mutations') -> Callable:
"""
functionalize is a transform that can be used to remove (intermediate)
mutations and aliasing from a function, while preserving the function's
semantics.
``functionalize(func)`` returns a new function with the same semantics
as ``func``, but with all intermediate mutations removed.
Every inplace operation performed on an intermediate tensor:
``intermediate.foo_()``
gets replaced by its out-of-place equivalent:
``intermediate_updated = intermediate.foo()``.
functionalize is useful for shipping a pytorch program off to
backends or compilers that aren't able to easily represent
mutations or aliasing operators.
Args:
func (Callable): A Python function that takes one or more arguments.
remove (str): An optional string argument, that takes on either
the value 'mutations' or 'mutations_and_views'.
If 'mutations' is passed in then all mutating operators
will be replaced with their non-mutating equivalents.
If 'mutations_and_views' is passed in, then additionally, all aliasing
operators will be replaced with their non-aliasing equivalents.
Default: 'mutations'.
Returns:
Returns a new "functionalized" function. It takes the same inputs as
:attr:`func`, and has the same behavior, but any mutations
(and optionally aliasing) performed on intermediate tensors
in the function will be removed.
functionalize will also remove mutations (and views) that were performed on function inputs.
However to preserve semantics, functionalize will "fix up" the mutations after
the transform has finished running, by detecting if any tensor inputs "should have"
been mutated, and copying the new data back to the inputs if necessary.
Example::
>>> import torch
>>> from functorch import make_fx
>>> from functorch.experimental import functionalize
>>>
>>> # A function that uses mutations and views, but only on intermediate tensors.
>>> def f(a):
... b = a + 1
... c = b.view(-1)
... c.add_(1)
... return b
...
>>> inpt = torch.randn(2)
>>>
>>> out1 = f(inpt)
>>> out2 = functionalize(f)(inpt)
>>>
>>> # semantics are the same (outputs are equivalent)
>>> print(torch.allclose(out1, out2))
True
>>>
>>> f_traced = make_fx(f)(inpt)
>>> f_no_mutations_traced = make_fx(functionalize(f))(inpt)
>>> f_no_mutations_and_views_traced = make_fx(functionalize(f, remove='mutations_and_views'))(inpt)
>>>
>>> print(f_traced.code)
def forward(self, a_1):
add = torch.ops.aten.add(a_1, 1); a_1 = None
view = torch.ops.aten.view(add, [-1])
add_ = torch.ops.aten.add_(view, 1); view = None
return add
>>> print(f_no_mutations_traced.code)
def forward(self, a_1):
add = torch.ops.aten.add(a_1, 1); a_1 = None
view = torch.ops.aten.view(add, [-1]); add = None
add_1 = torch.ops.aten.add(view, 1); view = None
view_1 = torch.ops.aten.view(add_1, [2]); add_1 = None
return view_1
>>> print(f_no_mutations_and_views_traced.code)
def forward(self, a_1):
add = torch.ops.aten.add(a_1, 1); a_1 = None
view_copy = torch.ops.aten.view_copy(add, [-1]); add = None
add_1 = torch.ops.aten.add(view_copy, 1); view_copy = None
view_copy_1 = torch.ops.aten.view_copy(add_1, [2]); add_1 = None
return view_copy_1
>>> # A function that mutates its input tensor
>>> def f(a):
... b = a.view(-1)
... b.add_(1)
... return a
...
>>> f_no_mutations_and_views_traced = make_fx(functionalize(f, remove='mutations_and_views'))(inpt)
>>>
>>> # All mutations and views have been removed,
>>> # but there is an extra copy_ in the graph to correctly apply the mutation to the input
>>> # after the function has completed.
>>> print(f_no_mutations_and_views_traced.code)
def forward(self, a_1):
view_copy = torch.ops.aten.view_copy(a_1, [-1])
add = torch.ops.aten.add(view_copy, 1); view_copy = None
view_copy_1 = torch.ops.aten.view_copy(add, [2]); add = None
copy_ = torch.ops.aten.copy_(a_1, view_copy_1); a_1 = None
return view_copy_1
There are a few "failure modes" for functionalize that are worth calling out:
(1) Like other functorch transforms, `functionalize()` doesn't work with functions
that directly use `.backward()`. The same is true for torch.autograd.grad.
If you want to use autograd, you can compute gradients directly
with `functionalize(grad(f))`.
(2) Like other functorch transforms, `functionalize()` doesn't work with global state.
If you call `functionalize(f)` on a function that takes views / mutations of
non-local state, functionalization will simply no-op and pass the view/mutation
calls directly to the backend.
One way to work around this is to ensure that any non-local state creation
is wrapped into a larger function, which you then call functionalize on.
(3) `resize_()` has some limitations: functionalize will only work on programs
that use `resize_()`, as long as the tensor being resized is not a view.
(4) `as_strided()` has some limitations: functionalize will not work on
`as_strided()` calls that result in tensors with overlapping memory.
Finally, a helpful mental model for understanding functionalization is that
most user PyTorch programs are written using the public torch API.
When executed, torch operators are generally decomposed into
our internal C++ "ATen" API.
The logic for functionalization happens entirely at the level of ATen.
Functionalization knows how to take every aliasing operator in ATen,
and map it to its non-aliasing equivalent
(e.g. ``tensor.view({-1})`` -> ``at::view_copy(tensor, {-1})``),
and how to take every mutating operator in ATen,
and map it to its non-mutating equivalent
(e.g. ``tensor.add_(1)`` -> ``at::add(tensor, 1)``),
while tracking aliases and mutations out-of-line to know when to fix things up.
Information about which ATen operators are aliasing or mutating all comes from
https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/native_functions.yaml.
"""
if remove == 'mutations':
reapply_views = True
elif remove == 'mutations_and_views':
reapply_views = False
else:
raise RuntimeError(
f"functionalize(f, remove='mutations'): received invalid argument for remove={remove}."
" Valid options are:\n"
" remove='mutations': all inplace and out= operators will be removed from the program, and replaced"
" with their out-of-place equivalents.\n"
" remove='mutations_and_views': In addition to the above, all aliasing operators {view} will be"
" replaced with their non-aliasing counterparts, {view}_copy.\n"
)
@wraps(func)
def wrapped(*args, **kwargs):
try:
func_level = _func_increment_nesting(reapply_views)
func_args = _wrap_all_tensors_to_functional(args, func_level)
func_kwargs = _wrap_all_tensors_to_functional(kwargs, func_level)
flattened_unwrapped_args, _ = tree_flatten(args)
flattened_wrapped_args, _ = tree_flatten(func_args)
flattened_unwrapped_kwargs, _ = tree_flatten(kwargs)
flattened_wrapped_kwargs, _ = tree_flatten(func_kwargs)
func_outputs = func(*func_args, **func_kwargs)
outputs = _unwrap_all_tensors_from_functional(func_outputs, reapply_views=reapply_views)
flat_outputs, func_out_spec = tree_flatten(outputs)
for a in flattened_wrapped_args + flattened_wrapped_kwargs:
if isinstance(a, torch.Tensor):
# Call sync_() on the inputs, to ensure that any pending mutations have been applied.
torch._sync(a)
# And if any mutations were applied to the inputs, we need to propagate them back to the user.
for unwrapped, wrapped in zip(flattened_unwrapped_args, flattened_wrapped_args):
if isinstance(unwrapped, torch.Tensor) and isinstance(wrapped, torch.Tensor):
_propagate_functional_input_mutation(unwrapped, wrapped)
for unwrapped, wrapped in zip(flattened_unwrapped_kwargs, flattened_wrapped_kwargs):
if isinstance(unwrapped, torch.Tensor) and isinstance(wrapped, torch.Tensor):
_propagate_functional_input_mutation(unwrapped, wrapped)
return outputs
finally:
_func_decrement_nesting()
return wrapped
def _register_jit_decomposition(decomp, use_python=False):
if decomp in decomposition_table_for_jvp:
decomposition_table_used = decomposition_table_for_jvp
elif decomp in decomposition_table:
decomposition_table_used = decomposition_table
else:
raise RuntimeError(f"could not find decomposition for {decomp}")
decomp_fn = decomposition_table_used[decomp]
if use_python:
decomp_fn = torch.jit.ignore(decomp_fn)
sig = inspect.signature(decomp_fn)
# Create a string wrapping the function from the signature
# example output:
# def wrapped_decomp(x: torch.Tensor, y: int, z: int):
# return decomp_fn(x, y, z)
# Thanks copilot!
def get_function_def(sig):
param_def = [f"{param_str}" for param_str in sig.parameters.values()]
param_use = [f"{param_str}" for param_str in sig.parameters.keys()]
return f"def wrapped_decomp({', '.join(param_def)}):\n return decomp_fn({', '.join(param_use)})\n"
f_str = get_function_def(sig)
graph = torch.jit.CompilationUnit(f_str).wrapped_decomp.graph
else:
graph = torch.jit.script(decomp_fn).graph
torch.jit._register_decomposition(decomp, graph)
# use an alternate way to register an operator into the decomposition table
# _register_jit_decomposition doesn't work for some operators, e.g. addr,
# because the Tensor types generated cannot be unioned by torchscript
# decomp should be type OpOverload
vmap_decompositions_lib = torch.library.Library("aten", "IMPL", "FuncTorchBatched")
def _register_python_decomposition_vmap(decomp):
if decomp in decomposition_table:
vmap_decompositions_lib.impl(decomp, decomposition_table[decomp])
else:
raise RuntimeError(f"could not find decomposition for {decomp}")
_register_jit_decomposition(torch.ops.aten.trace.default, use_python=True)
_register_jit_decomposition(torch.ops.aten.nll_loss_backward.default)
_register_jit_decomposition(torch.ops.aten.nll_loss2d_backward.default)
_register_jit_decomposition(torch.ops.aten._log_softmax_backward_data.default)
_register_jit_decomposition(torch.ops.aten._softmax_backward_data.default)
_register_jit_decomposition(torch.ops.aten.log_sigmoid_forward.default)
_register_jit_decomposition(torch.ops.aten.native_layer_norm_backward.default)
_register_jit_decomposition(torch.ops.aten.native_batch_norm_backward.default)
_register_jit_decomposition(torch.ops.aten.cudnn_batch_norm_backward.default)
_register_python_decomposition_vmap(torch.ops.aten.mse_loss_backward.default)
_register_python_decomposition_vmap(torch.ops.aten.addr.default)
# repo: pytorch-master | path: functorch/functorch/_src/eager_transforms.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import functools
from torch import Tensor
from typing import Any, Callable, Optional, Tuple, Union, List
from torch.utils._pytree import tree_flatten, tree_unflatten, _broadcast_to_and_flatten, TreeSpec
from .pytree_hacks import tree_map_
from functools import partial
from functorch._C import (
_add_batch_dim,
_remove_batch_dim,
_vmap_decrement_nesting,
_vmap_increment_nesting,
)
in_dims_t = Union[int, Tuple]
out_dims_t = Union[int, Tuple[int, ...]]
# Checks that all args-to-be-batched have the same batch dim size
def _validate_and_get_batch_size(
flat_in_dims: List[Optional[int]],
flat_args: List) -> int:
batch_sizes = [arg.size(in_dim) for in_dim, arg in zip(flat_in_dims, flat_args)
if in_dim is not None]
if len(batch_sizes) == 0:
raise ValueError('vmap: Expected at least one Tensor to vmap over')
if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes):
raise ValueError(
f'vmap: Expected all tensors to have the same size in the mapped '
f'dimension, got sizes {batch_sizes} for the mapped dimension')
return batch_sizes[0]
def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int:
if isinstance(batched_outputs, tuple):
return len(batched_outputs)
return 1
# If value is a tuple, check it has length `num_elements`.
# If value is not a tuple, make a tuple with `value` repeated `num_elements` times
def _as_tuple(value: Any, num_elements: int, error_message_lambda: Callable[[], str]) -> Tuple:
if not isinstance(value, tuple):
return (value,) * num_elements
if len(value) != num_elements:
raise ValueError(error_message_lambda())
return value
def _process_batched_inputs(
in_dims: in_dims_t, args: Tuple, func: Callable
) -> Tuple[int, List[Any], List[Any], TreeSpec]:
if not isinstance(in_dims, int) and not isinstance(in_dims, tuple):
raise ValueError(
f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '
f'expected `in_dims` to be int or a (potentially nested) tuple '
f'matching the structure of inputs, got: {type(in_dims)}.')
if len(args) == 0:
raise ValueError(
f'vmap({_get_name(func)})(<inputs>): got no inputs. Maybe you forgot to add '
f'inputs, or you are trying to vmap over a function with no inputs. '
f'The latter is unsupported.')
flat_args, args_spec = tree_flatten(args)
flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec)
if flat_in_dims is None:
raise ValueError(
f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '
f'in_dims is not compatible with the structure of `inputs`. '
f'in_dims has structure {tree_flatten(in_dims)[1]} but inputs '
f'has structure {args_spec}.')
for i, (arg, in_dim) in enumerate(zip(flat_args, flat_in_dims)):
if not isinstance(in_dim, int) and in_dim is not None:
raise ValueError(
f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '
f'Got in_dim={in_dim} for an input but in_dim must be either '
f'an integer dimension or None.')
if isinstance(in_dim, int) and not isinstance(arg, Tensor):
raise ValueError(
f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '
f'Got in_dim={in_dim} for an input but the input is of type '
f'{type(arg)}. We cannot vmap over non-Tensor arguments, '
f'please use None as the respective in_dim')
if in_dim is not None and (in_dim < -arg.dim() or in_dim >= arg.dim()):
raise ValueError(
f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '
f'Got in_dim={in_dim} for some input, but that input is a Tensor '
f'of dimensionality {arg.dim()} so expected in_dim to satisfy '
f'-{arg.dim()} <= in_dim < {arg.dim()}.')
if in_dim is not None and in_dim < 0:
flat_in_dims[i] = in_dim % arg.dim()
return _validate_and_get_batch_size(flat_in_dims, flat_args), flat_in_dims, flat_args, args_spec
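# For example (illustrative): with in_dims=(0, None), only the first argument
# is treated as batched; f here stands for the function being vmapped,
#
#   x, y = torch.randn(3, 5), torch.randn(5)
#   batch_size, flat_in_dims, flat_args, spec = \
#       _process_batched_inputs((0, None), (x, y), f)
#   # batch_size == 3 and flat_in_dims == [0, None]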
# Creates BatchedTensors for every Tensor in arg that should be batched.
# Returns the (potentially) batched arguments and the batch_size.
def _create_batched_inputs(
flat_in_dims: List[Any], flat_args: List[Any], vmap_level: int, args_spec) -> Tuple:
# See NOTE [Ignored _remove_batch_dim, _add_batch_dim]
batched_inputs = [arg if in_dim is None else
_add_batch_dim(arg, in_dim, vmap_level)
for in_dim, arg in zip(flat_in_dims, flat_args)]
return tree_unflatten(batched_inputs, args_spec)
# Undoes the batching (and any batch dimensions) associated with the `vmap_level`.
def _unwrap_batched(
batched_outputs: Union[Tensor, Tuple[Tensor, ...]],
out_dims: out_dims_t,
vmap_level: int, batch_size: int, func: Callable) -> Tuple:
flat_batched_outputs, output_spec = tree_flatten(batched_outputs)
for out in flat_batched_outputs:
if isinstance(out, torch.Tensor):
continue
raise ValueError(f'vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return '
f'Tensors, got type {type(out)} as a return.')
def incompatible_error():
raise ValueError(
f'vmap({_get_name(func)}, ..., out_dims={out_dims})(<inputs>): '
f'out_dims is not compatible with the structure of `outputs`. '
f'out_dims has structure {tree_flatten(out_dims)[1]} but outputs '
f'has structure {output_spec}.')
if isinstance(batched_outputs, torch.Tensor):
# Some weird edge case requires us to spell out the following
# see test_out_dims_edge_case
if isinstance(out_dims, int):
flat_out_dims = [out_dims]
elif isinstance(out_dims, tuple) and len(out_dims) == 1:
flat_out_dims = out_dims
out_dims = out_dims[0]
else:
incompatible_error()
else:
flat_out_dims = _broadcast_to_and_flatten(out_dims, output_spec)
if flat_out_dims is None:
incompatible_error()
flat_outputs = [
_remove_batch_dim(batched_output, vmap_level, batch_size, out_dim)
for batched_output, out_dim in zip(flat_batched_outputs, flat_out_dims)
]
return tree_unflatten(flat_outputs, output_spec)
def _check_int(x, func, out_dims):
if isinstance(x, int):
return
raise ValueError(
f'vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be '
f'an int or a python collection of ints representing where in the outputs the '
f'vmapped dimension should appear.')
def _check_out_dims_is_int_or_int_pytree(out_dims: out_dims_t, func: Callable) -> None:
if isinstance(out_dims, int):
return
tree_map_(partial(_check_int, func=func, out_dims=out_dims), out_dims)
def _get_name(func: Callable):
if hasattr(func, '__name__'):
return func.__name__
    # Not all callables have a __name__; in fact, only static functions/methods do.
    # A callable created via functools.partial or an nn.Module, to name some
    # examples, doesn't have a __name__.
return repr(func)
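# Illustrative sketch (not part of the original module):
#
#   _get_name(torch.dot)                     # 'dot'
#   _get_name(functools.partial(torch.dot))  # falls back to repr(), e.g.
#                                            # 'functools.partial(<built-in method dot ...>)'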
# vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors,
# sends those into func, and then unwraps the output BatchedTensors. Operations
# on BatchedTensors perform the batched operations that the user is asking for.
#
# vmap's randomness behavior differs from JAX's, which would require a PRNG key
# to be passed everywhere.
def vmap(
func: Callable,
in_dims: in_dims_t = 0,
out_dims: out_dims_t = 0,
randomness: str = 'error') -> Callable:
"""
vmap is the vectorizing map; ``vmap(func)`` returns a new function that
maps :attr:`func` over some dimension of the inputs. Semantically, vmap
pushes the map into PyTorch operations called by :attr:`func`, effectively
vectorizing those operations.
vmap is useful for handling batch dimensions: one can write a function
:attr:`func` that runs on examples and then lift it to a function that can
take batches of examples with ``vmap(func)``. vmap can also be used to
compute batched gradients when composed with autograd.
Args:
func (function): A Python function that takes one or more arguments.
Must return one or more Tensors.
in_dims (int or nested structure): Specifies which dimension of the
inputs should be mapped over. :attr:`in_dims` should have a
structure like the inputs. If the :attr:`in_dim` for a particular
input is None, then that indicates there is no map dimension.
Default: 0.
out_dims (int or Tuple[int]): Specifies where the mapped dimension
should appear in the outputs. If :attr:`out_dims` is a Tuple, then
it should have one element per output. Default: 0.
randomness (str): Specifies whether the randomness in this
vmap should be the same or different across batches. If 'different',
the randomness for each batch will be different. If 'same', the
randomness will be the same across batches. If 'error', any calls to
random functions will error. Default: 'error'. WARNING: this flag
only applies to random PyTorch operations and does not apply to
Python's random module or numpy randomness.
Returns:
Returns a new "batched" function. It takes the same inputs as
:attr:`func`, except each input has an extra dimension at the index
        specified by :attr:`in_dims`. It returns the same outputs as
:attr:`func`, except each output has an extra dimension at the index
specified by :attr:`out_dims`.
.. warning:
:func:`vmap` works best with functional-style code. Please do not
perform any side-effects in :attr:`func`, with the exception of
in-place PyTorch operations. Examples of side-effects include mutating
Python data structures and assigning values to variables not captured
in :attr:`func`.
One example of using :func:`vmap` is to compute batched dot products. PyTorch
doesn't provide a batched ``torch.dot`` API; instead of unsuccessfully
rummaging through docs, use :func:`vmap` to construct a new function.
>>> torch.dot # [D], [D] -> []
>>> batched_dot = functorch.vmap(torch.dot) # [N, D], [N, D] -> [N]
>>> x, y = torch.randn(2, 5), torch.randn(2, 5)
>>> batched_dot(x, y)
:func:`vmap` can be helpful in hiding batch dimensions, leading to a simpler
model authoring experience.
>>> batch_size, feature_size = 3, 5
>>> weights = torch.randn(feature_size, requires_grad=True)
>>>
>>> def model(feature_vec):
>>> # Very simple linear model with activation
>>> return feature_vec.dot(weights).relu()
>>>
>>> examples = torch.randn(batch_size, feature_size)
>>> result = functorch.vmap(model)(examples)
:func:`vmap` can also help vectorize computations that were previously difficult
or impossible to batch. One example is higher-order gradient computation.
The PyTorch autograd engine computes vjps (vector-Jacobian products).
Computing a full Jacobian matrix for some function f: R^N -> R^N usually
requires N calls to ``autograd.grad``, one per Jacobian row. Using :func:`vmap`,
we can vectorize the whole computation, computing the Jacobian in a single
call to ``autograd.grad``.
>>> # Setup
>>> N = 5
>>> f = lambda x: x ** 2
>>> x = torch.randn(N, requires_grad=True)
>>> y = f(x)
>>> I_N = torch.eye(N)
>>>
>>> # Sequential approach
>>> jacobian_rows = [torch.autograd.grad(y, x, v, retain_graph=True)[0]
>>> for v in I_N.unbind()]
>>> jacobian = torch.stack(jacobian_rows)
>>>
>>> # vectorized gradient computation
>>> def get_vjp(v):
>>> return torch.autograd.grad(y, x, v)
>>> jacobian = functorch.vmap(get_vjp)(I_N)
:func:`vmap` can also be nested, producing an output with multiple batched dimensions
>>> torch.dot # [D], [D] -> []
>>> batched_dot = functorch.vmap(functorch.vmap(torch.dot)) # [N1, N0, D], [N1, N0, D] -> [N1, N0]
>>> x, y = torch.randn(2, 3, 5), torch.randn(2, 3, 5)
>>> batched_dot(x, y) # tensor of size [2, 3]
If the inputs are not batched along the first dimension, :attr:`in_dims` specifies
    the dimension along which each input is batched as
>>> torch.dot # [N], [N] -> []
>>> batched_dot = functorch.vmap(torch.dot, in_dims=1) # [N, D], [N, D] -> [D]
>>> x, y = torch.randn(2, 5), torch.randn(2, 5)
>>> batched_dot(x, y) # output is [5] instead of [2] if batched along the 0th dimension
If there are multiple inputs each of which is batched along different dimensions,
:attr:`in_dims` must be a tuple with the batch dimension for each input as
>>> torch.dot # [D], [D] -> []
>>> batched_dot = functorch.vmap(torch.dot, in_dims=(0, None)) # [N, D], [D] -> [N]
>>> x, y = torch.randn(2, 5), torch.randn(5)
>>> batched_dot(x, y) # second arg doesn't have a batch dim because in_dim[1] was None
If the input is a Python struct, :attr:`in_dims` must be a tuple containing a struct
matching the shape of the input:
>>> f = lambda dict: torch.dot(dict['x'], dict['y'])
>>> x, y = torch.randn(2, 5), torch.randn(5)
>>> input = {'x': x, 'y': y}
>>> batched_dot = functorch.vmap(f, in_dims=({'x': 0, 'y': None},))
>>> batched_dot(input)
By default, the output is batched along the first dimension. However, it can be batched
along any dimension by using :attr:`out_dims`
>>> f = lambda x: x ** 2
>>> x = torch.randn(2, 5)
>>> batched_pow = functorch.vmap(f, out_dims=1)
>>> batched_pow(x) # [5, 2]
For any function that uses kwargs, the returned function will not batch the kwargs but will
accept kwargs
>>> x = torch.randn([2, 5])
>>> def f(x, scale=4.):
>>> return x * scale
>>>
>>> batched_pow = functorch.vmap(f)
>>> assert torch.allclose(batched_pow(x), x * 4)
>>> batched_pow(x, scale=x) # scale is not batched, output has shape [2, 2, 5]
.. note::
vmap does not provide general autobatching or handle variable-length
sequences out of the box.
"""
_check_randomness_arg(randomness)
@functools.wraps(func)
def wrapped(*args, **kwargs):
_check_out_dims_is_int_or_int_pytree(out_dims, func)
batch_size, flat_in_dims, flat_args, args_spec = _process_batched_inputs(in_dims, args, func)
return _flat_vmap(
func, batch_size, flat_in_dims, flat_args, args_spec, out_dims, randomness, **kwargs
)
return wrapped
def chunk_vmap(
func: Callable,
in_dims: in_dims_t = 0,
out_dims: out_dims_t = 0,
randomness: str = 'error',
chunks=2) -> Callable:
"""
    chunk_vmap is the vectorizing map (vmap) using chunks of input data. It is a mix of vmap (which vectorizes
    everything) and map (which executes things sequentially). ``chunk_vmap`` splits the input into the given
    number of chunks and vectorizes over one chunk at a time. For more details about vectorizing map, see :func:`vmap`.
Args:
func (function): A Python function that takes one or more arguments.
Must return one or more Tensors.
in_dims (int or nested structure): Specifies which dimension of the
inputs should be mapped over. :attr:`in_dims` should have a
structure like the inputs. If the :attr:`in_dim` for a particular
input is None, then that indicates there is no map dimension.
Default: 0.
out_dims (int or Tuple[int]): Specifies where the mapped dimension
should appear in the outputs. If :attr:`out_dims` is a Tuple, then
it should have one element per output. Default: 0.
randomness (str): Specifies whether the randomness in this
vmap should be the same or different across batches. If 'different',
the randomness for each batch will be different. If 'same', the
randomness will be the same across batches. If 'error', any calls to
random functions will error. Default: 'error'. WARNING: this flag
only applies to random PyTorch operations and does not apply to
Python's random module or numpy randomness.
chunks (int): Number of chunks to use to split the input data. Default is 2.
            If equal to 1, then :func:`vmap` is called.
Returns:
Returns a new "batched" function. It takes the same inputs as
:attr:`func`, except each input has an extra dimension at the index
        specified by :attr:`in_dims`. It returns the same outputs as
:attr:`func`, except each output has an extra dimension at the index
specified by :attr:`out_dims`.
"""
_check_randomness_arg(randomness)
if chunks == 1:
return vmap(func, in_dims=in_dims, out_dims=out_dims, randomness=randomness)
def _get_chunk_flat_args(flat_args_, flat_in_dims_, chunks_):
flat_args_chunks = tuple(
t.chunk(chunks_, dim=in_dim) if in_dim is not None else [t, ] * chunks_
for t, in_dim in zip(flat_args_, flat_in_dims_)
)
# transpose chunk dim and flatten structure
        # chunks_flat_args: each element is the flattened args for one chunk
chunks_flat_args = zip(*flat_args_chunks)
return chunks_flat_args
def _flatten_chunks_output(chunks_output_):
# chunks_output is a list of chunked outputs
# flatten chunked outputs:
flat_chunks_output = []
arg_spec_list = []
for output in chunks_output_:
flat_output, arg_specs = tree_flatten(output)
flat_chunks_output.append(flat_output)
arg_spec_list.append(arg_specs)
arg_spec = arg_spec_list[0] # all specs should be the same
# transpose chunk dim and flatten structure
# flat_output_chunks is flat list of chunks
flat_output_chunks = list(zip(*flat_chunks_output))
return flat_output_chunks, arg_spec
@functools.wraps(func)
def wrapped_with_chunks(*args, **kwargs):
_check_out_dims_is_int_or_int_pytree(out_dims, func)
_, flat_in_dims, flat_args, args_spec = _process_batched_inputs(in_dims, args, func)
# Chunk flat arguments
chunks_flat_args = _get_chunk_flat_args(flat_args, flat_in_dims, chunks)
# Apply vmap on chunks
chunks_output = []
rs = torch.get_rng_state() if randomness == "same" else None
for flat_args in chunks_flat_args:
batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args)
if rs is not None:
torch.set_rng_state(rs)
chunks_output.append(
_flat_vmap(
func, batch_size, flat_in_dims, flat_args, args_spec, out_dims, randomness, **kwargs
)
)
flat_output_chunks, arg_spec = _flatten_chunks_output(chunks_output)
        # Removing temporary variables helps to reduce memory usage on devices like CUDA
del chunks_output
# concat chunks on out_dim
flat_out_dims = _broadcast_to_and_flatten(out_dims, arg_spec)
assert len(flat_out_dims) == len(flat_output_chunks)
flat_output = []
for out_dim in flat_out_dims:
flat_output.append(torch.cat(flat_output_chunks[0], dim=out_dim))
# release source data
del flat_output_chunks[0]
del flat_output_chunks
# finally unflatten the output
return tree_unflatten(flat_output, arg_spec)
return wrapped_with_chunks
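# Illustrative usage sketch (not part of the original module): chunk_vmap
# trades peak memory for sequential execution over chunks while producing the
# same result as vmap.
#
#   x = torch.randn(1024, 5)
#   out = chunk_vmap(torch.sum, chunks=4)(x)   # equivalent to vmap(torch.sum)(x)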
# Vmap refactored helper functions:
def _check_randomness_arg(randomness):
if randomness not in ['error', 'different', 'same']:
raise RuntimeError(f"Only allowed values for randomness are 'error', 'different', or 'same'. Got {randomness}")
def _flat_vmap(func, batch_size, flat_in_dims, flat_args, args_spec, out_dims, randomness, **kwargs):
vmap_level = _vmap_increment_nesting(batch_size, randomness)
try:
batched_inputs = _create_batched_inputs(flat_in_dims, flat_args, vmap_level, args_spec)
batched_outputs = func(*batched_inputs, **kwargs)
return _unwrap_batched(batched_outputs, out_dims, vmap_level, batch_size, func)
finally:
_vmap_decrement_nesting()
| pytorch-master | functorch/functorch/_src/vmap.py |
import time
import os
import json
import torch
from torch.profiler import profile, ProfilerActivity
def synchronize():
pass
class NullContext:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def dump_chrome_trace(f, input, trace_filename, optimize_ctx, activities, num_runs=1,
devices=None, kwargs_for_f=None, kwargs_for_profiler=None):
"""
    Output the chrome trace of running f(input, **kwargs_for_f) under [optimize_ctx]
    [num_runs] times to [trace_filename].
    [activities] are the activities that the profiler will record, e.g. ProfilerActivity.CUDA.
    Returns the total runtime without the profiler.
"""
if devices is None:
devices = ["cuda"]
global synchronize
if devices != ["cpu"] and torch.cuda.is_available():
synchronize = torch.cuda.synchronize
if kwargs_for_f is None:
kwargs_for_f = {}
if kwargs_for_profiler is None:
kwargs_for_profiler = {}
with optimize_ctx:
torch.manual_seed(1337)
for _ in range(5): # warmup runs
f(input, **kwargs_for_f)
synchronize()
torch.manual_seed(1337)
t0 = time.perf_counter()
for _ in range(num_runs):
f(input, **kwargs_for_f)
synchronize()
t1 = time.perf_counter()
timing = t1 - t0
with profile(activities=activities, **kwargs_for_profiler) as prof:
with optimize_ctx:
synchronize()
torch.manual_seed(1337)
for _ in range(num_runs):
f(input, **kwargs_for_f)
synchronize()
prof.export_chrome_trace(trace_filename)
return timing
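# Illustrative usage sketch (not part of the original module):
#
#   def f(x):
#       return (x * x).sum()
#   t = dump_chrome_trace(f, torch.randn(1 << 20, device="cuda"), "trace.json",
#                         NullContext(), [ProfilerActivity.CUDA])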
def get_chrome_trace_events(filename):
    with open(filename) as f:
        data = json.load(f)
    events = data["traceEvents"]
    return events
def is_gpu_compute_event(event):
global gpu_pids
return "pid" in event and event["pid"] in gpu_pids and "ph" in event and event["ph"] == "X"
def get_sorted_gpu_events(events):
sorted_gpu_events = []
for event in events:
        if not is_gpu_compute_event(event):
continue
sorted_gpu_events.append(event)
return sorted(sorted_gpu_events, key=lambda x: x["ts"])
def get_duration(sorted_gpu_events):
if len(sorted_gpu_events) == 0:
return 0
event = sorted_gpu_events[0]
current_end_time = event["ts"] + event["dur"]
total_duration = event["dur"]
for event in sorted_gpu_events[1:]:
start_time = max(event["ts"], current_end_time)
end_time = event["ts"] + event["dur"]
total_duration = total_duration + max(end_time - start_time, 0)
current_end_time = max(current_end_time, end_time)
return total_duration
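# Illustrative sketch (not part of the original module): overlapping events are
# counted once, so the result is the total length of the union of the intervals.
#
#   events = [{"ts": 0, "dur": 10}, {"ts": 5, "dur": 10}, {"ts": 30, "dur": 5}]
#   get_duration(events)   # -> 20, i.e. [0, 15] merged plus [30, 35]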
def get_sorted_gpu_mm_conv_events(events):
def is_mm_conv_event(event):
return "name" in event and ("gemm" in event["name"] or "conv" in event["name"]
or "cutlass" in event["name"] or "wgrad" in event["name"])
gpu_events = get_sorted_gpu_events(events)
sorted_events = []
for event in gpu_events:
        if not is_mm_conv_event(event):
continue
sorted_events.append(event)
return sorted_events
gpu_pids = []
def compute_utilization(filename: str, total_length: float):
"""
    Process the chrome trace output by the PyTorch profiler to compute GPU utilization
    and the percent of time spent on matmul and convolution
    Args:
        filename(str): Name of the chrome trace file produced by the PyTorch profiler
        total_length(float): total length of the process without the profiler, in seconds
    Return:
        tuple: (GPU utilization, percent of time spent on matmul and convolution)
"""
events = get_chrome_trace_events(filename)
# get pids of GPU events
global gpu_pids
gpu_pids = []
for event in events:
if "name" not in event:
continue
if event["name"] == 'process_labels' and "GPU" in event["args"]["labels"]:
gpu_pids.append(event["pid"])
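    # convert total_length from seconds to microseconds, since chrome trace
    # timestamps ("ts"/"dur") are in microseconds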
total_length = total_length * 1e6
sorted_gpu_events = get_sorted_gpu_events(events)
utilization = get_duration(sorted_gpu_events) / total_length
sorted_gpu_mm_conv_events = get_sorted_gpu_mm_conv_events(events)
mm_conv_utilization = get_duration(sorted_gpu_mm_conv_events) / total_length
return utilization, mm_conv_utilization
def benchmark_utilization(f, input, trace_folder, optimize_ctx=None, trace_file_name="tmp_chrome_trace", num_runs=1):
"""
    Benchmark the GPU utilization and the percent of time spent on matmul and convolution operations
    when running f(input, **kwargs_for_f) under [optimize_ctx], [num_runs] times.
It will produce a chrome trace file in trace_folder/trace_file_name.json
Example:
```
def f(a):
return a.sum()
a = torch.rand(2**20, device="cuda")
utilization, mm_conv_utilization = benchmark_utilization(f, a, "tmp", trace_file_name = "tmp_chrome_trace")
```
Args:
f: function to benchmark
input: input to :attr:`f`
trace_folder: name of the folder to store the chrome trace
optimize_ctx: the context in which f will run
trace_file_name: name of the dumped chrome trace file, default to "tmp_chrome_trace"
num_runs: number of times to run f, excluding the warm-up runs, default to 1.
Return:
        tuple: (GPU utilization, percent of time spent on matmul and convolution)
"""
    if not os.path.exists(trace_folder):
        os.makedirs(trace_folder)
        print("create folder " + trace_folder)
if optimize_ctx is None:
optimize_ctx = NullContext()
chrome_trace_file_name = os.path.join(trace_folder, trace_file_name + ".json")
total_length = dump_chrome_trace(f, input, chrome_trace_file_name, optimize_ctx,
                                     [ProfilerActivity.CUDA], num_runs=num_runs, devices=["cuda"])
utilization, mm_conv_utilization = compute_utilization(chrome_trace_file_name, total_length)
return utilization, mm_conv_utilization
| pytorch-master | functorch/functorch/_src/benchmark_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch import Tensor
from typing import Dict, List, Tuple
from .named_members_polyfill import _named_parameters, _named_buffers
import copy
# Utilities to make nn.Module "functional"
# In particular the goal is to be able to provide a function that takes as input
# the parameters and evaluate the nn.Module using fixed inputs.
def _del_nested_attr(obj: nn.Module, names: List[str]) -> None:
"""
Deletes the attribute specified by the given list of names.
For example, to delete the attribute obj.conv.weight,
use _del_nested_attr(obj, ['conv', 'weight'])
"""
if len(names) == 1:
delattr(obj, names[0])
else:
_del_nested_attr(getattr(obj, names[0]), names[1:])
def _set_nested_attr(obj: nn.Module, names: List[str], value: Tensor) -> None:
"""
Set the attribute specified by the given list of names to value.
For example, to set the attribute obj.conv.weight,
    use _set_nested_attr(obj, ['conv', 'weight'], value)
"""
if len(names) == 1:
setattr(obj, names[0], value)
else:
_set_nested_attr(getattr(obj, names[0]), names[1:], value)
def _get_nested_attr(obj: nn.Module, names: List[str]) -> Tensor:
    if len(names) == 1:
        return getattr(obj, names[0])
    else:
        return _get_nested_attr(getattr(obj, names[0]), names[1:])
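# Illustrative sketch (not part of the original module): the three helpers walk
# dotted attribute paths on a module.
#
#   mod = nn.Sequential(nn.Linear(3, 3))
#   w = _get_nested_attr(mod, ['0', 'weight'])
#   _del_nested_attr(mod, ['0', 'weight'])
#   _set_nested_attr(mod, ['0', 'weight'], torch.zeros(3, 3))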
def raise_parameter_tying_error():
raise RuntimeError(
"make_functional(module): we don't yet support models that "
"do parameter tying (also sometimes known as weight sharing). "
"Please try to rewrite your model by replacing all instances of the "
"tied parameter with another and/or comment your support in "
"https://github.com/pytorch/functorch/issues/446")
def create_names_map(named_params, tied_named_params):
"""
named_params is a dictionary of tensors: {'A': A, 'B': B}
tied_named_params is another dictionary of tensors {'A': A, 'B': B, 'B_tied': B}
with potentially tied (or 'duplicated') tensors
This function creates a mapping from the names in named_params to the
names in tied_named_params: {'A': ['A'], 'B': ['B', 'B_tied']}.
"""
named_params = {k: v for k, v in named_params}
tied_named_params = {k: v for k, v in tied_named_params}
tensors_dict_keys = set(named_params.keys())
tied_tensors_dict_keys = set(tied_named_params.keys())
assert tensors_dict_keys.issubset(tied_tensors_dict_keys)
tensor_to_mapping = {}
for key, tensor in named_params.items():
tensor_to_mapping[tensor] = (key, [])
for key, tensor in tied_named_params.items():
assert tensor in tensor_to_mapping
tensor_to_mapping[tensor][1].append(key.split('.'))
result = {key: value for key, value in tensor_to_mapping.values()}
return result
def _extract_members(mod: nn.Module, _named_members, named_members, subclass):
all_named_members = tuple(_named_members(mod, remove_duplicate=False))
named_members = tuple(named_members())
names_map = create_names_map(named_members, all_named_members)
# Remove all the members in the model
memo = {}
for name, p in all_named_members:
if p not in memo:
memo[p] = subclass(torch.empty_like(p, device='meta'))
replacement = memo[p]
_set_nested_attr(mod, name.split("."), replacement)
if len(named_members) == 0:
names, params = (), ()
else:
names, params = zip(*named_members)
return params, names, names_map
def extract_weights(mod: nn.Module):
"""
    This function removes all the Parameters from the model and
    returns them as a tuple, as well as their original attribute names.
The weights must be re-loaded with `load_weights` before the model
can be used again.
Note that this function modifies the model in place and after this
call, mod.parameters() will be empty.
"""
return _extract_members(mod, _named_parameters, mod.named_parameters, nn.Parameter)
def extract_buffers(mod: nn.Module):
return _extract_members(mod, _named_buffers, mod.named_buffers, lambda x: x)
def load_weights(mod: nn.Module, names: List[str], params: Tuple[Tensor, ...], as_params=False) -> None:
"""
Reload a set of weights so that `mod` can be used again to perform a forward pass.
Note that the `params` are regular Tensors (that can have history) and so are left
as Tensors. This means that mod.parameters() will still be empty after this call.
"""
for name, p in zip(names, params):
if as_params:
p = nn.Parameter(p)
_del_nested_attr(mod, name.split("."))
_set_nested_attr(mod, name.split("."), p)
def _swap_state(mod: nn.Module, names_map: Dict[str, List[List[str]]], elems):
result = []
for (_, attr_names), elem in zip(names_map.items(), elems):
for i, attr_name in enumerate(attr_names):
if i == 0:
result.append(_get_nested_attr(mod, attr_name))
_del_nested_attr(mod, attr_name)
_set_nested_attr(mod, attr_name, elem)
return result
def load_buffers(mod: nn.Module, names: List[str], buffers: Tuple[Tensor, ...], as_params=False) -> None:
for name, p in zip(names, buffers):
_set_nested_attr(mod, name.split("."), p)
def load_state(
model: nn.Module,
weights: List[Tensor], weight_names: List[str],
buffers=(), buffer_names=()):
"""load_state(model, weights, weight_names, buffers=(), buffer_names=()) -> model
load_state takes `weights` and `buffers` and assigns them to the model.
This is the inverse operation of `make_functional_deprecated_v1`.
"""
assert len(weight_names) == len(weights)
load_weights(model, weight_names, weights)
if len(buffers) > 0:
assert len(buffer_names) == len(buffers)
load_buffers(model, buffer_names, buffers)
return model
def make_functional_deprecated_v1(model: nn.Module):
"""make_functional_deprecated_v1(model) -> weights, func, weight_names
Given an nn.Module, make_functional_deprecated_v1 extracts the state (weights)
and returns a functional version of the model, `func`. This makes
    it so that it is possible to use transforms over the parameters of
`model`.
`func` can be invoked as follows:
```
x = torch.randn(4, 3)
model = nn.Linear(3, 3)
weights, func, _ = make_functional_deprecated_v1(model)
func(weights, (x,))
```
And here is an example of applying the grad transform:
```
x = torch.randn(4, 3)
model = nn.Linear(3, 3)
weights, _, func = make_functional_deprecated_v1(model)
grad_weights = grad(func)(weights, (x,))
```
To put the state back into a model, use `load_state`.
"""
buffers = list(model.buffers())
if len(buffers) > 0:
raise RuntimeError('make_functional_deprecated_v1(model): `model` has buffers. Please use '
'make_functional_with_buffers_deprecated_v1(model) instead.')
weights, descriptors, _ = extract_weights(model)
def fun(weights, data):
mutable_model = copy.deepcopy(model)
load_weights(mutable_model, descriptors, weights)
return mutable_model(*data)
return weights, fun, descriptors
def make_functional_with_buffers_deprecated_v1(model: nn.Module):
"""make_functional_with_buffers_deprecated_v1(model) -> weights, buffers, func, weight_names, buffer_names
Given an nn.Module, make_functional_with_buffers_deprecated_v1 extracts the state (weights and buffers)
and returns a functional version of the model, `func`.
`func` can be invoked as follows:
```
x = torch.randn(4, 3)
model = nn.Linear(3, 3)
weights, buffers, func, _, _ = make_functional_with_buffers_deprecated_v1(model)
func(weights, buffers, (x,))
```
And here is an example of applying the grad transform:
```
x = torch.randn(4, 3)
model = nn.Linear(3, 3)
weights, buffers, func, _, _ = make_functional_with_buffers_deprecated_v1(model)
func(weights, buffers, (x,))
grad_weights = grad(func)(weights, buffers, (x,))
```
To put the state back into a model, use `load_state`.
"""
weights, weight_descriptors, _ = extract_weights(model)
buffers, buf_descriptors, _ = extract_buffers(model)
def fun(weights, buffers, data):
mutable_model = copy.deepcopy(model)
load_weights(mutable_model, weight_descriptors, weights)
load_buffers(mutable_model, buf_descriptors, buffers)
return mutable_model(*data)
return weights, buffers, fun, weight_descriptors, buf_descriptors
class FunctionalModuleWithBuffers(nn.Module):
"""
This is the callable object returned by :func:`make_functional_with_buffers`.
"""
def __init__(self, stateless_model, param_names, buffer_names,
param_names_map, buffer_names_map):
super(FunctionalModuleWithBuffers, self).__init__()
self.stateless_model = stateless_model
self.param_names = param_names
self.buffer_names = buffer_names
self.all_names_map = dict(param_names_map)
self.all_names_map.update(buffer_names_map)
@staticmethod
def _create_from(model, disable_autograd_tracking=False):
# TODO: We don't need to copy the model to create a stateless copy
model_copy = copy.deepcopy(model)
params, param_names, param_names_map = extract_weights(model_copy)
buffers, buffer_names, buffer_names_map = extract_buffers(model_copy)
if disable_autograd_tracking:
for param in params:
param.requires_grad_(False)
return (
FunctionalModuleWithBuffers(model_copy, param_names, buffer_names,
param_names_map, buffer_names_map),
params,
buffers,
)
def forward(self, params, buffers, *args, **kwargs):
# Temporarily load the state back onto self.stateless_model
old_state = _swap_state(
self.stateless_model,
self.all_names_map,
list(params) + list(buffers))
try:
return self.stateless_model(*args, **kwargs)
finally:
# Remove the loaded state on self.stateless_model
_swap_state(self.stateless_model, self.all_names_map, old_state)
class FunctionalModule(nn.Module):
"""
This is the callable object returned by :func:`make_functional`.
"""
def __init__(self, stateless_model, param_names, names_map):
super(FunctionalModule, self).__init__()
self.stateless_model = stateless_model
self.param_names = param_names
self.names_map = names_map
@staticmethod
def _create_from(model, disable_autograd_tracking=False):
# TODO: We don't need to copy the model to create a stateless copy
model_copy = copy.deepcopy(model)
params, param_names, names_map = extract_weights(model_copy)
if disable_autograd_tracking:
for param in params:
param.requires_grad_(False)
return FunctionalModule(model_copy, param_names, names_map), params
def forward(self, params, *args, **kwargs):
# Temporarily load the state back onto self.stateless_model
old_state = _swap_state(self.stateless_model, self.names_map, params)
try:
return self.stateless_model(*args, **kwargs)
finally:
# Remove the loaded state on self.stateless_model
_swap_state(self.stateless_model, self.names_map, old_state)
def make_functional(model: nn.Module, disable_autograd_tracking: bool = False):
"""make_functional(model, disable_autograd_tracking=False) -> func, params
Given a ``torch.nn.Module``, :func:`make_functional` extracts the state
(params) and returns a functional version of the model, ``func``. This
    makes it so that it is possible to use transforms over the parameters of
``model``.
``func`` can be invoked as follows:
.. code-block:: python
import torch
import torch.nn as nn
from functorch import make_functional
x = torch.randn(4, 3)
model = nn.Linear(3, 3)
func, params = make_functional(model)
func(params, x)
And here is an example of applying the grad transform over the parameters
of a model.
.. code-block:: python
import torch
import torch.nn as nn
from functorch import make_functional, grad
x = torch.randn(4, 3)
t = torch.randn(4, 3)
model = nn.Linear(3, 3)
func, params = make_functional(model)
def compute_loss(params, x, t):
y = func(params, x)
return nn.functional.mse_loss(y, t)
grad_weights = grad(compute_loss)(params, x, t)
If the model has any buffers, please use :func:`make_functional_with_buffers` instead.
Args:
model (torch.nn.Module): Input model.
disable_autograd_tracking (bool): Flag to disable gradients tracking for output parameters.
The returned params are unrelated to the set of params from the original model. If False (default),
the params will have ``requires_grad=True`` on them (aka they will be trackable with regular
PyTorch autograd), matching the requires_grad-ness of the params from the original model.
            Otherwise, the returned params will have ``requires_grad=False``. Default: False.
            If you plan on using regular PyTorch autograd (e.g., if you want to call ``.backward()`` or
            ``torch.autograd.grad()``), then set ``disable_autograd_tracking=False``.
Otherwise, if you're only planning on using functorch's gradient transforms,
then please set ``disable_autograd_tracking=True`` to avoid unnecessarily tracking
history with PyTorch autograd.
"""
buffers = list(model.buffers())
if len(buffers) > 0:
raise RuntimeError('make_functional(model): `model` has buffers. Please use '
'make_functional_with_buffers(model) instead.')
return FunctionalModule._create_from(model, disable_autograd_tracking=disable_autograd_tracking)
def make_functional_with_buffers(model: nn.Module, disable_autograd_tracking: bool = False):
"""make_functional_with_buffers(model, disable_autograd_tracking=False) -> func, params, buffers
Given a ``torch.nn.Module``, make_functional_with_buffers extracts the
state (params and buffers) and returns a functional version of the model
``func`` that can be invoked like a function.
``func`` can be invoked as follows:
.. code-block:: python
import torch
import torch.nn as nn
from functorch import make_functional_with_buffers
x = torch.randn(4, 3)
model = nn.Linear(3, 3)
func, params, buffers = make_functional_with_buffers(model)
func(params, buffers, x)
And here is an example of applying the grad transform over the parameters
of a model:
.. code-block:: python
import torch
import torch.nn as nn
from functorch import make_functional_with_buffers, grad
x = torch.randn(4, 3)
t = torch.randn(4, 3)
model = nn.Linear(3, 3)
func, params, buffers = make_functional_with_buffers(model)
def compute_loss(params, buffers, x, t):
y = func(params, buffers, x)
return nn.functional.mse_loss(y, t)
grad_weights = grad(compute_loss)(params, buffers, x, t)
Args:
model (torch.nn.Module): Input model.
disable_autograd_tracking (bool): Flag to disable gradients tracking for output parameters.
The returned params are unrelated to the set of params from the original model. If False (default),
the params will have ``requires_grad=True`` on them (aka they will be trackable with regular
PyTorch autograd), matching the requires_grad-ness of the params from the original model.
            Otherwise, the returned params will have ``requires_grad=False``. Default: False.
            If you plan on using regular PyTorch autograd (e.g., if you want to call ``.backward()`` or
            ``torch.autograd.grad()``), then set ``disable_autograd_tracking=False``.
Otherwise, if you're only planning on using functorch's gradient transforms,
then please set ``disable_autograd_tracking=True`` to avoid unnecessarily tracking
history with PyTorch autograd.
"""
return FunctionalModuleWithBuffers._create_from(model, disable_autograd_tracking=disable_autograd_tracking)
def transpose_stack(tuple_of_tuple_of_tensors):
tuple_of_tuple_of_tensors = tuple(zip(*tuple_of_tuple_of_tensors))
results = tuple(torch.stack(shards).detach() for shards in tuple_of_tuple_of_tensors)
return results
def combine_state_for_ensemble(models):
"""combine_state_for_ensemble(models) -> func, params, buffers
Prepares a list of torch.nn.Modules for ensembling with :func:`vmap`.
Given a list of ``M`` ``nn.Modules`` of the same class, stacks all of their
parameters and buffers together to make ``params`` and ``buffers``.
Each parameter and buffer in the result will have an additional dimension
of size ``M``.
:func:`combine_state_for_ensemble` also returns ``func``, a functional
    version of one of the models in :attr:`models`. One cannot run
    ``func(params, buffers, *args, **kwargs)`` directly; you probably want to
    use ``vmap(func, ...)(params, buffers, *args, **kwargs)``
Here's an example of how to ensemble over a very simple model:
.. code-block:: python
num_models = 5
batch_size = 64
in_features, out_features = 3, 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
data = torch.randn(batch_size, 3)
fmodel, params, buffers = combine_state_for_ensemble(models)
output = vmap(fmodel, (0, 0, None))(params, buffers, data)
assert output.shape == (num_models, batch_size, out_features)
.. warning::
All of the modules being stacked together must be the same (except for
the values of their parameters/buffers). For example, they should be in the
same mode (training vs eval).
This API is subject to change -- we're investigating better ways to
        create ensembles and would love your feedback on how to improve this.
"""
if len(models) == 0:
raise RuntimeError('combine_state_for_ensemble: Expected at least one model, got 0.')
if not (all(m.training for m in models) or all(not m.training for m in models)):
raise RuntimeError('combine_state_for_ensemble: Expected all models to '
'have the same training/eval mode.')
model0_typ = type(models[0])
if not all(type(m) == model0_typ for m in models):
raise RuntimeError('combine_state_for_ensemble: Expected all models to '
'be of the same class.')
funcs, params, buffers = zip(*[make_functional_with_buffers(model)
for model in models])
params = transpose_stack(params)
buffers = transpose_stack(buffers)
return funcs[0], params, buffers
def functional_init(model_class, ensemble_shape=(), device='cpu'):
def wrapped(*args, **kwargs):
if len(ensemble_shape) >= 2:
raise ValueError('NYI: ensemble_shape with more than 1 element')
if len(ensemble_shape) == 0:
model = model_class(*args, **kwargs).to(device)
return make_functional_deprecated_v1(model)
num_models = ensemble_shape[0]
if num_models <= 0:
raise ValueError(f"num_models {num_models} should be > 0")
# NB: Not very efficient, more of a POC
models = tuple(model_class(*args, **kwargs).to(device)
for _ in range(num_models))
_, fn, names = make_functional_deprecated_v1(model_class(*args, **kwargs))
weights = tuple(make_functional_deprecated_v1(model)[0] for model in models)
weights = tuple(zip(*weights))
weights = tuple(torch.stack(shards).detach() for shards in weights)
return weights, fn, names
return wrapped
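# Illustrative usage sketch (not part of the original module; uses the
# deprecated v1 API): build an ensemble of 5 linear layers.
#
#   weights, fn, names = functional_init(nn.Linear, ensemble_shape=(5,))(3, 3)
#   # each entry of `weights` is stacked with a leading dimension of size 5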
def functional_init_with_buffers(model_class, ensemble_shape=(), device='cpu'):
def wrapped(*args, **kwargs):
if len(ensemble_shape) >= 2:
raise ValueError('NYI: ensemble_shape with more than 1 element')
if len(ensemble_shape) == 0:
model = model_class(*args, **kwargs).to(device)
return make_functional_deprecated_v1(model)
num_models = ensemble_shape[0]
if num_models <= 0:
raise ValueError(f"num_models {num_models} should be > 0")
# NB: Not very efficient, more of a POC
models = tuple(model_class(*args, **kwargs).to(device)
for _ in range(num_models))
_, _, fn, weight_names, buffer_names = \
make_functional_with_buffers_deprecated_v1(model_class(*args, **kwargs))
weights, buffers = zip(*tuple(make_functional_with_buffers_deprecated_v1(model)[:2]
for model in models))
weights = tuple(zip(*weights))
weights = tuple(torch.stack(shards).detach() for shards in weights)
buffers = tuple(zip(*buffers))
buffers = tuple(torch.stack(shards).detach() for shards in buffers)
return weights, buffers, fn, weight_names, buffer_names
return wrapped
| pytorch-master | functorch/functorch/_src/make_functional.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import contextmanager
import os
import subprocess
import signal
@contextmanager
def magic_trace(output='trace.fxt', magic_trace_cache='/tmp/magic-trace'):
pid = os.getpid()
if not os.path.exists(magic_trace_cache):
print(f"Downloading magic_trace to: {magic_trace_cache}")
subprocess.run(['wget', '-O', magic_trace_cache, '-q',
'https://github.com/janestreet/magic-trace/releases/download/v1.0.2/magic-trace'])
subprocess.run(['chmod', '+x', magic_trace_cache])
args = [magic_trace_cache, 'attach', '-pid', str(pid), '-o', output]
p = subprocess.Popen(args, stderr=subprocess.PIPE, encoding='utf-8')
while True:
x = p.stderr.readline()
print(x)
if 'Attached' in x:
break
try:
yield
finally:
p.send_signal(signal.SIGINT)
r = p.wait()
print(p.stderr.read())
p.stderr.close()
if r != 0:
raise ValueError(f'magic_trace exited abnormally: {r}')
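# Illustrative usage sketch (not part of the original module; assumes a Linux
# environment where magic-trace can attach to the current process):
#
#   with magic_trace(output='trace.fxt'):
#       heavy_computation()   # hypothetical workload to profile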
| pytorch-master | functorch/functorch/dim/magic_trace.py |
import torch
from typing import Union, Sequence
import inspect
import dis
from .tree_map import tree_flatten, tree_map
from .wrap_type import wrap_type
from functorch._C import dim as _C
_C._patch_tensor_class()
dims, DimList, dimlists = _C.dims, _C.DimList, _C.dimlists
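# Illustrative usage sketch (not part of the original module) for the
# first-class dim objects bound above:
#
#   i, j = dims(2)
#   A = torch.randn(3, 4)
#   a = A[i, j]       # bind dims i and j to A's dimensions
#   s = a.sum(i)      # reduce over a first-class dim
#   p = s.order(j)    # back to a positional tensor of shape (4,)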
class DimensionMismatchError(Exception):
pass
class DimensionBindError(Exception):
pass
from . import op_properties
# use dict to avoid writing C++ bindings for set
pointwise = {t: True for t in op_properties.pointwise}
use_c = True
if not use_c:
from . import reference
class _Tensor:
    # fast path around slow wrapping/unwrapping logic for simple queries used
    # by the implementation...
@property
def dims(self):
return tuple(d for d in self._levels if isinstance(d, Dim))
def dim(self):
return self.ndim
if use_c:
__torch_function__ = classmethod(_C.__torch_function__)
expand = _C._instancemethod(_C.expand)
else:
__torch_function__ = reference.__torch_function__
expand = reference.expand
index = _C._instancemethod(_C.index)
def __repr__(self):
tensor, levels, ndim = self._tensor, self._levels, self.ndim
return f'{tensor}\nwith dims={tuple(l + ndim if isinstance(l, int) else l for l in levels)} sizes={tuple(tensor.size())}'
TensorLike = (_Tensor, torch.Tensor)
class Dim(_C.Dim, _Tensor):
    # note that _C.Dim comes before tensor because we want the Dim API for things like size to take precedence.
# Tensor defines format, but we want to print Dims with special formatting
__format__ = object.__format__
class Tensor(_Tensor, _C.Tensor):
if not use_c:
from_batched = staticmethod(_C.Tensor_from_batched)
from_positional = staticmethod(_C.Tensor_from_positional)
sum = _C._instancemethod(_C.Tensor_sum)
def cat(tensors, dim, new_dim):
n = dims()
return stack(tensors, n, dim).index([n, dim], new_dim)
if use_c:
_wrap = _C._wrap
def _def(name, *args, **kwargs):
orig = getattr(torch.Tensor, name)
setattr(_Tensor, name, _C._instancemethod(_wrap(orig, *args, **kwargs)))
t__getitem__ = _C._instancemethod(_C.__getitem__)
stack = _C.stack
split = _C._instancemethod(_C.split)
else:
_wrap, _def = reference._wrap, reference._def
t__getitem__ = reference.t__getitem__
stack = reference.stack
split = reference.split
# note: there is no python reference
t__setitem__ = _C._instancemethod(_C.__setitem__)
# this is patched in the C API because otherwise torch.Tensor will
# no longer be considered a sequence and things will break
# torch.Tensor.__getitem__ = t__getitem__
_Tensor.__getitem__ = t__getitem__
# torch.Tensor.__setitem__ = t__setitem__
_Tensor.__setitem__ = t__setitem__
torch.Tensor.split = split
_Tensor.split = split
torch.Tensor.expand = _C._instancemethod(_C.expand)
torch.Tensor.index = _C._instancemethod(_C.index)
wrap_type(use_c, _Tensor, torch.Tensor, _Tensor.__torch_function__)
del _Tensor.ndim
if use_c:
_Tensor.permute = _Tensor.order = _C._instancemethod(_C.order)
else:
_Tensor.permute = _Tensor.order = reference.positional
_def('mean')
_def('sum')
_def('all')
_def('amax')
_def('amin')
_def('aminmax')
_def('any')
_def('count_nonzero')
_def('logsumexp')
_def('nanmean')
_def('nansum')
_def('prod')
_def('std', keepdim_offset=2)
_def('var', keepdim_offset=2)
_def('max', single_dim=True)
_def('min', single_dim=True)
_def('argmax', single_dim=True)
_def('argmin', single_dim=True)
_def('kthvalue', single_dim=True)
_def('median', single_dim=True)
_def('nanmedian', single_dim=True)
_def('mode', single_dim=True)
_def('sort', reduce=False)
_def('argsort', reduce=False)
_def('unbind', single_dim=True)
_def('chunk', dim_offset=1, reduce=False)
_def('cummax', single_dim=True, reduce=False)
_def('cummin', single_dim=True, reduce=False)
_def('cumprod', single_dim=True, reduce=False)
_def('cumprod_', single_dim=True, reduce=False)
_def('cumsum', single_dim=True, reduce=False)
_def('cumsum_', single_dim=True, reduce=False)
_def('logcumsumexp', single_dim=True, reduce=False)
_def('renorm', dim_offset=1, single_dim=True, reduce=False)
_def('softmax', single_dim=True, reduce=False)
softmax = _wrap(torch.nn.functional.softmax, single_dim=True, reduce=False)
# stuff to handle in the future, because they require special
# binding logic for dims
# cross
# diag_embed
# diagonal
# diagonal_scatter
# diff
# nanquantile
# quantile
# roll
# rot90
# topk (new dims on output)
# should these all be subsumed by inplace indexing?
# index_add_
# index_add
# index_copy
# index_copy_
# index_fill
# index_fill_
# index_select
# scatter
# scatter_
# scatter_add
# scatter_add_
# scatter_reduce
| pytorch-master | functorch/functorch/dim/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import _Tensor, Tensor
from .reference import _dims, _enable_layers, llist, ltuple
class DelayedMulTensor(_Tensor):
def __init__(self, lhs, rhs):
self._lhs, self._rhs = lhs, rhs
self._data = None
self._levels_data = None
self._has_device = lhs._has_device or rhs._has_device
self._batchtensor_data = None
self._tensor_data = None
@property
def _levels(self):
if self._levels_data is None:
levels = llist(self._lhs._levels)
for l in self._rhs._levels:
if l not in levels:
levels.append(l)
self._levels_data = ltuple(levels)
return self._levels_data
@property
def _batchtensor(self):
if self._batchtensor_data is None:
with _enable_layers(self._levels):
print("bt multiply fallback")
self._batchtensor_data = self._lhs._batchtensor * self._rhs._batchtensor
return self._batchtensor_data
@property
def _tensor(self):
if self._tensor_data is None:
self._tensor_data = Tensor.from_batched(self._batchtensor, self._has_device)._tensor
return self._tensor_data
@property
def ndim(self):
return self._batchtensor.ndim
@property
def dims(self):
return ltuple(super().dims)
def sum(self, dim):
dims = _dims(dim, 0, False, False)
n = ord('a')
all_levels = self._levels
def to_char(d):
return chr(n + all_levels.index(d))
plhs, levelslhs = self._lhs._tensor, self._lhs._levels
prhs, levelsrhs = self._rhs._tensor, self._rhs._levels
new_dims = tuple(d for d in self.dims if d not in dims)
new_levels = [l for l in self._levels if l not in dims]
fmt = ''.join([*(to_char(d) for d in levelslhs), ',',
*(to_char(d) for d in levelsrhs), '->',
*(to_char(d) for d in new_levels)])
result_data = torch.einsum(fmt, (plhs, prhs))
return Tensor.from_positional(result_data, new_levels, True)
| pytorch-master | functorch/functorch/dim/delayed_mul_tensor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
# pointwise operators can go through a faster pathway
tensor_magic_methods = [
'add',
''
]
pointwise_magic_methods_with_reverse = (
'add', 'sub', 'mul', 'floordiv', 'div', 'truediv', 'mod',
'pow', 'lshift', 'rshift', 'and', 'or', 'xor'
)
pointwise_magic_methods = (
*(x for m in pointwise_magic_methods_with_reverse for x in (m, 'r' + m)),
    'eq', 'gt', 'le', 'lt', 'ge', 'ne', 'neg', 'pos',
'abs', 'invert',
'iadd', 'isub', 'imul', 'ifloordiv', 'idiv',
'itruediv', 'imod', 'ipow', 'ilshift', 'irshift', 'iand',
'ior', 'ixor',
'int', 'long', 'float', 'complex',
)
pointwise_methods = (
*(f'__{m}__' for m in pointwise_magic_methods),
)
pointwise = (
*(getattr(torch.Tensor, m) for m in pointwise_methods),
torch.nn.functional.dropout,
torch.where,
torch.Tensor.abs,
torch.abs,
torch.Tensor.acos,
torch.acos,
torch.Tensor.acosh,
torch.acosh,
torch.Tensor.add,
torch.add,
torch.Tensor.addcdiv,
torch.addcdiv,
torch.Tensor.addcmul,
torch.addcmul,
torch.Tensor.addr,
torch.addr,
torch.Tensor.angle,
torch.angle,
torch.Tensor.asin,
torch.asin,
torch.Tensor.asinh,
torch.asinh,
torch.Tensor.atan,
torch.atan,
torch.Tensor.atan2,
torch.atan2,
torch.Tensor.atanh,
torch.atanh,
torch.Tensor.bitwise_and,
torch.bitwise_and,
torch.Tensor.bitwise_left_shift,
torch.bitwise_left_shift,
torch.Tensor.bitwise_not,
torch.bitwise_not,
torch.Tensor.bitwise_or,
torch.bitwise_or,
torch.Tensor.bitwise_right_shift,
torch.bitwise_right_shift,
torch.Tensor.bitwise_xor,
torch.bitwise_xor,
torch.Tensor.ceil,
torch.ceil,
torch.celu,
torch.nn.functional.celu,
torch.Tensor.clamp,
torch.clamp,
torch.Tensor.clamp_max,
torch.clamp_max,
torch.Tensor.clamp_min,
torch.clamp_min,
torch.Tensor.copysign,
torch.copysign,
torch.Tensor.cos,
torch.cos,
torch.Tensor.cosh,
torch.cosh,
torch.Tensor.deg2rad,
torch.deg2rad,
torch.Tensor.digamma,
torch.digamma,
torch.Tensor.div,
torch.div,
torch.dropout,
torch.nn.functional.dropout,
torch.nn.functional.elu,
torch.Tensor.eq,
torch.eq,
torch.Tensor.erf,
torch.erf,
torch.Tensor.erfc,
torch.erfc,
torch.Tensor.erfinv,
torch.erfinv,
torch.Tensor.exp,
torch.exp,
torch.Tensor.exp2,
torch.exp2,
torch.Tensor.expm1,
torch.expm1,
torch.feature_dropout,
torch.Tensor.float_power,
torch.float_power,
torch.Tensor.floor,
torch.floor,
torch.Tensor.floor_divide,
torch.floor_divide,
torch.Tensor.fmod,
torch.fmod,
torch.Tensor.frac,
torch.frac,
torch.Tensor.frexp,
torch.frexp,
torch.Tensor.gcd,
torch.gcd,
torch.Tensor.ge,
torch.ge,
torch.nn.functional.gelu,
torch.nn.functional.glu,
torch.Tensor.gt,
torch.gt,
torch.Tensor.hardshrink,
torch.hardshrink,
torch.nn.functional.hardshrink,
torch.nn.functional.hardsigmoid,
torch.nn.functional.hardswish,
torch.nn.functional.hardtanh,
torch.Tensor.heaviside,
torch.heaviside,
torch.Tensor.hypot,
torch.hypot,
torch.Tensor.i0,
torch.i0,
torch.Tensor.igamma,
torch.igamma,
torch.Tensor.igammac,
torch.igammac,
torch.Tensor.isclose,
torch.isclose,
torch.Tensor.isfinite,
torch.isfinite,
torch.Tensor.isinf,
torch.isinf,
torch.Tensor.isnan,
torch.isnan,
torch.Tensor.isneginf,
torch.isneginf,
torch.Tensor.isposinf,
torch.isposinf,
torch.Tensor.isreal,
torch.isreal,
torch.Tensor.kron,
torch.kron,
torch.Tensor.lcm,
torch.lcm,
torch.Tensor.ldexp,
torch.ldexp,
torch.Tensor.le,
torch.le,
torch.nn.functional.leaky_relu,
torch.Tensor.lerp,
torch.lerp,
torch.Tensor.lgamma,
torch.lgamma,
torch.Tensor.log,
torch.log,
torch.Tensor.log10,
torch.log10,
torch.Tensor.log1p,
torch.log1p,
torch.Tensor.log2,
torch.log2,
torch.nn.functional.logsigmoid,
torch.Tensor.logical_and,
torch.logical_and,
torch.Tensor.logical_not,
torch.logical_not,
torch.Tensor.logical_or,
torch.logical_or,
torch.Tensor.logical_xor,
torch.logical_xor,
torch.Tensor.logit,
torch.logit,
torch.Tensor.lt,
torch.lt,
torch.Tensor.maximum,
torch.maximum,
torch.Tensor.minimum,
torch.minimum,
torch.nn.functional.mish,
torch.Tensor.mvlgamma,
torch.mvlgamma,
torch.Tensor.nan_to_num,
torch.nan_to_num,
torch.Tensor.ne,
torch.ne,
torch.Tensor.neg,
torch.neg,
torch.Tensor.nextafter,
torch.nextafter,
torch.Tensor.outer,
torch.outer,
torch.polar,
torch.Tensor.polygamma,
torch.polygamma,
torch.Tensor.positive,
torch.positive,
torch.Tensor.pow,
torch.pow,
torch.Tensor.prelu,
torch.prelu,
torch.nn.functional.prelu,
torch.Tensor.rad2deg,
torch.rad2deg,
torch.Tensor.reciprocal,
torch.reciprocal,
torch.Tensor.relu,
torch.relu,
torch.nn.functional.relu,
torch.nn.functional.relu6,
torch.Tensor.remainder,
torch.remainder,
torch.Tensor.round,
torch.round,
torch.rrelu,
torch.nn.functional.rrelu,
torch.Tensor.rsqrt,
torch.rsqrt,
torch.rsub,
torch.selu,
torch.nn.functional.selu,
torch.Tensor.sgn,
torch.sgn,
torch.Tensor.sigmoid,
torch.sigmoid,
torch.nn.functional.sigmoid,
torch.Tensor.sign,
torch.sign,
torch.Tensor.signbit,
torch.signbit,
torch.nn.functional.silu,
torch.Tensor.sin,
torch.sin,
torch.Tensor.sinc,
torch.sinc,
torch.Tensor.sinh,
torch.sinh,
torch.nn.functional.softplus,
torch.nn.functional.softshrink,
torch.Tensor.sqrt,
torch.sqrt,
torch.Tensor.square,
torch.square,
torch.Tensor.sub,
torch.sub,
torch.Tensor.tan,
torch.tan,
torch.Tensor.tanh,
torch.tanh,
torch.nn.functional.tanh,
torch.threshold,
torch.nn.functional.threshold,
torch.trapz,
torch.Tensor.true_divide,
torch.true_divide,
torch.Tensor.trunc,
torch.trunc,
torch.Tensor.xlogy,
torch.xlogy,
torch.rand_like,
)
| pytorch-master | functorch/functorch/dim/op_properties.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functorch._C import (
_vmap_add_layers,
_vmap_remove_layers,
)
from contextlib import contextmanager
_enabled = False
@contextmanager
def _enable_layers(dims):
global _enabled
assert not _enabled
input = list(sorted((d._level, d.size) for d in dims if not isinstance(d, int)))
n = len(input)
try:
_vmap_add_layers(input)
_enabled = True
yield
finally:
_enabled = False
_vmap_remove_layers(n)
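# Illustrative usage sketch (not part of the original module): `dims` must be
# bound first-class Dim objects; their vmap levels are active for the duration
# of the block.
#
#   with _enable_layers(t._levels):
#       out = fallback_op(t._batchtensor)   # hypothetical fallback computation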
| pytorch-master | functorch/functorch/dim/batch_tensor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from types import FunctionType, BuiltinMethodType, MethodDescriptorType, WrapperDescriptorType, GetSetDescriptorType
from functorch._C import dim as _C
_wrap_method = _C._wrap_method
FUNC_TYPES = (FunctionType, MethodDescriptorType, BuiltinMethodType, WrapperDescriptorType)
PROPERTY_TYPES = (GetSetDescriptorType, property)
def _py_wrap_method(orig, __torch_function__):
def impl(*args, **kwargs):
return __torch_function__(orig, None, args, kwargs)
return impl
def wrap_type(use_c, to_patch, pattern, __torch_function__):
if use_c:
wrap_method = _wrap_method
else:
wrap_method = _py_wrap_method
all = {}
for t in reversed(pattern.mro()[:-1]): # skip object
all.update(t.__dict__)
def wrap_attr(orig):
return property(wrap_method(orig.__get__, __torch_function__))
for name, obj in all.items():
if name in ('__dict__', '__new__', '__init__', '__repr__', '__weakref__', '__doc__', '__module__', '__dir__'):
continue
# skip things that have been overloaded
# things that come from object like `__eq__` still need to be patched, however.
if hasattr(to_patch, name) and getattr(to_patch, name) is not getattr(object, name, None):
continue
if isinstance(obj, FUNC_TYPES):
setattr(to_patch, name, wrap_method(obj, __torch_function__))
elif isinstance(obj, PROPERTY_TYPES):
setattr(to_patch, name, wrap_attr(obj))
| pytorch-master | functorch/functorch/dim/wrap_type.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# reference python implementations for C ops
import torch
from .tree_map import tree_flatten, tree_map
from .batch_tensor import _enable_layers
from . import op_properties
from functorch._C import dim as _C
DimList = _C.DimList
from functools import reduce
import operator
# use dict to avoid writing C++ bindings for set
pointwise = set(op_properties.pointwise)
def prod(x):
return reduce(operator.mul, x, 1)
def _wrap_dim(d, N, keepdim):
from . import Dim
if isinstance(d, Dim):
assert not keepdim, "cannot preserve first-class dimensions with keepdim=True"
return d
elif d >= 0:
return d - N
else:
return d
def _dims(d, N, keepdim, single_dim):
from . import Dim
if isinstance(d, (Dim, int)):
return ltuple((_wrap_dim(d, N, keepdim),))
assert not single_dim, f"expected a single dimension or int but found: {d}"
return ltuple(_wrap_dim(x, N, keepdim) for x in d)
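# Illustrative sketch (not part of the original module): positional dims are
# normalized to negative indices so they remain stable when batch levels are
# prepended.
#
#   _wrap_dim(1, 3, False)           # -> -2
#   _wrap_dim(-1, 3, False)          # -> -1
#   _dims((0, 1), 3, False, False)   # -> (-3, -2)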
def _bind_dims_to_size(lhs_size, rhs, lhs_debug):
from . import DimensionMismatchError
not_bound = tuple((i, r) for i, r in enumerate(rhs) if not r.is_bound)
if len(not_bound) == 1:
idx, d = not_bound[0]
rhs_so_far = prod(r.size for r in rhs if r.is_bound)
if lhs_size % rhs_so_far != 0:
rhs_s = tuple('?' if not r.is_bound else str(r.size) for r in rhs)
raise DimensionMismatchError(f"inferred dimension does not evenly fit into larger dimension: {lhs_size} vs {rhs_s}")
new_size = lhs_size // rhs_so_far
d.size = new_size
elif len(not_bound) > 1:
rhs_s = tuple('?' if not r.is_bound else str(r.size) for r in rhs)
raise DimensionMismatchError(f"cannot infer the size of two dimensions at once: {rhs} with sizes {rhs_s}")
else:
rhs_size = prod(r.size for r in rhs)
if lhs_size != rhs_size:
raise DimensionMismatchError(
f"Dimension sizes to do not match ({lhs_size} != {rhs_size}) when matching {lhs_debug} to {rhs}")
def _tensor_levels(inp):
from . import _Tensor
if isinstance(inp, _Tensor):
return inp._tensor, llist(inp._levels), inp._has_device
else:
return inp, llist(range(-inp.ndim, 0)), True
def _match_levels(v, from_levels, to_levels):
view = []
permute = []
requires_view = False
size = v.size()
for t in to_levels:
try:
idx = from_levels.index(t)
permute.append(idx)
view.append(size[idx])
except ValueError:
view.append(1)
requires_view = True
if permute != list(range(len(permute))):
v = v.permute(*permute)
if requires_view:
v = v.view(*view)
return v
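# Illustrative sketch (not part of the original module): levels missing from
# `from_levels` become size-1 axes, so the result broadcasts against tensors
# shaped like `to_levels`.
#
#   v = torch.randn(2, 3)                          # levels (-2, -1)
#   _match_levels(v, [-2, -1], [-1, -2])           # permuted to shape (3, 2)
#   _match_levels(v, [-2, -1], [-3, -2, -1])       # viewed to shape (1, 2, 3)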
# make a single dimension positional but do not permute it,
# used to do multi-tensor operators where the dim being acted on
# should not physically move if possible
def _positional_no_permute(self, dim, expand_dim=False):
from . import Tensor
ptensor, levels = self._tensor, llist(self._levels)
try:
idx = levels.index(dim)
except ValueError:
if not expand_dim:
raise
idx = 0
ptensor = ptensor.expand(dim.size, *ptensor.size())
levels.insert(0, 0)
idx_batched = 0
for i in range(idx):
if isinstance(levels[i], int):
levels[i] -= 1
idx_batched += 1
levels[idx] = -idx_batched - 1
return Tensor.from_positional(ptensor, levels, self._has_device), idx_batched
def seq(a, b):
from . import Dim
if isinstance(a, Dim) != isinstance(b, Dim):
return False
if isinstance(a, Dim):
return a is b
else:
return a == b
class isin:
def __contains__(self, item):
for x in self:
if seq(item, x):
return True
return False
def index(self, item):
for i, x in enumerate(self):
if seq(item, x):
return i
raise ValueError
class llist(isin, list):
pass
class ltuple(isin, tuple):
pass
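# The isin mixin makes `in` and .index go through `seq` above, so Dim objects
# are matched by identity (`a is b`) while plain values still compare with
# `==`; llist and ltuple are just list/tuple with this matching behaviour.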
empty_dict = {}
@classmethod
def __torch_function__(self, orig, cls, args, kwargs=empty_dict):
from . import _Tensor, TensorLike, Tensor
from .delayed_mul_tensor import DelayedMulTensor
if orig is torch.Tensor.__mul__:
lhs, rhs = args
if isinstance(lhs, _Tensor) and isinstance(rhs, _Tensor) and lhs.ndim == 0 and rhs.ndim == 0:
return DelayedMulTensor(lhs, rhs)
all_dims = llist()
flat_args, unflatten = tree_flatten((args, kwargs))
device_holding_tensor = None
for f in flat_args:
if isinstance(f, _Tensor):
if f._has_device:
device_holding_tensor = f._batchtensor
for d in f.dims:
if d not in all_dims:
all_dims.append(d)
def unwrap(t):
if isinstance(t, _Tensor):
r = t._batchtensor
if device_holding_tensor is not None and not t._has_device:
r = r.to(device=device_holding_tensor.device)
return r
return t
if orig in pointwise:
result_levels = llist()
arg_levels = llist()
to_expand = []
for i, f in enumerate(flat_args):
if isinstance(f, TensorLike):
ptensor, levels, _ = _tensor_levels(f)
if isinstance(f, _Tensor) and not f._has_device and device_holding_tensor is not None:
ptensor = ptensor.to(device=device_holding_tensor.device)
flat_args[i] = ptensor
for l in levels:
if l not in result_levels:
result_levels.append(l)
to_expand.append((i, levels))
for i, levels in to_expand:
flat_args[i] = _match_levels(flat_args[i], levels, result_levels)
args, kwargs = unflatten(flat_args)
result = orig(*args, **kwargs)
def wrap(t):
if isinstance(t, TensorLike):
return Tensor.from_positional(t, result_levels, device_holding_tensor is not None)
return t
return tree_map(wrap, result)
else:
def wrap(t):
if isinstance(t, TensorLike):
return Tensor.from_batched(t, device_holding_tensor is not None)
return t
with _enable_layers(all_dims):
print(f"batch_tensor for {orig}")
args, kwargs = unflatten(unwrap(f) for f in flat_args)
result = orig(*args, **kwargs)
# print("END", orig)
return tree_map(wrap, result)
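# In brief, the dispatch above has two paths: pointwise ops are unwrapped to
# plain tensors whose levels are aligned with _match_levels, run once, and
# re-wrapped with Tensor.from_positional; every other op falls back to running
# on the underlying batched tensors inside _enable_layers and re-wrapping the
# results with Tensor.from_batched.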
def positional(self, *dims):
    from . import Dim, DimensionBindError, Tensor
ptensor, levels = self._tensor, llist(self._levels)
flat_dims = llist()
view = []
needs_view = False
ndim = self.ndim
for d in dims:
if isinstance(d, DimList):
flat_dims.extend(d)
view.extend(e.size for e in d)
elif isinstance(d, Dim):
flat_dims.append(d)
view.append(d.size)
elif isinstance(d, int):
d = _wrap_dim(d, ndim, False)
flat_dims.append(d)
view.append(ptensor.size(d))
else:
flat_dims.extend(d)
view.append(prod(e.size for e in d))
needs_view = True
permute = list(range(len(levels)))
nflat = len(flat_dims)
for i, d in enumerate(flat_dims):
try:
idx = levels.index(d)
except ValueError as e:
raise DimensionBindError(f'tensor of dimensions {self.dims} does not contain dim {d}') from e
p = permute[idx]
del levels[idx]
del permute[idx]
levels.insert(i, 0)
permute.insert(i, p)
ptensor = ptensor.permute(*permute)
seen = 0
for i in range(len(levels) - 1, -1, -1):
if isinstance(levels[i], int):
seen += 1
levels[i] = -seen
result = Tensor.from_positional(ptensor, levels, self._has_device)
if needs_view:
result = result.reshape(*view, *result.size()[len(flat_dims):])
return result
def _contains_dim(input):
    from . import Dim
    for i in input:
        if isinstance(i, Dim):
            return True
    return False
def expand(self, *sizes):
if not _contains_dim(sizes):
return self.__torch_function__(torch.Tensor.expand, None, (self, *sizes))
dims = sizes
sizes = [d.size for d in dims] + [-1] * self.ndim
self = self.expand(*sizes)
return self[dims]
_not_present = object()
def _getarg(name, offset, args, kwargs, default):
if len(args) > offset:
return args[offset]
return kwargs.get(name, default)
def _patcharg(name, offset, args, kwargs, value):
if len(args) > offset:
args[offset] = value
else:
kwargs[name] = value
def _wrap(orig, dim_offset=0, keepdim_offset=1, dim_name='dim', single_dim=False, reduce=True):
from . import TensorLike, Dim, Tensor
def fn(self, *args, **kwargs):
dim = _getarg(dim_name, dim_offset, args, kwargs, _not_present)
if dim is _not_present or (single_dim and not isinstance(dim, Dim)):
with _enable_layers(self.dims):
print(f"dim fallback batch_tensor for {orig}")
return Tensor.from_batched(orig(self._batchtensor, *args, **kwargs), self._has_device)
keepdim = _getarg('keepdim', keepdim_offset, args, kwargs, False) if reduce else False
t, levels = self._tensor, llist(self._levels)
dims = _dims(dim, self._batchtensor.ndim, keepdim, single_dim)
dim_indices = tuple(levels.index(d) for d in dims)
if reduce and not keepdim:
new_levels = [l for i, l in enumerate(levels) if i not in dim_indices]
else:
new_levels = levels
if len(dim_indices) == 1:
dim_indices = dim_indices[0] # so that dims that really only take a single argument work...
args = list(args)
_patcharg(dim_name, dim_offset, args, kwargs, dim_indices)
def wrap(t):
if isinstance(t, TensorLike):
return Tensor.from_positional(t, new_levels, self._has_device)
return t
with _enable_layers(new_levels):
print(f"dim used batch_tensor for {orig}")
r = orig(t, *args, **kwargs)
return tree_map(wrap, r)
return fn
def _def(name, *args, **kwargs):
from . import _Tensor
orig = getattr(torch.Tensor, name)
setattr(_Tensor, name, _wrap(orig, *args, **kwargs))
no_slice = slice(None)
_orig_getitem = torch.Tensor.__getitem__
class dim_tracker:
def __init__(self):
self.dims = llist()
self.count = []
def record(self, d):
if d not in self.dims:
self.dims.append(d)
self.count.append(1)
def __getitem__(self, d):
return self.count[self.dims.index(d)]
def t__getitem__(self, input):
from . import Dim, DimensionBindError, _Tensor, TensorLike, DimList, Tensor
    # * bail to the original implementation if we have a single non-Dim tensor, or a non-tensor
    # * locate ... or an unbound tensor list, and determine its size, bind dim list
    #   (remember that None does not count toward the total dim count)
    # * bind simple dims and dim-packs to their sizes, count the number of uses of each dim,
    #   produce the re-view if needed
    # * for each single-use dim index, replace with no_slice and mark that it will be added
    #   (keep track of whether we have to call super)
    # * call super if needed
    # * if we have dims to bind, bind them (it will help if we eliminated ... and None before)
    # this handles bool indexing, as well as some other simple cases.
is_simple = (not isinstance(input, Dim) and
not isinstance(input, (tuple, list)) and
                 # WAR for functorch bug where zero-dim tensors in getitem are not handled correctly.
not (isinstance(input, TensorLike) and input.ndim == 0))
if is_simple:
if isinstance(self, _Tensor):
return _Tensor.__torch_function__(_orig_getitem, None, (self, input))
else:
return _orig_getitem(self, input)
# can further optimize this case
if not isinstance(input, tuple):
input = [input]
else:
input = list(input)
dims_indexed = 0
expanding_object = None
dimlists = []
for i, s in enumerate(input):
if s is ... or isinstance(s, DimList) and not s.is_bound:
if expanding_object is not None:
msg = 'at most one ... or unbound dimension list can exist in indexing list but' \
f' found 2 at offsets {i} and {expanding_object}'
raise DimensionBindError(msg)
expanding_object = i
if isinstance(s, DimList):
dims_indexed += len(s) if s.is_bound else 0
dimlists.append(i)
elif s is not None and s is not ...:
dims_indexed += 1
ndim = self.ndim
if dims_indexed > ndim:
raise IndexError(f'at least {dims_indexed} indices were supplied but the tensor only has {ndim} dimensions.')
if expanding_object is not None:
expanding_ndims = ndim - dims_indexed
obj = input[expanding_object]
if obj is ...:
input[expanding_object:expanding_object + 1] = [no_slice] * expanding_ndims
else:
obj.bind_len(expanding_ndims)
# flatten the dimslists into the indexing
for i in reversed(dimlists):
input[i:i + 1] = input[i]
dims_indexed = 0
requires_view = False
size = self.size()
view_sizes = []
dims_seen = dim_tracker()
def add_dims(t):
if not isinstance(t, _Tensor):
return
for d in t.dims:
dims_seen.record(d)
add_dims(self)
dim_packs = []
for i, idx in enumerate(input):
if idx is None:
input[i] = no_slice
view_sizes.append(1)
requires_view = True
else:
sz = size[dims_indexed]
if isinstance(idx, Dim):
idx.size = sz
dims_seen.record(idx)
view_sizes.append(sz)
elif isinstance(idx, (tuple, list)) and idx and isinstance(idx[0], Dim):
for d in idx:
                    dims_seen.record(d)
_bind_dims_to_size(sz, idx, f'offset {i}')
view_sizes.extend(d.size for d in idx)
requires_view = True
dim_packs.append(i)
else:
add_dims(idx)
view_sizes.append(sz)
dims_indexed += 1
if requires_view:
self = self.view(*view_sizes)
for i in reversed(dim_packs):
input[i:i + 1] = input[i]
    # currently:
    # input is flat, containing either Dim, or Tensor, or something valid for standard indexing
    # self may have first-class dims as well.
    # to index:
    # drop the first-class dims from self, they just become direct indices of their positions
    # figure out the dimensions of the indexing tensors: union of all the dims in the tensors in the index.
    # these dimensions will appear and need to be bound at the first place a tensor occurs
if isinstance(self, _Tensor):
ptensor_self, levels = self._tensor, list(self._levels)
# indices to ptensor rather than self which has first-class dimensions
input_it = iter(input)
flat_inputs = [next(input_it) if isinstance(l, int) else l for l in levels]
has_device = self._has_device
to_pad = 0
else:
ptensor_self, flat_inputs = self, input
to_pad = ptensor_self.ndim - len(flat_inputs)
has_device = True
result_levels = []
index_levels = []
tensor_insert_point = None
to_expand = {}
requires_getindex = False
for i, inp in enumerate(flat_inputs):
if isinstance(inp, Dim) and dims_seen[inp] == 1:
flat_inputs[i] = no_slice
result_levels.append(inp)
elif isinstance(inp, TensorLike):
requires_getindex = True
if tensor_insert_point is None:
tensor_insert_point = len(result_levels)
ptensor, levels, _ = _tensor_levels(inp)
to_expand[i] = levels
flat_inputs[i] = ptensor
for l in levels:
if l not in index_levels:
index_levels.append(l)
else:
requires_getindex = True
result_levels.append(0)
if tensor_insert_point is not None:
result_levels[tensor_insert_point:tensor_insert_point] = index_levels
for i, levels in to_expand.items():
flat_inputs[i] = _match_levels(flat_inputs[i], levels, index_levels)
if requires_getindex:
result = _orig_getitem(ptensor_self, flat_inputs)
else:
result = ptensor_self
next_positional = -1
if to_pad > 0:
result_levels.extend([0] * to_pad)
for i, r in enumerate(reversed(result_levels)):
if isinstance(r, int):
result_levels[-1 - i] = next_positional
next_positional -= 1
return Tensor.from_positional(result, result_levels, has_device)
# XXX - dim is optional and can be the outer-most dimension...
def stack(tensors, new_dim, dim=0, out=None):
if isinstance(dim, int):
return torch.stack(tensors, dim, out).index(dim, new_dim)
index = None
if out is not None:
out, index = _positional_no_permute(out, dim, expand_dim=True)
ptensors = []
for t in tensors:
pt, pi = _positional_no_permute(t, dim, expand_dim=True)
if index is not None and pi != index:
pt = pt.move_dim(pi, index)
else:
index = pi
ptensors.append(pt)
pr = torch.stack(ptensors, index, out=out)
return pr.index((index, index + 1), (new_dim, dim))
_orig_split = torch.Tensor.split
def split(self, split_size_or_sections, dim=0):
from . import Dim, _Tensor
if isinstance(split_size_or_sections, int) or any(isinstance(t, int) for t in split_size_or_sections):
if isinstance(dim, Dim):
raise ValueError('when dim is specified as a Dim object, split sizes must also be dimensions.')
return _orig_split(self, split_size_or_sections, dim=dim)
if isinstance(dim, Dim):
assert isinstance(self, _Tensor), f"Tensor does not have dimension {dim}"
self, dim = _positional_no_permute(self, dim)
size = self.size(dim)
total_bound_size = 0
unbound = []
sizes = []
for i, d in enumerate(split_size_or_sections):
if d.is_bound:
sizes.append(d.size)
total_bound_size += d.size
else:
sizes.append(0)
unbound.append(i)
if unbound:
assert total_bound_size <= size, \
f"result dimensions are larger than original: {total_bound_size} vs {size} ({split_size_or_sections})"
remaining_size = size - total_bound_size
chunk_size = -(-remaining_size // len(unbound))
for u in unbound:
sz = min(chunk_size, remaining_size)
split_size_or_sections[u].size = sz
sizes[u] = sz
remaining_size -= sz
else:
assert total_bound_size == size, \
f"result dimensions do not match original: {total_bound_size} vs {size} ({split_size_or_sections})"
return tuple(t.index(dim, d) for d, t in zip(split_size_or_sections, _orig_split(self, sizes, dim=dim)))
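# A minimal usage sketch of the reference implementations above (illustrative
# only; `dims` and the indexing/`positional`/`split` behaviour come from the
# surrounding functorch.dim package, which these functions mirror):
#
#   import torch
#   from functorch.dim import dims
#
#   b, c = dims()                # two fresh first-class dimensions
#   x = torch.randn(4, 5)[b, c]  # bind them: b.size == 4, c.size == 5
#   y = x.positional(b, c)       # back to an ordinary (4, 5) tensor
#
#   d1, d2 = dims()
#   parts = x.split((d1, d2), dim=b)  # split above infers d1/d2 sizes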
| pytorch-master | functorch/functorch/dim/reference.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import dis
import inspect
from dataclasses import dataclass
from typing import Union

# NOTE: DimList, DimensionBindError, current_level and the _vmap_* nesting
# helpers are assumed to be provided by the surrounding package
# (functorch._C); only the stdlib imports this snippet clearly needs are
# added here.
_vmap_levels = []
@dataclass
class LevelInfo:
level: int
alive: bool = True
class Dim:
def __init__(self, name: str, size: Union[None, int] = None):
self.name = name
self._size = None
self._vmap_level = None
if size is not None:
self.size = size
def __del__(self):
if self._vmap_level is not None:
            _vmap_levels[self._vmap_stack].alive = False
while not _vmap_levels[-1].alive and current_level() == _vmap_levels[-1].level:
_vmap_decrement_nesting()
_vmap_levels.pop()
@property
def size(self):
assert self.is_bound
return self._size
@size.setter
def size(self, size: int):
if self._size is None:
self._size = size
self._vmap_level = _vmap_increment_nesting(size, 'same')
self._vmap_stack = len(_vmap_levels)
_vmap_levels.append(LevelInfo(self._vmap_level))
elif self._size != size:
raise DimensionBindError(
f"Dim '{self}' previously bound to a dimension of size {self._size} cannot bind to a dimension of size {size}")
@property
def is_bound(self):
return self._size is not None
def __repr__(self):
return self.name
def extract_name(inst):
assert inst.opname == 'STORE_FAST' or inst.opname == 'STORE_NAME'
return inst.argval
_cache = {}
def dims(lists=0):
frame = inspect.currentframe()
assert frame is not None
calling_frame = frame.f_back
assert calling_frame is not None
code, lasti = calling_frame.f_code, calling_frame.f_lasti
key = (code, lasti)
if key not in _cache:
first = lasti // 2 + 1
instructions = list(dis.get_instructions(calling_frame.f_code))
unpack = instructions[first]
if unpack.opname == 'STORE_FAST' or unpack.opname == 'STORE_NAME':
# just a single dim, not a list
name = unpack.argval
ctor = Dim if lists == 0 else DimList
_cache[key] = lambda: ctor(name=name)
else:
assert unpack.opname == 'UNPACK_SEQUENCE'
ndims = unpack.argval
names = tuple(extract_name(instructions[first + 1 + i]) for i in range(ndims))
first_list = len(names) - lists
_cache[key] = lambda: tuple(Dim(n) if i < first_list else DimList(name=n) for i, n in enumerate(names))
return _cache[key]()
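# Usage sketch (illustrative): binding relies on inspecting the caller's
# bytecode, so `dims()` must appear directly on the right-hand side of an
# assignment; the number and names of the Dims come from the targets:
#
#   i = dims()                 # single STORE -> one Dim named "i"
#   i, j = dims()              # UNPACK_SEQUENCE -> Dims "i" and "j"
#   i, j, ks = dims(lists=1)   # the trailing `lists` names become DimLists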
def _dim_set(positional, arg):
def convert(a):
if isinstance(a, Dim):
return a
else:
assert isinstance(a, int)
return positional[a]
if arg is None:
return positional
elif not isinstance(arg, (Dim, int)):
return tuple(convert(a) for a in arg)
else:
return (convert(arg),)
| pytorch-master | functorch/functorch/dim/dim.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functorch._C import dim
tree_flatten = dim.tree_flatten
def tree_map(fn, tree):
vs, unflatten = tree_flatten(tree)
return unflatten(fn(v) for v in vs)
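# Illustrative example (assuming dim.tree_flatten follows the usual pytree
# convention of returning the leaves plus an unflatten callable):
#   tree_map(lambda v: v * 2, ((1, 2), {"a": 3}))
# flattens to leaves [1, 2, 3], doubles each, and rebuilds ((2, 4), {"a": 6}).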
| pytorch-master | functorch/functorch/dim/tree_map.py |
from .._src.python_key import pythonkey_decompose
from .._src.decompositions import register_decomposition, decomposition_table, get_decompositions
from .._src.fx_minifier import minifier
from .._src.aot_autograd import (
aot_function,
aot_module,
compiled_function,
compiled_module,
num_of_recompilations,
clear_compile_cache,
aot_module_simplified,
get_graph_being_compiled,
make_boxed_func,
make_boxed_compiler
)
from .._src.compilers import (
ts_compile,
draw_graph_compile,
nop,
nnc_jit,
memory_efficient_fusion,
debug_compile,
print_compile,
default_decompositions
)
from .._src.partitioners import (
min_cut_rematerialization_partition,
default_partition,
draw_graph,
draw_joint_graph,
)
from .._src import config
| pytorch-master | functorch/functorch/compile/__init__.py |
import sys
log_file_path = sys.argv[1]
with open(log_file_path) as f:
lines = f.readlines()
for line in lines:
    # Ignore errors from CPU instruction set checks, symbol existence testing,
    # and compilation error formatting
ignored_keywords = [
'src.c',
'CheckSymbolExists.c',
'test_compilation_error_formatting',
]
    if all(keyword not in line for keyword in ignored_keywords):
print(line)
| pytorch-master | .jenkins/pytorch/print_sccache_log.py |
from datetime import datetime, timedelta
from tempfile import mkdtemp
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
temp_dir = mkdtemp()
print(temp_dir)
def genrsa(path):
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
with open(path, "wb") as f:
f.write(key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
))
return key
def create_cert(path, C, ST, L, O, key):
subject = issuer = x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, C),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST),
x509.NameAttribute(NameOID.LOCALITY_NAME, L),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, O),
])
cert = x509.CertificateBuilder().subject_name(
subject
).issuer_name(
issuer
).public_key(
key.public_key()
).serial_number(
x509.random_serial_number()
).not_valid_before(
datetime.utcnow()
).not_valid_after(
# Our certificate will be valid for 10 days
datetime.utcnow() + timedelta(days=10)
).add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
).sign(key, hashes.SHA256())
# Write our certificate out to disk.
with open(path, "wb") as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
return cert
def create_req(path, C, ST, L, O, key):
csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([
# Provide various details about who we are.
x509.NameAttribute(NameOID.COUNTRY_NAME, C),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST),
x509.NameAttribute(NameOID.LOCALITY_NAME, L),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, O),
])).sign(key, hashes.SHA256())
with open(path, "wb") as f:
f.write(csr.public_bytes(serialization.Encoding.PEM))
return csr
def sign_certificate_request(path, csr_cert, ca_cert, private_ca_key):
cert = x509.CertificateBuilder().subject_name(
csr_cert.subject
).issuer_name(
ca_cert.subject
).public_key(
csr_cert.public_key()
).serial_number(
x509.random_serial_number()
).not_valid_before(
datetime.utcnow()
).not_valid_after(
# Our certificate will be valid for 10 days
datetime.utcnow() + timedelta(days=10)
# Sign our certificate with our private key
).sign(private_ca_key, hashes.SHA256())
with open(path, "wb") as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
return cert
ca_key = genrsa(temp_dir + "/ca.key")
ca_cert = create_cert(temp_dir + "/ca.pem", u"US", u"New York", u"New York", u"Gloo Certificate Authority", ca_key)
pkey = genrsa(temp_dir + "/pkey.key")
csr = create_req(temp_dir + "/csr.csr", u"US", u"California", u"San Francisco", u"Gloo Testing Company", pkey)
cert = sign_certificate_request(temp_dir + "/cert.pem", csr, ca_cert, ca_key)
| pytorch-master | .jenkins/pytorch/create_test_cert.py |
raise ModuleNotFoundError("Sorry PyTorch, but our NumPy is in the other folder")
| pytorch-master | .jenkins/pytorch/fake_numpy/numpy.py |
import sys
import json
import numpy
sample_data_list = sys.argv[1:]
sample_data_list = [float(v.strip()) for v in sample_data_list]
sample_mean = numpy.mean(sample_data_list)
sample_sigma = numpy.std(sample_data_list)
data = {
'mean': sample_mean,
'sigma': sample_sigma,
}
print(json.dumps(data))
| pytorch-master | .jenkins/pytorch/perf_test/get_stats.py |
import sys
import json
import math
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--test-name', dest='test_name', action='store',
required=True, help='test name')
parser.add_argument('--sample-stats', dest='sample_stats', action='store',
required=True, help='stats from sample')
parser.add_argument('--update', action='store_true',
help='whether to update baseline using stats from sample')
args = parser.parse_args()
test_name = args.test_name
if 'cpu' in test_name:
    backend = 'cpu'
elif 'gpu' in test_name:
    backend = 'gpu'
else:
    raise ValueError('test name must contain either "cpu" or "gpu", got: {}'.format(test_name))
data_file_path = '../{}_runtime.json'.format(backend)
with open(data_file_path) as data_file:
data = json.load(data_file)
if test_name in data:
mean = float(data[test_name]['mean'])
sigma = float(data[test_name]['sigma'])
else:
# Let the test pass if baseline number doesn't exist
mean = sys.maxsize
sigma = 0.001
print("population mean: ", mean)
print("population sigma: ", sigma)
# Let the test pass if baseline number is NaN (which happened in
# the past when we didn't have logic for catching NaN numbers)
if math.isnan(mean) or math.isnan(sigma):
mean = sys.maxsize
sigma = 0.001
sample_stats_data = json.loads(args.sample_stats)
sample_mean = float(sample_stats_data['mean'])
sample_sigma = float(sample_stats_data['sigma'])
print("sample mean: ", sample_mean)
print("sample sigma: ", sample_sigma)
if math.isnan(sample_mean):
raise Exception('''Error: sample mean is NaN''')
elif math.isnan(sample_sigma):
raise Exception('''Error: sample sigma is NaN''')
z_value = (sample_mean - mean) / sigma
print("z-value: ", z_value)
if z_value >= 3:
raise Exception('''\n
z-value >= 3, there is high chance of perf regression.\n
To reproduce this regression, run
`cd .jenkins/pytorch/perf_test/ && bash {}.sh` on your local machine
and compare the runtime before/after your code change.
'''.format(test_name))
else:
print("z-value < 3, no perf regression detected.")
if args.update:
print("We will use these numbers as new baseline.")
new_data_file_path = '../new_{}_runtime.json'.format(backend)
with open(new_data_file_path) as new_data_file:
new_data = json.load(new_data_file)
new_data[test_name] = {}
new_data[test_name]['mean'] = sample_mean
new_data[test_name]['sigma'] = max(sample_sigma, sample_mean * 0.1)
with open(new_data_file_path, 'w') as new_data_file:
json.dump(new_data, new_data_file, indent=4)
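# A worked example of the z-test above (hypothetical numbers): with a baseline
# mean of 100s and sigma of 5s, a sample mean of 120s gives
# z = (120 - 100) / 5 = 4 >= 3, so the run is flagged as a likely regression;
# a sample mean of 105s gives z = 1 and passes.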
| pytorch-master | .jenkins/pytorch/perf_test/compare_with_baseline.py |
import sys
import json
data_file_path = sys.argv[1]
commit_hash = sys.argv[2]
with open(data_file_path) as data_file:
data = json.load(data_file)
data['commit'] = commit_hash
with open(data_file_path, 'w') as data_file:
json.dump(data, data_file)
| pytorch-master | .jenkins/pytorch/perf_test/update_commit_hash.py |
#!/usr/bin/env python3
import subprocess
import os
COMMON_TESTS = [
(
"Checking that torch is available",
"import torch",
),
(
"Checking that MKL is available",
"import torch; exit(0 if torch.backends.mkl.is_available() else 1)",
),
]
GPU_TESTS = [
(
"Checking that CUDA archs are setup correctly",
"import torch; torch.randn([3,5]).cuda()",
),
(
"Checking that magma is available",
"import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)",
),
(
"Checking that CuDNN is available",
"import torch; exit(0 if torch.backends.cudnn.is_available() else 1)",
),
]
if __name__ == "__main__":
if 'USE_CUDA' in os.environ and os.environ['USE_CUDA'] == '1':
TESTS = COMMON_TESTS + GPU_TESTS
else:
TESTS = COMMON_TESTS
for description, python_commands in TESTS:
print(description)
command_args = ["python", "-c", python_commands]
command_string = " ".join(command_args)
print("Command:", command_string)
try:
subprocess.check_call(command_args)
except subprocess.CalledProcessError as e:
sdk_root = os.environ.get('WindowsSdkDir', 'C:\\Program Files (x86)\\Windows Kits\\10')
debugger = os.path.join(sdk_root, 'Debuggers', 'x64', 'cdb.exe')
if os.path.exists(debugger):
command_args = [debugger, "-o", "-c", "~*g; q"] + command_args
command_string = " ".join(command_args)
print("Reruning with traceback enabled")
print("Command:", command_string)
subprocess.run(command_args, check=False)
exit(e.returncode)
| pytorch-master | .jenkins/pytorch/win-test-helpers/run_python_nn_smoketests.py |
import os
import fire
from collections import deque, namedtuple
from tqdm import tqdm
import numpy as np
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from torch.distributions import Categorical
import torch.nn.functional as F
import gym
# constants
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# data
Memory = namedtuple('Memory', ['state', 'action', 'action_log_prob', 'reward', 'done', 'value'])
AuxMemory = namedtuple('AuxMemory', ['state', 'target_value', 'old_values'])
class ExperienceDataset(Dataset):
def __init__(self, data):
super().__init__()
self.data = data
def __len__(self):
return len(self.data[0])
def __getitem__(self, ind):
return tuple(map(lambda t: t[ind], self.data))
def create_shuffled_dataloader(data, batch_size):
ds = ExperienceDataset(data)
return DataLoader(ds, batch_size = batch_size, shuffle = True)
# helpers
def exists(val):
return val is not None
def normalize(t, eps = 1e-5):
return (t - t.mean()) / (t.std() + eps)
def update_network_(loss, optimizer):
optimizer.zero_grad()
loss.mean().backward()
optimizer.step()
def init_(m):
if isinstance(m, nn.Linear):
gain = torch.nn.init.calculate_gain('tanh')
torch.nn.init.orthogonal_(m.weight, gain)
if m.bias is not None:
torch.nn.init.zeros_(m.bias)
# networks
class Actor(nn.Module):
def __init__(self, state_dim, hidden_dim, num_actions):
super().__init__()
self.net = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.Tanh(),
nn.Linear(hidden_dim, hidden_dim),
nn.Tanh(),
nn.Linear(hidden_dim, hidden_dim),
nn.Tanh()
)
self.action_head = nn.Sequential(
nn.Linear(hidden_dim, num_actions),
nn.Softmax(dim=-1)
)
self.value_head = nn.Linear(hidden_dim, 1)
self.apply(init_)
def forward(self, x):
hidden = self.net(x)
return self.action_head(hidden), self.value_head(hidden)
class Critic(nn.Module):
def __init__(self, state_dim, hidden_dim):
super().__init__()
self.net = nn.Sequential(
nn.Linear(state_dim, hidden_dim),
nn.Tanh(),
nn.Linear(hidden_dim, hidden_dim),
nn.Tanh(),
nn.Linear(hidden_dim, 1),
)
self.apply(init_)
def forward(self, x):
return self.net(x)
# agent
def clipped_value_loss(values, rewards, old_values, clip):
value_clipped = old_values + (values - old_values).clamp(-clip, clip)
value_loss_1 = (value_clipped.flatten() - rewards) ** 2
value_loss_2 = (values.flatten() - rewards) ** 2
return torch.mean(torch.max(value_loss_1, value_loss_2))
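# A worked example (hypothetical numbers): with old value 1.0, new value 2.0,
# return 1.5 and clip 0.4, value_clipped = 1.0 + clamp(1.0, -0.4, 0.4) = 1.4,
# so loss_1 = (1.4 - 1.5)^2 = 0.01 and loss_2 = (2.0 - 1.5)^2 = 0.25; taking
# the elementwise max (0.25) penalises value updates that step far outside the
# clip window, mirroring PPO's clipped objective.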
class PPG:
def __init__(
self,
state_dim,
num_actions,
actor_hidden_dim,
critic_hidden_dim,
epochs,
epochs_aux,
minibatch_size,
lr,
betas,
lam,
gamma,
beta_s,
eps_clip,
value_clip
):
self.actor = Actor(state_dim, actor_hidden_dim, num_actions).to(device)
self.critic = Critic(state_dim, critic_hidden_dim).to(device)
self.opt_actor = Adam(self.actor.parameters(), lr=lr, betas=betas)
self.opt_critic = Adam(self.critic.parameters(), lr=lr, betas=betas)
self.minibatch_size = minibatch_size
self.epochs = epochs
self.epochs_aux = epochs_aux
self.lam = lam
self.gamma = gamma
self.beta_s = beta_s
self.eps_clip = eps_clip
self.value_clip = value_clip
def save(self):
torch.save({
'actor': self.actor.state_dict(),
'critic': self.critic.state_dict()
        }, './ppg.pt')
def load(self):
if not os.path.exists('./ppg.pt'):
return
        data = torch.load('./ppg.pt')
self.actor.load_state_dict(data['actor'])
self.critic.load_state_dict(data['critic'])
def learn(self, memories, aux_memories, next_state):
# retrieve and prepare data from memory for training
states = []
actions = []
old_log_probs = []
rewards = []
masks = []
values = []
for mem in memories:
states.append(mem.state)
actions.append(torch.tensor(mem.action))
old_log_probs.append(mem.action_log_prob)
rewards.append(mem.reward)
masks.append(1 - float(mem.done))
values.append(mem.value)
# calculate generalized advantage estimate
next_state = torch.from_numpy(next_state).to(device)
next_value = self.critic(next_state).detach()
values = values + [next_value]
returns = []
gae = 0
for i in reversed(range(len(rewards))):
delta = rewards[i] + self.gamma * values[i + 1] * masks[i] - values[i]
gae = delta + self.gamma * self.lam * masks[i] * gae
returns.insert(0, gae + values[i])
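        # The loop above is the standard GAE recursion, evaluated backwards in time:
        #   delta_t = r_t + gamma * V(s_{t+1}) * mask_t - V(s_t)
        #   gae_t   = delta_t + gamma * lam * mask_t * gae_{t+1}
        # and the stored returns are returns_t = gae_t + V(s_t), i.e. the value targets.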
# convert values to torch tensors
to_torch_tensor = lambda t: torch.stack(t).to(device).detach()
states = to_torch_tensor(states)
actions = to_torch_tensor(actions)
old_values = to_torch_tensor(values[:-1])
old_log_probs = to_torch_tensor(old_log_probs)
rewards = torch.tensor(returns).float().to(device)
# store state and target values to auxiliary memory buffer for later training
aux_memory = AuxMemory(states, rewards, old_values)
aux_memories.append(aux_memory)
# prepare dataloader for policy phase training
dl = create_shuffled_dataloader([states, actions, old_log_probs, rewards, old_values], self.minibatch_size)
# policy phase training, similar to original PPO
for _ in range(self.epochs):
for states, actions, old_log_probs, rewards, old_values in dl:
action_probs, _ = self.actor(states)
values = self.critic(states)
dist = Categorical(action_probs)
action_log_probs = dist.log_prob(actions)
entropy = dist.entropy()
# calculate clipped surrogate objective, classic PPO loss
ratios = (action_log_probs - old_log_probs).exp()
advantages = normalize(rewards - old_values.detach())
surr1 = ratios * advantages
surr2 = ratios.clamp(1 - self.eps_clip, 1 + self.eps_clip) * advantages
policy_loss = - torch.min(surr1, surr2) - self.beta_s * entropy
update_network_(policy_loss, self.opt_actor)
# calculate value loss and update value network separate from policy network
value_loss = clipped_value_loss(values, rewards, old_values, self.value_clip)
update_network_(value_loss, self.opt_critic)
def learn_aux(self, aux_memories):
# gather states and target values into one tensor
states = []
rewards = []
old_values = []
for state, reward, old_value in aux_memories:
states.append(state)
rewards.append(reward)
old_values.append(old_value)
states = torch.cat(states)
rewards = torch.cat(rewards)
old_values = torch.cat(old_values)
# get old action predictions for minimizing kl divergence and clipping respectively
old_action_probs, _ = self.actor(states)
old_action_probs.detach_()
        # prepare dataloader for auxiliary phase training
dl = create_shuffled_dataloader([states, old_action_probs, rewards, old_values], self.minibatch_size)
# the proposed auxiliary phase training
# where the value is distilled into the policy network, while making sure the policy network does not change the action predictions (kl div loss)
for epoch in range(self.epochs_aux):
for states, old_action_probs, rewards, old_values in tqdm(dl, desc=f'auxiliary epoch {epoch}'):
action_probs, policy_values = self.actor(states)
action_logprobs = action_probs.log()
                # the policy network loss is composed of both the kl div loss and the auxiliary value loss
aux_loss = clipped_value_loss(policy_values, rewards, old_values, self.value_clip)
loss_kl = F.kl_div(action_logprobs, old_action_probs, reduction='batchmean')
policy_loss = aux_loss + loss_kl
update_network_(policy_loss, self.opt_actor)
# paper says it is important to train the value network extra during the auxiliary phase
values = self.critic(states)
value_loss = clipped_value_loss(values, rewards, old_values, self.value_clip)
update_network_(value_loss, self.opt_critic)
# main
def main(
env_name = 'LunarLander-v2',
num_episodes = 50000,
max_timesteps = 500,
actor_hidden_dim = 32,
critic_hidden_dim = 256,
minibatch_size = 64,
lr = 0.0005,
betas = (0.9, 0.999),
lam = 0.95,
gamma = 0.99,
eps_clip = 0.2,
value_clip = 0.4,
beta_s = .01,
update_timesteps = 5000,
num_policy_updates_per_aux = 32,
epochs = 1,
epochs_aux = 6,
seed = None,
render = False,
render_every_eps = 250,
save_every = 1000,
load = False,
monitor = False
):
env = gym.make(env_name)
if monitor:
env = gym.wrappers.Monitor(env, './tmp/', force=True)
state_dim = env.observation_space.shape[0]
num_actions = env.action_space.n
memories = deque([])
aux_memories = deque([])
agent = PPG(
state_dim,
num_actions,
actor_hidden_dim,
critic_hidden_dim,
epochs,
epochs_aux,
minibatch_size,
lr,
betas,
lam,
gamma,
beta_s,
eps_clip,
value_clip
)
if load:
agent.load()
if exists(seed):
torch.manual_seed(seed)
np.random.seed(seed)
time = 0
updated = False
num_policy_updates = 0
for eps in tqdm(range(num_episodes), desc='episodes'):
render_eps = render and eps % render_every_eps == 0
state = env.reset()
for timestep in range(max_timesteps):
time += 1
if updated and render_eps:
env.render()
state = torch.from_numpy(state).to(device)
action_probs, _ = agent.actor(state)
value = agent.critic(state)
dist = Categorical(action_probs)
action = dist.sample()
action_log_prob = dist.log_prob(action)
action = action.item()
next_state, reward, done, _ = env.step(action)
memory = Memory(state, action, action_log_prob, reward, done, value)
memories.append(memory)
state = next_state
if time % update_timesteps == 0:
agent.learn(memories, aux_memories, next_state)
num_policy_updates += 1
memories.clear()
if num_policy_updates % num_policy_updates_per_aux == 0:
agent.learn_aux(aux_memories)
aux_memories.clear()
updated = True
if done:
if render_eps:
updated = False
break
if render_eps:
env.close()
if eps % save_every == 0:
agent.save()
if __name__ == '__main__':
fire.Fire(main)
| phasic-policy-gradient-master | train.py |
import os
from setuptools import find_packages, setup
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(CURRENT_DIR, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="nucleotide_transformer",
version="0.0.1",
packages=find_packages(),
url="https://github.com/instadeepai/nucleotide-transformer",
license="CC BY-NC-SA 4.0",
author="InstaDeep Ltd",
python_requires=">=3.8",
description="The Nucleotide Transformer: Building and Evaluating "
"Robust Foundation Models for Human Genomics ",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=[
"absl-py>=1.0.0",
"jax>=0.3.25",
"jaxlib>=0.3.25",
"dm-haiku>=0.0.9",
"numpy>=1.23.5",
"boto3>=1.24.28",
"typing_extensions>=3.10.0",
"joblib>=1.2.0",
"tqdm>=4.56.0",
"regex>=2022.1.18",
],
dependency_links=[
"https://storage.googleapis.com/jax-releases/jax_releases.html",
],
keywords=["Genomics", "Language Model", "Deep Learning", "JAX"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| nucleotide-transformer-main | setup.py |
# Copyright 2022 InstaDeep Ltd
#
# Licensed under the Creative Commons BY-NC-SA 4.0 License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import Any, Callable, Dict, Optional, Tuple
import boto3
import haiku as hk
import joblib
import tqdm
from botocore import UNSIGNED
from botocore.config import Config
from nucleotide_transformer.model import (
NucleotideTransformerConfig,
build_nucleotide_transformer_fn,
)
from nucleotide_transformer.tokenizers import FixedSizeNucleotidesKmersTokenizer
ENV_XDG_CACHE_HOME = "XDG_CACHE_HOME"
DEFAULT_CACHE_DIR = "~/.cache"
def _get_dir() -> str:
"""
Get directory to save files on user machine.
"""
return os.path.expanduser(
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), "nucleotide_transformer"
)
)
def download_from_s3_bucket(
s3_client: boto3.session.Session, bucket: str, key: str, filename: str
) -> None:
"""
Download data from the s3 bucket and display downloading progression bar.
Args:
s3_client: Boto3 s3 client
bucket: Bucket name.
key: Path towards file in the bucket.
filename: Path to save file locally.
"""
kwargs = {
"Bucket": bucket,
"Key": key,
}
object_size = s3_client.head_object(**kwargs)["ContentLength"]
with tqdm.tqdm(total=object_size, unit="B", unit_scale=True, desc=filename) as pbar:
with open(filename, "wb") as f:
s3_client.download_fileobj(
Bucket=bucket,
Key=key,
ExtraArgs=None,
Fileobj=f,
Callback=lambda bytes_transferred: pbar.update(bytes_transferred),
)
def download_ckpt_and_hyperparams(model_name: str) -> Tuple[hk.Params, Dict[str, Any]]:
"""
    Download the checkpoint and hyperparameters from the kao datacenter s3 bucket.
Args:
model_name: Name of the model.
Returns:
Model parameters.
Model hyperparameters' dict.
"""
# Get directories
save_dir = os.path.join(_get_dir(), model_name)
params_save_dir = os.path.join(save_dir, "ckpt.joblib")
hyperparams_save_dir = os.path.join(save_dir, "hyperparams.json")
if os.path.exists(hyperparams_save_dir) and os.path.exists(params_save_dir):
# Load locally
with open(hyperparams_save_dir, "rb") as f:
hyperparams = json.load(f)
with open(params_save_dir, "rb") as f:
params = joblib.load(f)
return params, hyperparams
else:
os.makedirs(save_dir, exist_ok=True)
s3_endpoint = "https://s3.kao-prod.instadeep.io"
session = boto3.Session()
s3_client = session.client(
service_name="s3",
endpoint_url=s3_endpoint,
config=Config(signature_version=UNSIGNED),
)
# Download params and hyperparams
bucket = "nucleotide-transformer"
download_from_s3_bucket(
s3_client=s3_client,
bucket=bucket,
key=f"checkpoints/{model_name}/hyperparams.json",
filename=hyperparams_save_dir,
)
download_from_s3_bucket(
s3_client=s3_client,
bucket=bucket,
key=f"checkpoints/{model_name}/ckpt.joblib",
filename=params_save_dir,
)
# Load locally
with open(hyperparams_save_dir, "rb") as f:
hyperparams = json.load(f)
with open(params_save_dir, "rb") as f:
params = joblib.load(f)
return params, hyperparams
def get_pretrained_model(
model_name: str,
mixed_precision: bool = False,
embeddings_layers_to_save: Tuple[int, ...] = (),
attention_maps_to_save: Optional[Tuple[Tuple[int, int], ...]] = None,
max_positions: int = 1024,
) -> Tuple[
hk.Params, Callable, FixedSizeNucleotidesKmersTokenizer, NucleotideTransformerConfig
]:
"""
Create a Haiku Nucleotide Transformer
model by downloading pre-trained weights and hyperparameters.
Nucleotide Transformer Models have ESM-like architectures.
Args:
model_name: Name of the model.
mixed_precision: Whether to use mixed precision.
embeddings_layers_to_save: Intermediate embeddings to return in the output.
attention_maps_to_save: Intermediate attention maps to return in the output.
max_positions: Maximum length of a token (for padding).
Returns:
Model parameters.
Haiku function to call the model.
Tokenizer.
Model config (hyperparameters).
Example:
parameters, forward_fn, tokenizer, config = get_pretrained_model(
model_name="500M_1000G",
mixed_precision=False,
# Get embedding at layers 5 and 20
embeddings_layers_to_save=(5, 20,),
# Get attention map number 4 at layer 1 and attention map number 14
# at layer 12
attention_maps_to_save=((1,4), (12, 14)),
max_positions=128,
)
"""
if attention_maps_to_save is None:
attention_maps_to_save = ()
supported_models = [
"500M_human_ref",
"500M_1000G",
"2B5_1000G",
"2B5_multi_species",
]
    if model_name not in supported_models:
        raise NotImplementedError(
            f"Unknown {model_name} model. Supported models are {supported_models}"
        )
# Download weights and hyperparams
parameters, hyperparams = download_ckpt_and_hyperparams(model_name)
tokenizer = FixedSizeNucleotidesKmersTokenizer(
k_mers=hyperparams["k_for_kmers"],
fixed_length=max_positions,
prepend_cls_token=True,
)
# Get config
config = NucleotideTransformerConfig(
alphabet_size=len(tokenizer.vocabulary) - 2,
pad_token_id=tokenizer.pad_token_id,
mask_token_id=tokenizer.mask_token_id,
max_positions=hyperparams["max_positions"],
embed_scale=hyperparams["embed_scale"],
# architecture
emb_layer_norm_before=hyperparams["emb_layer_norm_before"],
key_size=hyperparams["key_dim"] if "key_dim" in hyperparams.keys() else None,
attention_heads=hyperparams["attention_heads"],
embed_dim=hyperparams["embed_dim"],
ffn_embed_dim=hyperparams["ffn_embed_dim"],
num_layers=hyperparams["num_layers"],
# bert
token_dropout=hyperparams["token_dropout"],
masking_ratio=hyperparams["masking_ratio"],
masking_prob=hyperparams["masking_prob"],
# embeddings to save
embeddings_layers_to_save=embeddings_layers_to_save,
attention_maps_to_save=attention_maps_to_save,
)
    # NOTE: module names from the checkpoint are renamed here to match this model's naming scheme
full_model_name = "nucleotide_transformer" + model_name
parameters = rename_modules(parameters, full_model_name)
forward_fn = build_nucleotide_transformer_fn(
model_config=config, mixed_precision=mixed_precision, model_name=full_model_name
)
return parameters, forward_fn, tokenizer, config
def rename_modules(parameters: hk.Params, model_name: str) -> hk.Params:
"""
Adjusts the names of the modules from checkpoints to NucleotideTransformer.
Args:
parameters: Parameters loaded from .joblib archive.
model_name: Name of the loaded model.
Returns:
Parameters with updated names.
"""
for layer_name in list(parameters.keys()):
new_name = layer_name.replace("esm_transformer", model_name)
if "attention_layer" in new_name:
if new_name.split("/")[3] == "mha":
new_name = "/".join(
new_name.split("/")[:3]
+ ["self_attention"]
+ new_name.split("/")[4:]
)
if "mha_layer_norm" in new_name:
new_name = new_name.replace("mha_layer_norm", "self_attention_layer_norm")
if "esm_roberta_lm_head" in new_name:
new_name = new_name.replace("esm_roberta_lm_head", "roberta_lm_head")
parameters[new_name] = parameters.pop(layer_name)
return parameters
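# Illustratively, rename_modules re-roots checkpoint keys from
# "esm_transformer" to the full model name, renames "mha" submodules to
# "self_attention", "mha_layer_norm" to "self_attention_layer_norm", and
# "esm_roberta_lm_head" to "roberta_lm_head", so the loaded parameters line up
# with the module names used by NucleotideTransformer.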
| nucleotide-transformer-main | nucleotide_transformer/pretrained.py |
# Copyright 2022 InstaDeep Ltd
#
# Licensed under the Creative Commons BY-NC-SA 4.0 License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NUCLEOTIDES = ["A", "T", "C", "G"]
VALID_EXTRA_NUCLEOTIDES = ["N", "M", "Y", "B", "S", "W", "K", "H", "D", "V", "R"]
EXTRA_NUCLEOTIDES = ["N"]
| nucleotide-transformer-main | nucleotide_transformer/constants.py |
| nucleotide-transformer-main | nucleotide_transformer/__init__.py |
|
# Copyright 2022 InstaDeep Ltd
#
# Licensed under the Creative Commons BY-NC-SA 4.0 License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import jax.numpy as jnp
from typing_extensions import TypeAlias
Embedding: TypeAlias = jnp.ndarray
Tokens: TypeAlias = jnp.ndarray
AttentionMask: TypeAlias = jnp.ndarray
TransformerOutput: TypeAlias = Dict[str, jnp.ndarray] # type: ignore
| nucleotide-transformer-main | nucleotide_transformer/types.py |
# Copyright 2022 InstaDeep Ltd
#
# Licensed under the Creative Commons BY-NC-SA 4.0 License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the Nucleotide Transformer model in Jax."""
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple
import haiku as hk
import jax.numpy as jnp
import jmp
from nucleotide_transformer.layers import (
ESMLearnedPositionalEmbeddings,
RobertaLMHead,
SelfAttentionBlock,
TokensDropout,
)
from nucleotide_transformer.types import (
AttentionMask,
Embedding,
Tokens,
TransformerOutput,
)
def build_padding_attention_mask(tokens: Tokens, pad_token_id: int) -> AttentionMask:
"""
Builds a padding mask from a sequence of tokens by masking <pad> in the attention.
Args:
tokens: Batch of sequences of shape (batch_size, seq_len).
pad_token_id: Int corresponding to the <pad> token to mask.
Returns:
Batch of attention masks, masking out <pad> tokens.
"""
padding_mask = tokens != pad_token_id
padding_mask = padding_mask[:, None, :]
padding_mask = jnp.einsum("bhT, bht->bhtT", padding_mask, padding_mask)
return padding_mask
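# A worked example (hypothetical token ids): with pad_token_id == 1 and
# tokens == [[5, 7, 1]], padding_mask starts as [[True, True, False]], and the
# einsum outer product yields a (1, 1, 3, 3) mask that is True only where both
# the query and key positions are non-pad.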
@dataclass
class NucleotideTransformerConfig:
"""
Parameters to initialize a Nucleotide Transformer model.
Args:
alphabet_size: Token vocabulary.
pad_token_id: ID of pad token.
mask_token_id: ID of mask token.
max_positions: Maximum sequence length.
embed_scale: Correction ratio applied to the embeddings to make up for the
norm difference between the input during training and inference.
emb_layer_norm_before: Whether to use layer norm before the first attention
layer.
attention_heads: Number of attention heads.
key_size: The dimension of the query, key, and values within each attention
head, if not specified, it is set to attention_heads//embed_dim.
It can be useful to set a custom key size if we want to impose the size of
the query, key and value tensor ( for example, tensors shaped with
power of 2 are more efficiently handled on TPUs ).
Note: Parametrizing the model with a custom key size has been done in :
Brown, Tom, et al. "Language models are few-shot learners."
Advances in neural information processing systems 33 (2020): 1877-1901.
embed_dim: Embedding dimension.
ffn_embed_dim: Feed forward embedding dimension.
num_layers: Number of attention blocks.
token_dropout: Token dropout.
masking_ratio: Masking ratio (used if token dropout is enabled).
masking_prob: Masking probability (used if token dropout is enabled).
use_gradient_checkpointing: Whether to use gradient checkpointing (checkpoint
gradients in the forward pass to reduce the computation in the backward).
"""
alphabet_size: int
pad_token_id: int
mask_token_id: int
max_positions: int = 1000
embed_scale: float = 1.0
# architecture
emb_layer_norm_before: bool = False
attention_heads: int = 20
key_size: Optional[int] = None
embed_dim: int = 1280
ffn_embed_dim: int = 5120
num_layers: int = 24
# dropout
token_dropout: bool = False
masking_ratio: float = 0.1
masking_prob: float = 0.8
# logging
use_gradient_checkpointing: bool = False
# return
embeddings_layers_to_save: Tuple[int, ...] = ()
attention_maps_to_save: Tuple[Tuple[int, int], ...] = ()
def __post_init__(self) -> None:
"""
Checks that the given values are compatible.
"""
if self.key_size is None:
if not self.embed_dim % self.attention_heads == 0:
raise ValueError(
f"When no key size is provided, the embedding dimension should be "
f"divisible by the number of heads, however provided embedding "
f"dimension is {self.embed_dim} and the number of heads is "
f"{self.attention_heads}."
)
self.key_size = self.embed_dim // self.attention_heads
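# For example (illustrative numbers): the default config has embed_dim == 1280
# and attention_heads == 20, so when key_size is left as None it is set to
# 1280 // 20 == 64; an embed_dim that is not divisible by the head count must
# instead be accompanied by an explicit key_size.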
class NucleotideTransformer(hk.Module):
"""
Jax implementation of Nucleotide Transformer models.
"""
def __init__(
self,
config: NucleotideTransformerConfig,
name: Optional[str] = None,
):
"""
Initialize a Nucleotide Transformer model.
Args:
config: Dataclass containing model hyperparameters.
name: Name for module (custom will break weight loading).
"""
self._config = config
super().__init__(name=name)
self._embed_layer = hk.Embed(self._config.alphabet_size, self._config.embed_dim)
self._pos_embed_layer = ESMLearnedPositionalEmbeddings(
config.max_positions, config.embed_dim, config.pad_token_id
)
self._lm_head = RobertaLMHead(
embed_dim=self._config.embed_dim,
alphabet_size=self._config.alphabet_size,
name="roberta_lm_head",
)
if self._config.emb_layer_norm_before:
self.emb_ln_before = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
name="emb_layer_norm_before",
)
# Process attention maps to save requirement into more suitable format
attention_maps_to_save = config.attention_maps_to_save
self._attention_layers_to_save = list({t[0] for t in attention_maps_to_save})
self._attention_maps_per_layer_to_save = {
layer: [t[1] for t in attention_maps_to_save if t[0] == layer]
for layer in self._attention_layers_to_save
}
# Checking user request can be executed, raise error otherwise
max_layer = max(self._attention_layers_to_save + [0])
if max_layer > config.num_layers:
raise ValueError(
f"You are requiring attention maps for layer {max_layer}, "
f"while the model has {config.num_layers} layers only."
)
for layer, maps in self._attention_maps_per_layer_to_save.items():
max_map = max(maps)
if max_map > config.attention_heads:
raise ValueError(
f"You are requiring attention maps number {max_map} "
f"at layer {layer}, while the model has {config.attention_heads} "
f"only."
)
@hk.transparent
def apply_attention_blocks(
self,
x: Embedding,
outs: Dict[str, Embedding],
attention_mask: Optional[AttentionMask] = None,
) -> Tuple[Embedding, Dict[str, Embedding]]:
"""
Create the blocks of attention layers and applies them.
Args:
x: The sequence embedding.
outs: A dictionary to carry through the attention layers which stores the
intermediate sequence embedding and attention maps.
attention_mask: Attention mask of shape (batch_size, 1, seq_len, seq_len).
Returns:
The output sequence embedding.
The optional intermediate results (embeddings of the layer and attention
weights).
"""
layers: List[Callable] = [
self._attention_block(layer_idx)
for layer_idx in range(self._config.num_layers)
]
if self._config.use_gradient_checkpointing:
# the remat-ed function cannot take control flow arguments
layers = [hk.remat(layer) for layer in layers]
for layer_idx, layer in enumerate(layers):
output = layer(
x=x,
attention_mask=attention_mask,
)
x = output["embeddings"]
# Save intermediate embeddings if needed
if (layer_idx + 1) in self._config.embeddings_layers_to_save:
outs[f"embeddings_{(layer_idx+1)}"] = output["embeddings"]
# Save intermediate attention maps if needed
if (layer_idx + 1) in self._attention_layers_to_save:
for map_number in self._attention_maps_per_layer_to_save[layer_idx + 1]:
dkey = f"attention_map_layer_{layer_idx + 1}_number_{map_number}"
outs[dkey] = output["attention_weights"][:, map_number + 1]
return x, outs
@hk.transparent
def _attention_block(self, layer_idx: int) -> SelfAttentionBlock:
return SelfAttentionBlock( # type: ignore
num_heads=self._config.attention_heads,
embed_dim=self._config.embed_dim,
key_size=self._config.key_size,
ffn_embed_dim=self._config.ffn_embed_dim,
name=f"attention_layer_{layer_idx}",
)
def __call__(
self,
tokens: Tokens,
attention_mask: Optional[AttentionMask] = None,
) -> TransformerOutput:
"""
Computes the embeddings based on the input tokens.
Args:
tokens: Input tokens out of the tokenizer of shape (batch_size, seq_len).
attention_mask: Attention mask of shape (batch_size, 1, seq_len, seq_len).
If no mask is provided, a mask by default which equals 1 over all non
pad tokens and 0 over pad tokens is computed.
Returns:
Dictionary containing the final embeddings and logits.
"""
# Prepare outputs dict
outs: Dict[str, jnp.ndarray] = {}
# Compute embeddings
x = self._embed_layer(tokens)
# Tokens dropout if needed
if self._config.token_dropout:
x = TokensDropout(
embed_dim=self._config.embed_dim,
mask_token_id=self._config.mask_token_id,
pad_token_id=self._config.pad_token_id,
masking_ratio=self._config.masking_ratio,
masking_prob=self._config.masking_prob,
)(x, tokens)
# RoBERTa's mask scaling factor
x = self._config.embed_scale * x
# Add check that the sequence fed into the transformer is not longer
# than the max positions used to instantiate the learned positional
# embeddings layer
assert tokens.shape[1] <= self._config.max_positions, (
"Inputs to the learned positional embeddings layer have a length "
f"{x.shape[1]} greater than the max positions used to instantiate "
f"it: {self._config.max_positions}"
)
# Positional Embedding
x = x + self._pos_embed_layer(tokens)
if self._config.emb_layer_norm_before:
x = self.emb_ln_before(x)
# Attention mask
if attention_mask is None:
attention_mask = build_padding_attention_mask(
tokens=tokens, pad_token_id=self._config.pad_token_id
)
# construct a tower of attention layers
x, outs = self.apply_attention_blocks(
x=x,
outs=outs,
attention_mask=attention_mask,
)
# Language Model Head
lm_head_outs = self._lm_head(x)
outs["logits"] = lm_head_outs["logits"]
embeddings = lm_head_outs["embeddings"]
# Save final embeddings if needed
if self._config.num_layers in self._config.embeddings_layers_to_save:
outs[f"embeddings_{self._config.num_layers}"] = embeddings
return outs # type: ignore
def build_nucleotide_transformer_fn(
model_config: NucleotideTransformerConfig,
mixed_precision: bool = False,
model_name: Optional[str] = None,
) -> Callable:
"""
Creates the model's forward pass.
Args:
model_config: Model hyperparameters.
mixed_precision: Whether to use mixed precision computation.
model_name: Model's name.
Returns:
Nucleotide Transformer model forward function.
"""
if mixed_precision:
# Use mixed precision (only support A100 GPU and TPU for now)
half = jnp.bfloat16
full = jnp.float32
policy = jmp.Policy(compute_dtype=half, param_dtype=full, output_dtype=full)
hk.mixed_precision.set_policy(NucleotideTransformer, policy)
        # Keep normalization layers in full precision to avoid instabilities
policy = jmp.Policy(compute_dtype=full, param_dtype=full, output_dtype=half)
hk.mixed_precision.set_policy(hk.BatchNorm, policy)
hk.mixed_precision.set_policy(hk.LayerNorm, policy)
def nucleotide_transformer_fn(
tokens: Tokens, attention_mask: Optional[AttentionMask] = None
) -> TransformerOutput:
"""Forward pass."""
# Run the encoder over the inputs.
encoder = NucleotideTransformer(config=model_config, name=model_name)
outs = encoder(
tokens=tokens,
attention_mask=attention_mask,
)
return outs
return nucleotide_transformer_fn
| nucleotide-transformer-main | nucleotide_transformer/model.py |
# Copyright 2022 InstaDeep Ltd
#
# Licensed under the Creative Commons BY-NC-SA 4.0 License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import haiku as hk
import jax
import jax.numpy as jnp
from haiku import initializers
from nucleotide_transformer.types import (
AttentionMask,
Embedding,
Tokens,
TransformerOutput,
)
class MultiHeadAttention(hk.MultiHeadAttention):
"""
Multi-head attention with masking applied. Modified from the core implementation to
support biases in keys and values.
"""
def __init__(
self,
num_heads: int,
key_size: int,
value_size: Optional[int] = None,
model_size: Optional[int] = None,
name: Optional[str] = None,
):
"""
Args:
num_heads: Number of independent attention heads.
key_size: The size of keys and queries used for attention.
value_size: Optional size of the value projection. If None, defaults
to the key size.
model_size: Optional size of the output embedding. If None, defaults
to the key size multiplied by the number of heads.
name: Optional name for this module.
"""
w_init = hk.initializers.VarianceScaling(2.0, "fan_in", "uniform")
super().__init__(
num_heads=num_heads,
key_size=key_size,
w_init=w_init,
value_size=value_size,
model_size=model_size,
name=name,
)
@hk.transparent
def attention_weights(
self,
query: jnp.ndarray,
key: jnp.ndarray,
attention_mask: Optional[AttentionMask] = None,
) -> jnp.ndarray:
"""
Computes the attention weights.
Args:
query: Embedding sequence to compute queries.
key: Embedding sequence to compute keys.
attention_mask: Input attention_mask. Defaults to None.
Returns:
Attention weights.
"""
query_heads = self._linear_projection_he_init(query, self.key_size, "query")
key_heads = self._linear_projection_he_init(key, self.key_size, "key")
attention_logits = jnp.einsum("...thd,...Thd->...htT", query_heads, key_heads)
sqrt_key_size = jnp.sqrt(self.key_size).astype(query.dtype)
attention_logits = attention_logits / sqrt_key_size
if attention_mask is not None:
assert len(attention_mask.shape) == len(attention_logits.shape)
attention_logits = jnp.where(attention_mask, attention_logits, -1e30)
attention_weights = jax.nn.softmax(attention_logits)
return attention_weights
@hk.transparent
def compute_embeddings(
self,
value: jnp.ndarray,
attention_weights: jnp.ndarray,
) -> jnp.ndarray:
"""
Computes the output embeddings.
Args:
value: Embedding sequence to compute values.
attention_weights: Attention weights.
Returns:
Output embeddings.
"""
# He initialization
w_init = initializers.VarianceScaling(2.0, "fan_in", "uniform")
b_init = initializers.VarianceScaling(2.0, "fan_in", "uniform")
value_heads = self._linear_projection_he_init(value, self.value_size, "value")
attention = jnp.einsum("...htT,...Thd->...thd", attention_weights, value_heads)
# Concatenate attention matrix of all heads into a single vector.
attention_vec = jnp.reshape(attention, (*value.shape[:-1], -1))
return hk.Linear(
self.model_size, w_init=w_init, b_init=b_init, name="mha_output"
)(attention_vec)
def __call__(
self,
query: jnp.ndarray,
key: jnp.ndarray,
value: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
) -> TransformerOutput:
"""
Computes both the embeddings and the attention weights.
Args:
query: Embedding sequence to compute queries.
key: Embedding sequence to compute keys.
value: Embedding sequence to compute values.
attention_mask: Mask to be applied during the attention layers.
Triangular for autoregressive models. Defaults to None.
Returns:
Dictionary containing the output embeddings and the attention weights.
"""
attention_weights = self.attention_weights(query, key, attention_mask)
embeddings = self.compute_embeddings(value, attention_weights)
return {"embeddings": embeddings, "attention_weights": attention_weights}
@hk.transparent
def _linear_projection_he_init(
self, x: jnp.ndarray, head_size: int, name: Optional[str] = None
) -> jnp.ndarray:
"""
Linear layer for multi-head attention mechanism. Initialized with the He method.
Args:
x: Input embeddings.
head_size: Embedding size of each attention head.
name: Name of the linear layer.
Returns:
Multi-head embeddings.
"""
# He initialization
w_init = initializers.VarianceScaling(2.0, "fan_in", "uniform")
b_init = initializers.VarianceScaling(2.0, "fan_in", "uniform")
y = hk.Linear(
self.num_heads * head_size, w_init=w_init, b_init=b_init, name=name
)(x)
return y.reshape((*x.shape[:-1], self.num_heads, head_size))
class SelfAttentionBlock(hk.Module):
"""
Attention block made of self-attention.
"""
def __init__(
self,
num_heads: int,
embed_dim: int,
ffn_embed_dim: int,
key_size: Optional[int] = None,
name: Optional[str] = None,
):
super().__init__(name=name)
# Add checks on dimensions
if key_size is None:
if embed_dim % num_heads != 0:
raise ValueError(
f"The embedding dimension should be divisible by the number of "
f"heads, however provided embedding dimension is {embed_dim} and "
f"the number of heads is {num_heads}."
)
key_size = embed_dim // num_heads
# Define layers
self.fc1 = hk.Linear(ffn_embed_dim, name="fc1")
self.fc2 = hk.Linear(embed_dim, name="fc2")
self.layer_norm_self_attention = hk.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
name="self_attention_layer_norm",
)
self.layer_norm_mlp = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True, name="final_layer_norm"
)
self.sa_layer = MultiHeadAttention(
num_heads=num_heads,
key_size=key_size,
model_size=embed_dim,
name="self_attention",
)
@hk.transparent
def self_attention(
self,
x: Embedding,
attention_mask: Optional[AttentionMask] = None,
) -> TransformerOutput:
"""
Applies the self attention mechanism.
Args:
x: Input token embeddings of shape (batch_size, seq_len, embed_dim).
attention_mask: Attention mask of shape (batch_size, 1, seq_len, seq_len).
Returns:
Dictionary containing the output embeddings and the attention weights.
"""
return self.sa_layer(x, x, x, attention_mask=attention_mask)
@hk.transparent
def mlp(self, x: Embedding) -> Embedding:
"""
Applies one layer-norm, one linear layer, a Gelu activation,
then a final linear layer.
Args:
x: Embeddings of shape (batch_size, seq_len, key_size * num_heads).
Returns:
The transformed sequence embedding.
"""
x = self.layer_norm_mlp(x)
x = jax.nn.gelu(
self.fc1(x),
approximate=False,
)
x = self.fc2(x)
return x
def __call__(
self,
x: Tokens,
attention_mask: Optional[AttentionMask] = None,
) -> TransformerOutput:
"""
Computes the output of the attention layer.
Args:
x: Input token embeddings of shape (batch_size, seq_len, embed_dim).
attention_mask: Attention mask of shape (batch_size, 1, seq_len, seq_len).
Returns:
A dictionary containing the output embeddings and the attention weights.
"""
# Self-Attention
res = x
x = self.layer_norm_self_attention(x)
output = self.self_attention(
x=x,
attention_mask=attention_mask,
)
x = output["embeddings"]
x = res + x
# MLP
x = x + self.mlp(x)
output["embeddings"] = x
return output # type: ignore
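# Note: the block above is a pre-LayerNorm residual block, i.e.
#   x = x + Attention(LayerNorm(x)); x = x + MLP(LayerNorm(x))
# (the MLP applies its LayerNorm internally), the layout used by ESM-style
# encoders.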
class RobertaLMHead(hk.Module):
"""
Roberta Language Model head. Transform final attention layer output into a
distribution over tokens at each position.
"""
def __init__(self, embed_dim: int, alphabet_size: int, name: Optional[str] = None):
"""
Args:
embed_dim: Embedding dimension.
alphabet_size: Number of tokens in the alphabet.
name: Name of the layer. Defaults to None.
"""
super().__init__(name=name)
self.embed_dim = embed_dim
self.alphabet_size = alphabet_size
# Define layers
self._first_layer_norm = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True, name="emb_layer_norm_after"
)
self._fc1 = hk.Linear(self.embed_dim, name="lm_head_fc_1")
self._final_fc = hk.Linear(self.alphabet_size, name="lm_final_fc")
self._second_layer_norm = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True, name="lm_head_layer_norm"
)
def __call__(self, x: jnp.ndarray) -> Dict[str, jnp.ndarray]:
x = self._first_layer_norm(x)
# Embeddings are computed after the first layer norm to be consistent with ESM
embeddings = x
x = self._fc1(x)
x = jax.nn.gelu(x, approximate=False)
x = self._second_layer_norm(x)
# Compute logits
logits = self._final_fc(x)
return {"embeddings": embeddings, "logits": logits}
class TokensDropout(hk.Module):
"""
Tokens dropout layer.
"""
def __init__(
self,
embed_dim: int,
pad_token_id: int,
mask_token_id: int,
masking_ratio: float,
masking_prob: float,
name: Optional[str] = None,
):
"""
Args:
embed_dim: Embedding dimension.
pad_token_id: ID of the pad token.
mask_token_id: ID of the mask token.
masking_ratio: Masking ratio.
masking_prob: Probability to mask.
name: Name of the layer. Defaults to None.
"""
super().__init__(name=name)
self.pad_token_id = pad_token_id
self.mask_token_id = mask_token_id
self.masking_ratio = masking_ratio
self.masking_prob = masking_prob
self.embed_dim = embed_dim
def __call__(self, x: jnp.ndarray, tokens: Tokens) -> jnp.ndarray:
padding_mask_tokens = tokens == self.pad_token_id
tokens_repeated = jnp.repeat(
tokens[:, :, None], repeats=self.embed_dim, axis=-1
)
x = jnp.where(tokens_repeated == self.mask_token_id, 0.0, x)
mask_ratio_train = self.masking_ratio * self.masking_prob
src_lengths = (~padding_mask_tokens).sum(-1)
mask_ratio_observed = (tokens == self.mask_token_id).sum(-1) / src_lengths
x = x * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
return x
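# Worked example: with masking_ratio=0.15 and masking_prob=0.8 we get
# mask_ratio_train = 0.12. Masked positions are zeroed out, then each sequence
# is rescaled by (1 - 0.12) / (1 - observed mask fraction), so the expected
# embedding magnitude matches what the model saw during masked-LM training.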
class ESMLearnedPositionalEmbeddings(hk.Module):
"""
Learned positional embeddings to be added to token embeddings. Specific to ESM as it
is implemented by shifting the positions by 2 (1 + padding_idx).
"""
def __init__(
self,
vocab_size: int,
embed_dim: int,
padding_idx: int,
name: Optional[str] = None,
):
"""
Args:
vocab_size: Tokenizer's vocabulary size.
embed_dim: Embedding size.
padding_idx: Index attributed to the padding token.
name: Name of the layer. Defaults to None.
"""
super().__init__(name=name)
self.padding_idx = padding_idx
self._embed_layer = hk.Embed(vocab_size + padding_idx + 1, embed_dim)
def __call__(self, tokens: jnp.ndarray) -> jnp.ndarray:
mask = tokens != self.padding_idx
positions = jnp.cumsum(mask, axis=1) * mask + self.padding_idx
return self._embed_layer(positions)
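# Worked example: with padding_idx=1 and tokens [[5, 6, 1, 1]], the padding
# mask is [[1, 1, 0, 0]] and cumsum(mask) * mask + padding_idx gives positions
# [[2, 3, 1, 1]]: real tokens are numbered from padding_idx + 1 = 2 (the
# "shift by 2" mentioned in the class docstring) while pad positions collapse
# onto padding_idx itself.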
| nucleotide-transformer-main | nucleotide_transformer/layers.py |
# Copyright 2022 InstaDeep Ltd
#
# Licensed under the Creative Commons BY-NC-SA 4.0 License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import product
from typing import Dict, List, Optional, Tuple
import numpy as np
import regex as re
from nucleotide_transformer.constants import EXTRA_NUCLEOTIDES, NUCLEOTIDES
def _compute_k_mers(k: int) -> List[str]:
"""
Generates all the different k-mers for nucleotides given a value of k.
Args:
k: The k parameter for k-mers.
Returns:
All the different k-mers.
"""
return ["".join(elt) for elt in product(NUCLEOTIDES, repeat=k)]
class StandardTokenizer:
"""
Simple tokenizer that extracts pre-defined tokens from sequences using regex.
"""
def __init__(
self,
standard_tokens: List[str],
unk_token: str = "<unk>",
pad_token: str = "<pad>",
mask_token: str = "<mask>",
class_token: str = "<cls>",
eos_token: str = "<eos>",
bos_token: str = "<bos>",
prepend_bos_token: bool = False,
prepend_cls_token: bool = False,
append_eos_token: bool = False,
extra_special_tokens: Optional[List[str]] = None,
tokens_to_ids: Optional[Dict[str, int]] = None,
):
"""
Initializes a basic tokenizer instance.
Args:
standard_tokens: Standard tokens, where special tokens are omitted.
unk_token: Unknown token.
pad_token: Pad token.
mask_token: Mask token.
class_token: Class token.
eos_token: End of sentence token.
bos_token: Beginning of sentence token.
prepend_bos_token: Prepend beginning of sentence token.
prepend_cls_token: Prepend class token.
append_eos_token: Append end of sentence token.
extra_special_tokens: (Optional) Enable the user to define additional
special tokens. Since regex is used for tokenization, any special
token that is also a regex metacharacter must include a "\"
escape sequence. For instance "$" -> "\\$"
tokens_to_ids: (Optional) Enable the user to optionally choose ids for
the tokens. If you provide this argument the dictionary must include
the following special tokens
["<unk>","<pad>","<mask>","<cls>","<eos>","<bos>"]
or instantiation will fail. Additionally, if the ids in your dictionary
do not start at 0 then an error will also be raised. If this argument is
not specified, then ids are attributed automatically by the tokenizer
during initialization.
"""
# Define special tokens essential to masked language modelling
special_tokens_1 = [unk_token, pad_token, mask_token, class_token]
special_tokens_2 = [eos_token, bos_token]
special_tokens = special_tokens_1 + special_tokens_2
all_tokens = special_tokens_1 + standard_tokens + special_tokens_2
if extra_special_tokens is not None:
special_tokens.extend(extra_special_tokens)
self._all_tokens = all_tokens
self._standard_tokens = standard_tokens
self._special_tokens = special_tokens
self._unk_token = unk_token
self._pad_token = pad_token
self._mask_token = mask_token
self._class_token = class_token
self._eos_token = eos_token
self._bos_token = bos_token
self._prepend_bos_token = prepend_bos_token
self._prepend_cls_token = prepend_cls_token
self._append_eos_token = append_eos_token
# Can only prepend one of the BOS or CLS tokens
if self._prepend_bos_token and self._prepend_cls_token:
raise ValueError(
"Cannot prepend both BOS and CLS token, must choose only one"
)
# Matching between tokens and ids
if tokens_to_ids is not None:
if set(tokens_to_ids.keys()) != set(self._all_tokens):
raise ValueError(
f"Specified matching between tokens and ids, "
f"but some tokens are missing or mismatch. "
f"Got specifications for tokens: {set(tokens_to_ids.keys())} "
f"and expected for {set(self._all_tokens)}"
)
sorted_tokens = np.sort(list(tokens_to_ids.values()))
if np.any(sorted_tokens != np.arange(len(self._all_tokens))):
raise ValueError(
f"Specified matching between tokens and ids, "
f"but some ids are missing or mismatch. "
f"Got specifications for ids: {sorted_tokens} "
f"and expected for {np.arange(len(self._all_tokens))}"
)
self._tokens_to_ids = tokens_to_ids
else:
self._tokens_to_ids = {tok: i for i, tok in enumerate(self._all_tokens)}
self._ids_to_tokens = {i: tok for tok, i in self._tokens_to_ids.items()}
self._compiled_regex = re.compile("|".join(self._all_tokens + [r"\S"])) # noqa
@property
def vocabulary(self) -> List[str]:
return self._all_tokens
@property
def standard_tokens(self) -> List[str]:
return self._standard_tokens
@property
def vocabulary_size(self) -> int:
"""
Property that returns the total number of tokens.
Returns:
Total number of tokens.
"""
return len(self.vocabulary)
@property
def unk_token_id(self) -> int:
"""
Property that returns id (int representation) of the unknown token.
Returns:
Id (int representation) of the unknown token.
"""
return self.token_to_id(self.unk_token)
@property
def pad_token_id(self) -> int:
"""
Property that returns id (int representation) of the pad token.
Returns:
Id (int representation) of the pad token.
"""
return self.token_to_id(self.pad_token)
@property
def mask_token_id(self) -> int:
"""
Property that returns id (int representation) of the mask token.
Returns:
Id (int representation) of the mask token.
"""
return self.token_to_id(self.mask_token)
@property
def class_token_id(self) -> int:
"""
Property that returns id (int representation) of the class token.
Returns:
Id (int representation) of the class token.
"""
return self.token_to_id(self.class_token)
@property
def eos_token_id(self) -> int:
"""
Property that returns id (int representation) of the eos token.
Returns:
Id (int representation) of the eos token.
"""
return self.token_to_id(self.eos_token)
@property
def bos_token_id(self) -> int:
"""
Property that returns id (int representation) of the bos token.
Returns:
Id (int representation) of the bos token.
"""
return self.token_to_id(self.bos_token)
@property
def special_tokens(self) -> List[str]:
return self._special_tokens
@property
def unk_token(self) -> str:
return self._unk_token
@property
def pad_token(self) -> str:
return self._pad_token
@property
def mask_token(self) -> str:
return self._mask_token
@property
def class_token(self) -> str:
return self._class_token
@property
def eos_token(self) -> str:
return self._eos_token
@property
def bos_token(self) -> str:
return self._bos_token
def id_to_token(self, token_id: int) -> str:
try:
return self._ids_to_tokens[token_id]
except KeyError:
raise KeyError(f"Token id {token_id} not found in vocabulary")
def token_to_id(self, token: str) -> int:
try:
return self._tokens_to_ids[token]
except KeyError:
raise KeyError(f"Token {token} not found in vocabulary")
def tokenize(self, sequence: str) -> Tuple[List[str], List[int]]:
"""
Tokenizes a sequence and returns the list of tokens as well
as the list of their IDs. Any character found in the sequence that does not
correspond to any token in the vocabulary is replaced by the unk token.
Args:
sequence: Sequence to be tokenized.
Returns:
List of tokens.
List of token ids.
"""
tokens: List[str] = self._compiled_regex.findall(sequence)
tokens = [
tok if tok in self._tokens_to_ids.keys() else self._unk_token
for tok in tokens
]
if self._prepend_cls_token:
tokens = [self._class_token] + tokens
if self._prepend_bos_token:
tokens = [self._bos_token] + tokens
if self._append_eos_token:
tokens.append(self._eos_token)
tokens_ids = [self.token_to_id(tok) for tok in tokens]
return tokens, tokens_ids
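# Example (sketch): with standard_tokens=["A", "T", "C", "G"], the compiled
# regex matches known tokens first and any other character falls through to
# r"\S", which is then mapped onto the unk token:
#
#   tokenizer.tokenize("ATXG")  # -> (["A", "T", "<unk>", "G"], [their ids])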
def pad_tokens_batch(
self, batch: List[Tuple[List[str], List[int]]]
) -> List[Tuple[List[str], List[int]]]:
"""
Takes a batch of sequences' tokens and token ids and returns a batch of padded sequences.
Args:
batch: List of tuples, each composed of a sequence's tokens and token ids.
Returns:
A list with one 2-element tuple per input sequence, containing 1. the
list of the str representations of its tokens and 2. the list of the
corresponding token ids. Pad tokens are added so that every sequence in
the batch has the same length (the length of the longest sequence in the
batch).
"""
lengths = [len(t[0]) for t in batch]
maximum_length = max(lengths)
deltas = [maximum_length - length for length in lengths]
padded_tokens = [
t[0] + ([self.pad_token] * delta) for t, delta in zip(batch, deltas)
]
padded_tokens_ids = [
t[1] + ([self.pad_token_id] * delta) for t, delta in zip(batch, deltas)
]
return [
(toks, toks_ids) for toks, toks_ids in zip(padded_tokens, padded_tokens_ids)
]
def batch_tokenize(self, sequences: List[str]) -> List[Tuple[List[str], List[int]]]:
"""
Tokenizes a batch of sequences.
Sequences are padded to the maximum length in the batch.
Args:
sequences: Batch of sequences to be tokenized.
Returns:
Batch of tokenized sequences as well as their token ids,
where every sequence has been padded to the maximum length
in the batch.
"""
return self.pad_tokens_batch( # type: ignore
[self.tokenize(seq) for seq in sequences]
)
class NucleotidesKmersTokenizer(StandardTokenizer):
"""
This is a tokenizer specific for nucleotide sequences.
It only considers sequence containing the tokens A, T, C, G and N.
N is always considered as a special token and tokenized alone.
"""
def __init__(
self,
k_mers: int,
unk_token: str = "<unk>",
pad_token: str = "<pad>",
mask_token: str = "<mask>",
class_token: str = "<cls>",
eos_token: str = "<eos>",
bos_token: str = "<bos>",
prepend_bos_token: bool = False,
prepend_cls_token: bool = False,
append_eos_token: bool = False,
tokens_to_ids: Optional[Dict[str, int]] = None,
):
"""
Instantiates a NucleotidesKmersTokenizer.
Args:
k_mers: How many nucleotides to consider for generating vocabulary.
unk_token: Unknown token.
pad_token: Pad token.
mask_token: Mask token.
class_token: Class token.
eos_token: End of sentence token.
bos_token: Beginning of sentence token.
prepend_bos_token: Prepend beginning of sentence token.
prepend_cls_token: Prepend class token.
append_eos_token: Append end of sentence token.
tokens_to_ids: (Optional) Enable the user to optionally choose ids for
the tokens. If you provide this argument the dictionary must include
the following special tokens
["<unk>","<pad>","<mask>","<cls>","<eos>","<bos>"]
or instantiation will fail. Additionally, if the ids in your dictionary
do not start at 0 then an error will also be raised. If this argument is
not specified, then ids are attributed automatically by the tokenizer
during initialization.
"""
kmers_tokens = _compute_k_mers(k_mers)
standard_tokens = kmers_tokens + NUCLEOTIDES + EXTRA_NUCLEOTIDES
StandardTokenizer.__init__(
self,
standard_tokens=standard_tokens,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
class_token=class_token,
eos_token=eos_token,
bos_token=bos_token,
prepend_bos_token=prepend_bos_token,
prepend_cls_token=prepend_cls_token,
append_eos_token=append_eos_token,
tokens_to_ids=tokens_to_ids,
)
self._k_mers = k_mers
def tokenize(self, sequence: str) -> Tuple[List[str], List[int]]:
"""
Tokenizes a sequence and returns the list of tokens as well
as the list of their IDs. The tokenization algorithm first splits up the
substrings of the input sequence in-between N characters.
Then these substrings are split into pieces of length k, and if it
is possible (edge cases) it adds up pieces of length 1.
If a single character that does not correspond
to any token is found, an error is raised.
Args:
sequence: Sequence to be tokenized.
Returns:
List of tokens.
List of token ids.
Example:
Find below two tokenization examples when k_mers=5.
ATCGAATGGCGATGCAC --> ATCGA ATGGC GATGC A C
ATCGAATNGGCGATGCAC --> ATCGA A T N GGCGA TGCAC
"""
splitted_seq = sequence.split("N")
len_splitted = len(splitted_seq)
tokens: List[str] = []
for i, split in enumerate(splitted_seq):
chunks = [
split[j * self._k_mers : (j + 1) * self._k_mers]
for j in range(len(split) // self._k_mers)
]
if len(split) % self._k_mers != 0:
chunks.append(split[(len(split) // self._k_mers) * self._k_mers :])
for chunk in chunks:
if len(chunk) == self._k_mers:
tokens.append(chunk)
else:
for nucl in chunk:
tokens.append(nucl)
if i < len_splitted - 1:
tokens.append("N")
if self._prepend_cls_token:
tokens = [self._class_token] + tokens
if self._prepend_bos_token:
tokens = [self._bos_token] + tokens
if self._append_eos_token:
tokens.append(self._eos_token)
tokens_ids = [self.token_to_id(tok) for tok in tokens]
return tokens, tokens_ids
class FixedSizeNucleotidesKmersTokenizer(NucleotidesKmersTokenizer):
"""
Nucleotide k-mers tokenizer that pads every batch to a fixed maximum
length. If one of the sequences provided exceeds that fixed length, an
exception is raised.
"""
def __init__(
self,
k_mers: int,
fixed_length: int,
unk_token: str = "<unk>",
pad_token: str = "<pad>",
mask_token: str = "<mask>",
class_token: str = "<cls>",
eos_token: str = "<eos>",
bos_token: str = "<bos>",
prepend_bos_token: bool = False,
prepend_cls_token: bool = False,
append_eos_token: bool = False,
tokens_to_ids: Optional[Dict[str, int]] = None,
):
"""
Instantiates a FixedSizeNucleotidesKmersTokenizer.
Args:
k_mers: How many nucleotides to consider for generating vocabulary.
fixed_length: Fixed length to pad all sequences in batches.
unk_token: Unknown token.
pad_token: Pad token.
mask_token: Mask token.
class_token: Class token.
eos_token: End of sentence token.
bos_token: Beginning of sentence token.
prepend_bos_token: Prepend beginning of sentence token.
prepend_cls_token: Prepend class token.
append_eos_token: Append end of sentence token.
tokens_to_ids: (Optional) Enable the user to optionally choose ids for
the tokens.
"""
NucleotidesKmersTokenizer.__init__(
self,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
class_token=class_token,
eos_token=eos_token,
bos_token=bos_token,
prepend_bos_token=prepend_bos_token,
prepend_cls_token=prepend_cls_token,
append_eos_token=append_eos_token,
k_mers=k_mers,
tokens_to_ids=tokens_to_ids,
)
self._fixed_length = fixed_length
@property
def fixed_length(self) -> int:
"""
Property that returns the pre-defined fixed sequence length.
Returns:
The pre-defined fixed sequence length.
"""
return self._fixed_length
def pad_tokens_batch(
self, batch: List[Tuple[List[str], List[int]]]
) -> List[Tuple[List[str], List[int]]]:
"""
Takes the tokens and token ids of a batch of sequences and returns a batch of
padded sequences.
Args:
batch: List of tuples, each composed of a sequence's tokens and token ids.
Returns:
The padded list, where every sequence is padded to the fixed maximum length.
"""
lengths = [len(t[0]) for t in batch]
maximum_length = max(lengths)
if maximum_length > self._fixed_length:
raise ValueError(
f"Found a sequence with length {maximum_length} that "
f"exceeds the fixed length to tokenize ({self._fixed_length})."
)
deltas = [self._fixed_length - length for length in lengths]
padded_tokens = [
t[0] + ([self.pad_token] * delta) for t, delta in zip(batch, deltas)
]
padded_tokens_ids = [
t[1] + ([self.pad_token_id] * delta) for t, delta in zip(batch, deltas)
]
return [
(toks, toks_ids) for toks, toks_ids in zip(padded_tokens, padded_tokens_ids)
]
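if __name__ == "__main__":
    # Usage sketch: tokenize two DNA sequences with 5-mers and pad every
    # sequence in the batch to a fixed length of 8 tokens.
    tokenizer = FixedSizeNucleotidesKmersTokenizer(k_mers=5, fixed_length=8)
    batch = tokenizer.batch_tokenize(["ATCGAATGGCGATGCAC", "ATCGA"])
    tokens, token_ids = batch[0]
    # tokens == ["ATCGA", "ATGGC", "GATGC", "A", "C", "<pad>", "<pad>", "<pad>"]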
| nucleotide-transformer-main | nucleotide_transformer/tokenizers.py |
import pathlib
import re
import setuptools
_here = pathlib.Path(__file__).resolve().parent
name = "equinox"
# for simplicity we actually store the version in the __version__ attribute in the
# source
with open(_here / name / "__init__.py") as f:
meta_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", f.read(), re.M)
if meta_match:
version = meta_match.group(1)
else:
raise RuntimeError("Unable to find __version__ string.")
author = "Patrick Kidger"
author_email = "[email protected]"
description = "PyTorch-like neural networks in JAX"
with open(_here / "README.md", "r") as f:
readme = f.read()
url = "https://github.com/patrick-kidger/" + name
license = "Apache-2.0"
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Mathematics",
]
python_requires = "~=3.8"
install_requires = ["jax>=0.2.26"]
setuptools.setup(
name=name,
version=version,
author=author,
author_email=author_email,
maintainer=author,
maintainer_email=author_email,
description=description,
long_description=readme,
long_description_content_type="text/markdown",
url=url,
license=license,
classifiers=classifiers,
zip_safe=False,
python_requires=python_requires,
install_requires=install_requires,
packages=setuptools.find_packages(exclude=["examples", "tests"]),
)
| equinox-main | setup.py |
import jax
from .custom_types import PyTree
def _apply_update(u, p):
if u is None:
return p
else:
return p + u
def _is_none(x):
return x is None
def apply_updates(model: PyTree, updates: PyTree) -> PyTree:
"""A `jax.tree_map`-broadcasted version of
```python
model = model if update is None else model + update
```
This is often useful when updating a model's parameters via stochastic gradient
descent. (This function is essentially the same as `optax.apply_updates`, except
that it understands `None`.)
**Arguments:**
- `model`: An arbitrary PyTree.
- `updates`: Any PyTree that is a prefix of `model`.
**Returns:**
The updated model.
"""
# Assumes that updates is a prefix of model
return jax.tree_map(_apply_update, updates, model, is_leaf=_is_none)
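if __name__ == "__main__":
    # Usage sketch: `None` updates mark leaves that should be left unchanged,
    # e.g. frozen parameters or parameters that received no gradient.
    import jax.numpy as jnp

    model = [jnp.array([1.0, 2.0]), jnp.array(3.0)]
    updates = [jnp.array([-0.1, 0.1]), None]
    new_model = apply_updates(model, updates)
    # new_model == [Array([0.9, 2.1]), Array(3.0)] -- the second leaf is untouched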
| equinox-main | equinox/update.py |
from typing import Any, Callable, Sequence, Union
import jax
import jax.numpy as jnp
import numpy as np
from .custom_types import PyTree
_sentinel = object()
_Leaf = Any
def tree_at(
where: Callable[[PyTree], Union[_Leaf, Sequence[_Leaf]]],
pytree: PyTree,
replace: Union[_Leaf, Sequence[_Leaf]] = _sentinel,
replace_fn: Callable[[_Leaf], _Leaf] = _sentinel,
is_leaf: Callable[[_Leaf], bool] = None,
) -> PyTree:
"""Updates a PyTree out-of-place; a bit like using `.at[].set()` on a JAX array.
**Arguments:**
- `where`: A callable `PyTree -> Leaf` or `PyTree -> Sequence[Leaf]`. It should
consume a PyTree with the same structure as `pytree`, and return the leaf or
leaves that should be replaced. For example
`where = lambda mlp: mlp.layers[-1].linear.weight`.
- `pytree`: The PyTree to modify.
- `replace`: Either a single element, or a sequence of the same length as returned
by `where`. This specifies the replacements to make at the locations specified
by `where`. Mutually exclusive with `replace_fn`.
- `replace_fn`: A function `Leaf -> Any`. It will be called on every leaf specified
by `where`. The return value from `replace_fn` will be used in its place.
Mutually exclusive with `replace`.
**Returns:**
A copy of the input PyTree, with the appropriate modifications.
!!! example
This can be used to help specify the weights of a model to train or not to
train:
```python
model = ...
trainable = jax.tree_map(lambda _: False, model)
trainable = equinox.tree_at(lambda mlp: mlp.layers[-1].linear.weight, trainable, replace=True)
equinox.filter_grad(..., filter_spec=trainable)
```
!!! example
Sub-PyTrees can be replaced by flattening them to leaves first:
```python
equinox.tree_at(lambda t: jax.tree_leaves(t.subtree), pytree,
jax.tree_leaves(new_subtree))
```
"""
if (replace is _sentinel and replace_fn is _sentinel) or (
replace is not _sentinel and replace_fn is not _sentinel
):
raise ValueError(
"Precisely one of `replace` and `replace_fn` must be specified."
)
elif replace is _sentinel:
replace_passed = False
replacer = lambda j, i: replace_fn(flat[i])
else:
replace_passed = True
replacer = lambda j, i: replace[j]
# TODO: is there a neater way of accomplishing this?
flat, treedef = jax.tree_flatten(pytree, is_leaf=is_leaf)
flat_indices = list(range(len(flat)))
index_pytree = jax.tree_unflatten(treedef, flat_indices)
index = where(index_pytree)
# where can return either a single entry, or a sequence
if isinstance(index, int):
index = (index,)
replace = (replace,)
elif isinstance(index, Sequence):
for i in index:
if not isinstance(i, int):
raise ValueError(
r"""`where` must return a sequence of only leaves; not some subtree.
If you want to replace all of a subtree, you can do so by replacing
>>> eqx.tree_at(lambda t: t.subtree, tree, new_subtree) # buggy
with
>>> eqx.tree_at(lambda t: jax.tree_leaves(t.subtree), tree,
... jax.tree_leaves(new_subtree)) # fixed
"""
)
if replace_passed and len(index) != len(replace):
raise ValueError(
"`where` must return a sequence of leaves of the same length as `replace`."
)
for j, i in enumerate(index):
flat[i] = replacer(j, i)
return jax.tree_unflatten(treedef, flat)
def tree_equal(*pytrees: PyTree) -> bool:
"""Returns `True` if all input PyTrees are equal. Every PyTrees must have the same
structure. Any JAX or NumPy arrays (as leaves) must have the same shape, dtype, and
values to be considered equal. JAX arrays and NumPy arrays are not considered equal
to each other.
**Arguments:**
- `*pytrees`: Any number of PyTrees each with any structure.
**Returns:**
A boolean.
"""
flat, treedef = jax.tree_flatten(pytrees[0])
array_types = (jnp.ndarray, np.ndarray)
for pytree in pytrees[1:]:
flat_, treedef_ = jax.tree_flatten(pytree)
if treedef_ != treedef:
return False
for elem, elem_ in zip(flat, flat_):
if isinstance(elem, array_types):
if isinstance(elem_, array_types):
if (
(type(elem) != type(elem_))
or (elem.shape != elem_.shape)
or (elem.dtype != elem_.dtype)
or (elem != elem_).any()
):
return False
else:
return False
else:
if isinstance(elem_, array_types):
return False
else:
if elem != elem_:
return False
return True
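if __name__ == "__main__":
    # Usage sketch: arrays must match in type, shape, dtype and values; JAX
    # and NumPy arrays never compare equal to each other.
    assert tree_equal({"a": jnp.zeros(2)}, {"a": jnp.zeros(2)})
    assert not tree_equal({"a": jnp.zeros(2)}, {"a": np.zeros(2)})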
| equinox-main | equinox/tree.py |
import functools as ft
from dataclasses import dataclass
from typing import Any
import jax
from .deprecated import deprecated
from .filters import combine, is_array, partition, validate_filters
from .module import Module, static_field
class _Static(Module):
value: Any = static_field()
@ft.lru_cache(maxsize=4096)
def _filter_jit_cache(f, **jitkwargs):
@ft.partial(jax.jit, static_argnums=(0, 1, 4), **jitkwargs)
def f_wrapped(
static_leaves, static_treedef, dynamic_args, dynamic_kwargs, filter_spec_return
):
static_args, static_kwargs = jax.tree_unflatten(static_treedef, static_leaves)
args = combine(dynamic_args, static_args)
kwargs = combine(dynamic_kwargs, static_kwargs)
out = f(*args, **kwargs)
dynamic_out, static_out = partition(out, filter_spec_return)
return dynamic_out, _Static(static_out)
return f_wrapped
def filter_jit(fun, *, filter_spec=is_array, filter_spec_return=is_array, **jitkwargs):
"""Wraps together [`equinox.partition`][] and `jax.jit`.
**Arguments:**
- `fun` is a pure function to JIT compile.
- `filter_spec` is a PyTree whose structure should be a prefix of the structure of
the inputs to `fun`. It behaves as the `filter_spec` argument to
[`equinox.filter`][]. Truthy values will be traced; falsey values will be held
static.
- `filter_spec_return` is a PyTree whose structure should be a prefix of the
structure of the outputs of `fun`. It behaves as the `filter_spec` argument to
[`equinox.filter`][]. Truthy values should be tracers; falsey values are any
(non-tracer) auxiliary information to return.
- `**jitkwargs` are any other keyword arguments to `jax.jit`.
!!! info
Specifically, if calling `fun(*args, **kwargs)`, then `filter_spec` must
have a structure which is a prefix for `(args, kwargs)`.
**Returns:**
The JIT'd version of `fun`.
!!! info
A very important special case is to trace all JAX arrays and treat all other
objects as static.
This is accomplished with `filter_spec=equinox.is_array`,
`filter_spec_return=equinox.is_array` -- which are the defaults. (It is
unusual to need different behaviour to this.)
"""
if any(
x in jitkwargs for x in ("static_argnums", "static_argnames", "donate_argnums")
):
raise ValueError(
"`jitkwargs` cannot contain 'static_argnums', 'static_argnames' or "
"'donate_argnums'."
)
# We choose not to make a distinction between ([arg, ..., arg], kwargs) and ((arg, ..., arg), kwargs)
if (
isinstance(filter_spec, tuple)
and len(filter_spec) == 2
and isinstance(filter_spec[0], list)
):
filter_spec = (tuple(filter_spec[0]), filter_spec[1])
@ft.wraps(fun)
def fun_wrapper(*args, **kwargs):
(dynamic_args, dynamic_kwargs), (static_args, static_kwargs) = partition(
(args, kwargs), filter_spec
)
static_leaves, static_treedef = jax.tree_flatten((static_args, static_kwargs))
static_leaves = tuple(static_leaves)
dynamic_out, static_out = _filter_jit_cache(fun, **jitkwargs)(
static_leaves,
static_treedef,
dynamic_args,
dynamic_kwargs,
filter_spec_return,
)
return combine(dynamic_out, static_out.value)
return fun_wrapper
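# Usage sketch: with the default filter_specs, JAX arrays are traced and
# everything else (here the bool `flag`) is held static, so changing `flag`
# re-traces the function instead of raising a tracer error.
#
#   import jax.numpy as jnp
#
#   @filter_jit
#   def f(x, flag):
#       return x + 1 if flag else x - 1
#
#   f(jnp.array(1.0), True)  # `x` is traced; `flag` is static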
#
# Deprecated
#
@ft.lru_cache(maxsize=4096)
def _jitf_cache(f, args_treedef, **jitkwargs):
@ft.partial(jax.jit, **jitkwargs)
def f_wrapped(*args):
args = jax.tree_unflatten(args_treedef, args)
return f(*args)
return f_wrapped
@dataclass(frozen=True)
class _UnPyTreeAble:
value: Any
def __bool__(self):
return False
_marker_sentinel = object()
@deprecated(in_favour_of=filter_jit)
def jitf(
fun,
*,
filter_fn=None,
filter_tree=None,
static_argnums=None,
static_argnames=None,
donate_argnums=(),
**jitkwargs
):
if isinstance(static_argnums, int):
static_argnums = (static_argnums,)
if static_argnames is not None:
raise NotImplementedError(
"jitf does not yet support `static_argnames`. use static_argnums instead."
)
if donate_argnums != ():
raise NotImplementedError("jitf does not ye support `donate_argnums`.")
validate_filters("jitf", filter_fn, filter_tree)
if static_argnums is None:
len_static_argnums = 0
else:
len_static_argnums = len(static_argnums)
@ft.wraps(fun)
def f_wrapper(*args, **kwargs):
if len(kwargs):
raise NotImplementedError(
"jitf does not yet support keyword arguments. Use positional arguments instead."
)
if filter_tree is not None:
if len(args) - len_static_argnums == 1:
new_filter_tree = (filter_tree,)
else:
new_filter_tree = tuple(filter_tree)
# Mark the arguments that have been explicitly declared static via `static_argnums`
if static_argnums is not None:
args = list(args)
for index in static_argnums:
args[index] = _UnPyTreeAble(args[index])
if filter_tree is not None:
new_filter_tree = list(new_filter_tree)
for index in static_argnums:
new_filter_tree.insert(index, _UnPyTreeAble(None))
# Flatten everything else
args_flat, args_treedef = jax.tree_flatten(args)
if filter_tree is not None:
filter_flat, flat_treedef = jax.tree_flatten(new_filter_tree)
if flat_treedef != args_treedef:
raise ValueError(
"The tree stucture for the filters and the arguments must be the same."
)
# Figure out static argnums with respect to this new flattened structure.
new_static_argnums = []
if filter_tree is None:
# implies filter_fn is not None
for i, arg in enumerate(args_flat):
if isinstance(arg, _UnPyTreeAble) or not filter_fn(arg):
new_static_argnums.append(i)
else:
for i, (arg, filter) in enumerate(zip(args_flat, filter_flat)):
if not filter:
new_static_argnums.append(i)
new_static_argnums = tuple(new_static_argnums)
if static_argnums is not None:
args_flat = [
arg.value if isinstance(arg, _UnPyTreeAble) else arg
for arg in args_flat
]
f_jitted = _jitf_cache(
fun, args_treedef, static_argnums=new_static_argnums, **jitkwargs
)
return f_jitted(*args_flat)
return f_wrapper
| equinox-main | equinox/jit.py |
import functools as ft
import warnings
def deprecated(*, in_favour_of):
if not isinstance(in_favour_of, str):
in_favour_of = in_favour_of.__name__
def decorator(fn):
msg = f"{fn.__name__} is deprecated in favour of {in_favour_of}"
@ft.wraps(fn)
def wrapper(*args, **kwargs):
warnings.warn(msg)
return fn(*args, **kwargs)
return wrapper
return decorator
| equinox-main | equinox/deprecated.py |
from . import nn
from .filters import (
combine,
filter,
is_array,
is_array_like,
is_inexact_array,
is_inexact_array_like,
merge,
partition,
split,
)
from .grad import (
filter_custom_vjp,
filter_grad,
filter_value_and_grad,
gradf,
value_and_grad_f,
)
from .jit import filter_jit, jitf
from .module import Module, static_field
from .tree import tree_at, tree_equal
from .update import apply_updates
__version__ = "0.1.5"
| equinox-main | equinox/__init__.py |
import typing
from typing import Any
import jax
import jax.numpy as jnp
if getattr(typing, "GENERATING_DOCUMENTATION", False):
Array = "jax.numpy.ndarray"
else:
Array = jnp.ndarray
PyTree = Any
TreeDef = type(jax.tree_structure(0))
| equinox-main | equinox/custom_types.py |
import abc
import functools as ft
import inspect
from dataclasses import dataclass, field, fields
import jax
from .tree import tree_equal
def static_field(**kwargs):
"""Used for marking that a field should _not_ be treated as part of the PyTree
of a [`equinox.Module`][]. (And is instead just treated as extra metadata.)
!!! example
```python
class MyModule(equinox.Module):
normal_field: int
static_field: int = equinox.static_field()
mymodule = MyModule(1, 2)
leaves, treedef = jax.tree_flatten(mymodule)
assert len(leaves) == 1
```
In practice this should rarely be used; it is usually preferable to just filter
out each field with `eqx.filter` whenever you need to select only some fields.
**Arguments:**
- `**kwargs`: If any are passed then they are passed on to `dataclasses.field`.
(Recall that Equinox uses dataclasses for its modules.)
"""
try:
metadata = dict(kwargs["metadata"])
except KeyError:
metadata = kwargs["metadata"] = {}
if "static" in metadata:
raise ValueError("Cannot use metadata with `static` already set.")
metadata["static"] = True
return field(**kwargs)
class _wrap_method:
def __init__(self, method):
self.method = method
if getattr(self.method, "__isabstractmethod__", False):
self.__isabstractmethod__ = self.method.__isabstractmethod__
def __get__(self, instance, owner):
if instance is None:
return self.method
return jax.tree_util.Partial(self.method, instance)
@ft.lru_cache(maxsize=128)
def _make_initable(cls):
field_names = {field.name for field in fields(cls)}
class _InitableModule(cls):
pass
# Done like this to avoid dataclasses complaining about overriding setattr on a
# frozen class.
def __setattr__(self, name, value):
if name in field_names:
object.__setattr__(self, name, value)
else:
raise AttributeError(f"Cannot set attribute {name}")
_InitableModule.__setattr__ = __setattr__
return _InitableModule
def _has_dataclass_init(cls):
if "__init__" in cls.__dict__:
return False
return cls._has_dataclass_init
def _not_magic(k):
return not (k.startswith("__") and k.endswith("__"))
# Inherits from abc.ABCMeta as a convenience for a common use-case.
# It's not a feature we use ourselves.
class _ModuleMeta(abc.ABCMeta):
def __new__(mcs, name, bases, dict_):
dict_ = {
k: _wrap_method(v) if _not_magic(k) and inspect.isfunction(v) else v
for k, v in dict_.items()
}
cls = super().__new__(mcs, name, bases, dict_)
# Do override subclasses' dataclass-__init__-s. (None of which call super, so
# they must be overridden.)
# Don't override custom __init__'s, which leads to poor ergonomics:
# e.g. if `B` has a custom init then `class A(B): pass` would otherwise set a
# dataclass init that overrides the custom __init__.
_init = cls._has_dataclass_init = _has_dataclass_init(cls)
if _init:
init_doc = cls.__init__.__doc__
cls = dataclass(eq=False, frozen=True, init=_init)(cls)
if _init:
cls.__init__.__doc__ = init_doc
jax.tree_util.register_pytree_node_class(cls)
return cls
def __call__(cls, *args, **kwargs):
self = cls.__new__(cls, *args, **kwargs)
# Defreeze it during __init__
initable_cls = _make_initable(cls)
object.__setattr__(self, "__class__", initable_cls)
cls.__init__(self, *args, **kwargs)
object.__setattr__(self, "__class__", cls)
missing_names = {
field.name
for field in fields(cls)
if field.init and field.name not in dir(self)
}
if len(missing_names):
raise ValueError(
f"The following fields were not initialised during __init__: {missing_names}"
)
return self
class Module(metaclass=_ModuleMeta):
"""Base class. Created your model by inheriting from this.
**Fields**
Specify all its fields at the class level (identical to
[dataclasses](https://docs.python.org/3/library/dataclasses.html)). This defines
its children as a PyTree.
```python
class MyModule(equinox.Module):
weight: jax.numpy.ndarray
bias: jax.numpy.ndarray
submodule: equinox.Module
```
**Initialisation**
A default `__init__` is automatically provided, which just fills in fields with the
arguments passed. For example `MyModule(weight, bias, submodule)`.
Alternatively (quite commonly) you can provide an `__init__` method yourself:
```python
class MyModule(equinox.Module):
weight: jax.numpy.ndarray
bias: jax.numpy.ndarray
submodule: equinox.Module
def __init__(self, in_size, out_size, key):
wkey, bkey, skey = jax.random.split(key, 3)
self.weight = jax.random.normal(wkey, (out_size, in_size))
self.bias = jax.random.normal(bkey, (out_size,))
self.submodule = equinox.nn.Linear(in_size, out_size, key=skey)
```
**Methods**
It is common to create some methods on the class -- for example to define the forward pass of a model.
```python
class MyModule(equinox.Module):
... # as above
def __call__(self, x):
return self.submodule(x) + self.weight @ x + self.bias
```
!!! tip
You don't have to define `__call__`:
- You can define other methods if you want.
- You can define multiple methods if you want.
- You can define no methods if you want. (And just use `equinox.Module` as a
nice syntax for custom PyTrees.)
No method is special-cased.
**Usage**
After you have defined your model, then you can use it just like any other PyTree
-- that just happens to have some methods attached. In particular you can pass it
around across `jax.jit`, `jax.grad` etc. in exactly the way that you're used to.
!!! example
If you wanted to, then it would be completely safe to do
```python
class MyModule(equinox.Module):
...
@jax.jit
def __call__(self, x):
...
```
because `self` is just a PyTree. Unlike most other neural network libraries, you
can mix Equinox and native JAX without any difficulties at all.
"""
_has_dataclass_init = True
def __hash__(self):
return hash(tuple(jax.tree_leaves(self)))
def __eq__(self, other):
return tree_equal(self, other)
def tree_flatten(self):
dynamic_field_names = []
dynamic_field_values = []
static_field_names = []
static_field_values = []
for field_ in fields(self):
name = field_.name
try:
value = self.__dict__[name]
except KeyError:
continue
if field_.metadata.get("static", False):
static_field_names.append(name)
static_field_values.append(value)
else:
dynamic_field_names.append(name)
dynamic_field_values.append(value)
return tuple(dynamic_field_values), (
tuple(dynamic_field_names),
tuple(static_field_names),
tuple(static_field_values),
)
@classmethod
def tree_unflatten(cls, aux, dynamic_field_values):
self = cls.__new__(cls)
dynamic_field_names, static_field_names, static_field_values = aux
for name, value in zip(dynamic_field_names, dynamic_field_values):
object.__setattr__(self, name, value)
for name, value in zip(static_field_names, static_field_values):
object.__setattr__(self, name, value)
return self
| equinox-main | equinox/module.py |
import functools as ft
import types
import typing
import jax
from .deprecated import deprecated
from .filters import (
combine,
is_array,
is_inexact_array,
merge,
partition,
split,
validate_filters,
)
def filter_value_and_grad(
fun, *, filter_spec=is_inexact_array, argnums=None, **gradkwargs
):
"""As [`equinox.filter_grad`][], except that it is `jax.value_and_grad` that is
wrapped.
"""
if argnums is not None:
raise ValueError(
"`argnums` should not be passed. If you need to differentiate "
"multiple objects then collect them into a tuple and pass that "
"as the first argument."
)
@ft.partial(jax.value_and_grad, argnums=0, **gradkwargs)
def fun_value_and_grad(diff_x, nondiff_x, *args, **kwargs):
x = combine(diff_x, nondiff_x)
return fun(x, *args, **kwargs)
def fun_value_and_grad_wrapper(x, *args, **kwargs):
diff_x, nondiff_x = partition(x, filter_spec)
return fun_value_and_grad(diff_x, nondiff_x, *args, **kwargs)
return fun_value_and_grad_wrapper
def filter_grad(fun, *, filter_spec=is_inexact_array, **gradkwargs):
"""Wraps together [`equinox.partition`][] and `jax.grad`.
**Arguments:**
- `fun` is a pure function to JIT compile.
- `filter_spec` is a PyTree whose structure should be a prefix of the structure of
the **first** argument to `fun`. It behaves as the `filter_spec` argument to
[`equinox.filter`][]. Truthy values will be differentiated; falsey values will
not.
- `**gradkwargs` are any other keyword arguments to `jax.grad`.
**Returns:**
A function computing the derivative of `fun` with respect to its first input. Any
nondifferentiable leaves will have `None` as the gradient. See
`equinox.apply_updates` for a convenience function that will only attempt to apply
non-`None` updates.
!!! info
A very important special case is to trace all inexact (i.e. floating point)
JAX arrays and treat all other objects as nondifferentiable.
This is accomplished with `filter_spec=equinox.is_inexact_array`, which is the
default.
!!! tip
If you need to differentiate multiple objects, then put them together into a
tuple and pass that through the first argument:
```python
# We want to differentiate `func` with respect to both `x` and `y`.
def func(x, y):
...
@equinox.filter_grad
def grad_func(x__y):
x, y = x__y
return func(x, y)
```
"""
has_aux = gradkwargs.get("has_aux", False)
fun_value_and_grad = filter_value_and_grad(
fun, filter_spec=filter_spec, **gradkwargs
)
def fun_grad(*args, **kwargs):
value, grad = fun_value_and_grad(*args, **kwargs)
if has_aux:
value, aux = value
return aux, grad
else:
return grad
return fun_grad
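# Usage sketch: gradients flow only through the inexact arrays of the first
# argument; every other leaf gets `None`, which pairs with
# `equinox.apply_updates`. `model` and `x` are placeholders here.
#
#   @filter_grad
#   def loss_grad(model, x):
#       return jnp.sum(model(x) ** 2)
#
#   grads = loss_grad(model, x)  # same PyTree structure as `model`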
class filter_custom_vjp:
"""Provides an easier API for `jax.custom_vjp`, by using filtering.
Usage is:
```python
@equinox.filter_custom_vjp
def fn(vjp_arg, *args, **kwargs):
# vjp_arg is some PyTree of arbitrary Python objects.
# args, kwargs contain arbitrary Python objects.
...
return obj # some PyTree of arbitrary Python objects.
def fn_fwd(vjp_arg, *args, **kwargs):
...
# Should return `obj` as before. `residuals` can be any collection of JAX
# arrays you want to keep around for the backward pass.
return obj, residuals
def fn_bwd(residuals, grad_obj, vjp_arg, *args, **kwargs):
# grad_obj will have `None` as the gradient for any leaves of `obj` that were
# not JAX arrays
...
# grad_vjp_arg should have `None` as the gradient for any leaves of `vjp_arg`
# that were not JAX arrays.
return grad_vjp_arg
fn.defvjp(fn_fwd, fn_bwd)
```
The key differences to `jax.custom_vjp` are that:
- Only the gradient of the first argument, `vjp_arg`, should be computed on the
backward pass. Everything else will automatically have zero gradient.
- You do not need to distinguish differentiable from nondifferentiable manually.
Instead you should return gradients for all inexact JAX arrays in the first
argument. (And just put `None` on every other leaf of the PyTree.)
- As a convenience, all of the inputs from the forward pass are additionally made
available to you on the backward pass.
!!! tip
If you need gradients with respect to multiple arguments, then just pack them
together as a tuple via the first argument `vjp_arg`. (See also
[`equinox.filter_grad`][] for a similar trick.)
"""
def __init__(self, fn):
self.fn = fn
self.fn_wrapped = None
def defvjp(self, fn_fwd, fn_bwd):
def fn_wrapped(
nonarray_vjp_arg,
nonarray_args_kwargs,
diff_array_vjp_arg,
nondiff_array_vjp_arg,
array_args_kwargs,
):
vjp_arg = combine(
nonarray_vjp_arg, diff_array_vjp_arg, nondiff_array_vjp_arg
)
args, kwargs = combine(nonarray_args_kwargs, array_args_kwargs)
return self.fn(vjp_arg, *args, **kwargs)
def fn_fwd_wrapped(
nonarray_vjp_arg,
nonarray_args_kwargs,
diff_array_vjp_arg,
nondiff_array_vjp_arg,
array_args_kwargs,
):
vjp_arg = combine(
nonarray_vjp_arg, diff_array_vjp_arg, nondiff_array_vjp_arg
)
args, kwargs = combine(nonarray_args_kwargs, array_args_kwargs)
out, residuals = fn_fwd(vjp_arg, *args, **kwargs)
return out, (
residuals,
diff_array_vjp_arg,
nondiff_array_vjp_arg,
array_args_kwargs,
)
def fn_bwd_wrapped(nonarray_vjp_arg, nonarray_args_kwargs, residuals, grad_out):
(
residuals,
diff_array_vjp_arg,
nondiff_array_vjp_arg,
array_args_kwargs,
) = residuals
vjp_arg = combine(
nonarray_vjp_arg, diff_array_vjp_arg, nondiff_array_vjp_arg
)
args, kwargs = combine(nonarray_args_kwargs, array_args_kwargs)
out = fn_bwd(residuals, grad_out, vjp_arg, *args, **kwargs)
if jax.tree_structure(out) != jax.tree_structure(diff_array_vjp_arg):
raise RuntimeError(
"custom_vjp gradients must have the same structure as "
"`equinox.filter(vjp_arg, equinox.is_inexact_array)`, where "
"`vjp_arg` is the first argument used in the forward pass."
)
# None is the gradient through nondiff_array_vjp_arg and array_args_kwargs
return out, None, None
fn_wrapped = jax.custom_vjp(fn_wrapped, nondiff_argnums=(0, 1))
fn_wrapped.defvjp(fn_fwd_wrapped, fn_bwd_wrapped)
self.fn_wrapped = fn_wrapped
def __call__(self, vjp_arg, /, *args, **kwargs):
if self.fn_wrapped is None:
raise RuntimeError(f"defvjp not yet called for {self.fn.__name__}")
array_vjp_arg, nonarray_vjp_arg = partition(vjp_arg, is_array)
diff_array_vjp_arg, nondiff_array_vjp_arg = partition(
array_vjp_arg, is_inexact_array
)
array_args_kwargs, nonarray_args_kwargs = partition((args, kwargs), is_array)
return self.fn_wrapped(
nonarray_vjp_arg,
nonarray_args_kwargs,
diff_array_vjp_arg,
nondiff_array_vjp_arg,
array_args_kwargs,
)
if getattr(typing, "GENERATING_DOCUMENTATION", False):
_filter_custom_vjp_doc = filter_custom_vjp.__doc__
def defvjp(fn_fwd, fn_bwd):
pass
def filter_custom_vjp(fn):
return types.SimpleNamespace(defvjp=defvjp)
filter_custom_vjp.__doc__ = _filter_custom_vjp_doc
#
# Deprecated
#
@deprecated(in_favour_of=filter_value_and_grad)
def value_and_grad_f(fun, *, filter_fn=None, filter_tree=None, argnums=0, **gradkwargs):
if isinstance(argnums, int):
unwrap = True
argnums = (argnums,)
if filter_tree is not None:
filter_tree = (filter_tree,)
else:
unwrap = False
validate_filters("value_and_grad_f", filter_fn, filter_tree)
@ft.partial(jax.value_and_grad, argnums=argnums, **gradkwargs)
def f_value_and_grad(*args, **kwargs):
*args, notes = args
args = list(args)
for i, (arg_nograd, which, treedef) in notes.items():
arg_grad = args[i]
arg = merge(arg_grad, arg_nograd, which, treedef)
args[i] = arg
return fun(*args, **kwargs)
def f_value_and_grad_wrapper(*args, **kwargs):
args = list(args)
notes = {}
for j, i in enumerate(argnums):
arg = args[i]
if filter_fn is None:
# implies filter_tree is not None
arg_grad, arg_nograd, which, treedef = split(
arg, filter_tree=filter_tree[j]
)
else:
arg_grad, arg_nograd, which, treedef = split(arg, filter_fn=filter_fn)
args[i] = arg_grad
notes[i] = (arg_nograd, which, treedef)
value, grad = f_value_and_grad(*args, notes, **kwargs)
grad = list(grad)
for j, i in enumerate(argnums):
g = grad[j]
arg_nograd, which, treedef = notes[i]
none_grad = [None for _ in arg_nograd]
grad[j] = merge(g, none_grad, which, treedef)
if unwrap:
(grad,) = grad
return value, grad
return f_value_and_grad_wrapper
@deprecated(in_favour_of=filter_grad)
def gradf(fun, *, has_aux=False, **gradkwargs):
f_value_and_grad = value_and_grad_f(fun, has_aux=has_aux, **gradkwargs)
def f_grad(*args, **kwargs):
value, grad = f_value_and_grad(*args, **kwargs)
if has_aux:
value, aux = value
return aux, grad
else:
return grad
return f_grad
| equinox-main | equinox/grad.py |
from typing import Any, Callable, List, Optional, Tuple, Union
import jax
import jax.numpy as jnp
import numpy as np
from .custom_types import PyTree, TreeDef
from .deprecated import deprecated
#
# Filter functions
#
def is_array(element: Any) -> bool:
"""Returns `True` if `element` is a JAX array (but not a NumPy array)."""
return isinstance(element, jnp.ndarray)
# Does _not_ do a try/except on jnp.asarray(element) because that's very slow.
# Chosen to match
# https://github.com/google/jax/blob/4a17c78605e7fc69a69a999e2f6298db79d3837a/jax/_src/numpy/lax_numpy.py#L542 # noqa: E501
def is_array_like(element: Any) -> bool:
"""Returns `True` if `element` is a JAX array, a NumPy array, or a Python
`float`/`complex`/`bool`/`int`.
"""
return isinstance(
element, (jnp.ndarray, np.ndarray, float, complex, bool, int)
) or hasattr(element, "__jax_array__")
def is_inexact_array(element: Any) -> bool:
"""Returns `True` if `element` is an inexact (i.e. floating point) JAX array."""
return is_array(element) and jnp.issubdtype(element.dtype, jnp.inexact)
def is_inexact_array_like(element: Any) -> bool:
"""Returns `True` if `element` is an inexact JAX array, an inexact NumPy array, or
a Python `float` or `complex`.
"""
if hasattr(element, "__jax_array__"):
element = element.__jax_array__()
return (
isinstance(element, (jnp.ndarray, np.ndarray))
and jnp.issubdtype(element.dtype, jnp.inexact)
) or isinstance(element, (float, complex))
#
# Filtering/combining
#
def _make_filter_tree(mask: Union[bool, Callable[[Any], bool]], arg: Any) -> bool:
if isinstance(mask, bool):
return mask
elif callable(mask):
return jax.tree_map(mask, arg)
else:
raise ValueError("`filter_spec` must consist of booleans and callables only.")
def filter(
pytree: PyTree, filter_spec: PyTree, inverse: bool = False, replace: Any = None
) -> PyTree:
"""
Filters out the leaves of a PyTree not satisfying a condition. Those not satisfying
the condition are replaced with `replace`.
**Arguments:**
- `pytree` is any PyTree.
- `filter_spec` is a PyTree whose structure should be a prefix of the structure of
`pytree`. Each of its leaves should either be:
- `True`, in which case the leaf or subtree is kept;
- `False`, in which case the leaf or subtree is replaced with `replace`;
- a callable `Leaf -> bool`, in which case this is evaluated on the leaf or
mapped over the subtree, and the leaf kept or replaced as appropriate.
- `inverse` switches the truthy/falsey behaviour: falsey results are kept and
truthy results are replaced.
- `replace` is what to replace any falsey leaves with. Defaults to `None`.
**Returns:**
A PyTree of the same structure as `pytree`.
!!! info
A common special case is `equinox.filter(pytree, equinox.is_array)`. Then
`equinox.is_array` is evaluated on all of `pytree`'s leaves, and each leaf then
kept or replaced.
!!! info
See also [`equinox.combine`][] to reconstitute the PyTree again.
"""
inverse = bool(inverse) # just in case, to make the != trick below work reliably
filter_tree = jax.tree_map(_make_filter_tree, filter_spec, pytree)
return jax.tree_map(
lambda mask, x: x if bool(mask) != inverse else replace, filter_tree, pytree
)
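# Usage sketch:
#
#   import jax.numpy as jnp
#
#   pytree = [jnp.array(1.0), 2, "hello"]
#   filter(pytree, is_array)                # [Array(1., ...), None, None]
#   filter(pytree, is_array, inverse=True)  # [None, 2, "hello"]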
def partition(pytree: PyTree, filter_spec: PyTree, replace: Any = None) -> PyTree:
"""Equivalent to `filter(...), filter(..., inverse=True)`, but slightly more
efficient.
"""
filter_tree = jax.tree_map(_make_filter_tree, filter_spec, pytree)
left = jax.tree_map(lambda mask, x: x if mask else replace, filter_tree, pytree)
right = jax.tree_map(lambda mask, x: replace if mask else x, filter_tree, pytree)
return left, right
def _combine(*args):
for arg in args:
if arg is not None:
return arg
return None
def _is_none(x):
return x is None
def combine(*pytrees: PyTree) -> PyTree:
"""Combines multiple PyTrees into one PyTree, by replacing `None` leaves.
!!! example
```python
pytree1 = [None, 1, 2]
pytree2 = [0, None, None]
equinox.combine(pytree1, pytree2) # [0, 1, 2]
```
!!! tip
The idea is that `equinox.combine` should be used to undo a call to
[`equinox.filter`][] or [`equinox.partition`][].
**Arguments:**
    - `*pytrees`: a sequence of PyTrees all with the same structure.
**Returns:**
A PyTree with the same structure as its inputs. Each leaf will be the first
non-`None` leaf found in the corresponding leaves of `pytrees` as they are
iterated over.
"""
return jax.tree_map(_combine, *pytrees, is_leaf=_is_none)
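# Minimal usage sketch (illustrative, not part of the public API): `partition`
# splits a pytree into a "parameter" half and a "static" half, and `combine`
# inverts it exactly. Assumes the module-level `jnp` import.
def _partition_combine_example():
    pytree = {"weight": jnp.ones(3), "activation": "relu"}
    params, static = partition(pytree, is_inexact_array)
    # params == {"weight": <array>, "activation": None}
    # static == {"weight": None, "activation": "relu"}
    restored = combine(params, static)
    assert restored["activation"] == "relu"
    return restored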
#
# Deprecated
#
@deprecated(in_favour_of=filter)
def split(
pytree: PyTree,
filter_fn: Optional[Callable[[Any], bool]] = None,
filter_tree: Optional[PyTree] = None,
) -> Tuple[List[Any], List[Any], List[bool], TreeDef]:
validate_filters("split", filter_fn, filter_tree)
flat, treedef = jax.tree_flatten(pytree)
flat_true = []
flat_false = []
if filter_fn is None:
which, treedef_filter = jax.tree_flatten(filter_tree)
if treedef != treedef_filter:
raise ValueError(
"filter_tree must have the same tree structure as the PyTree being split."
)
for f, w in zip(flat, which):
if w:
flat_true.append(f)
else:
flat_false.append(f)
else:
which = []
for f in flat:
if filter_fn(f):
flat_true.append(f)
which.append(True)
else:
flat_false.append(f)
which.append(False)
return flat_true, flat_false, which, treedef
@deprecated(in_favour_of=combine)
def merge(
flat_true: List[Any], flat_false: List[Any], which: List[bool], treedef: TreeDef
):
flat = []
flat_true = iter(flat_true)
flat_false = iter(flat_false)
for element in which:
if element:
flat.append(next(flat_true))
else:
flat.append(next(flat_false))
return jax.tree_unflatten(treedef, flat)
# Internal and only used by deprecated functions
def validate_filters(fn_name, filter_fn, filter_tree):
if (filter_fn is None and filter_tree is None) or (
filter_fn is not None and filter_tree is not None
):
raise ValueError(
f"Precisely one of `filter_fn` and `filter_tree` should be passed to {fn_name}"
)
| equinox-main | equinox/filters.py |
import typing
from typing import Any, Callable, List, Optional, Sequence
import jax
import jax.nn as jnn
import jax.random as jrandom
from ..custom_types import Array
from ..module import Module, static_field
from .linear import Linear
def _identity(x):
return x
if getattr(typing, "GENERATING_DOCUMENTATION", False):
def relu(_):
pass
jnn.relu = relu
_identity.__qualname__ = "identity" # Renamed for nicer documentation.
class MLP(Module):
"""Standard Multi-Layer Perceptron; also known as a feed-forward network."""
layers: List[Linear]
activation: Callable
final_activation: Callable
in_size: int = static_field()
out_size: int = static_field()
width_size: int = static_field()
depth: int = static_field()
def __init__(
self,
in_size: int,
out_size: int,
width_size: int,
depth: int,
activation: Callable = jnn.relu,
final_activation: Callable = _identity,
*,
key: "jax.random.PRNGKey",
**kwargs
):
"""**Arguments**:
- `in_size`: The size of the input layer.
- `out_size`: The size of the output layer.
- `width_size`: The size of each hidden layer.
- `depth`: The number of hidden layers.
- `activation`: The activation function after each hidden layer. Defaults to
ReLU.
- `final_activation`: The activation function after the output layer. Defaults
to the identity.
- `key`: A `jax.random.PRNGKey` used to provide randomness for parameter
initialisation. (Keyword only argument.)
"""
super().__init__(**kwargs)
keys = jrandom.split(key, depth + 1)
layers = []
if depth == 0:
layers.append(Linear(in_size, out_size, key=keys[0]))
else:
layers.append(Linear(in_size, width_size, key=keys[0]))
for i in range(depth - 1):
layers.append(Linear(width_size, width_size, key=keys[i + 1]))
layers.append(Linear(width_size, out_size, key=keys[-1]))
self.layers = layers
self.in_size = in_size
self.out_size = out_size
self.width_size = width_size
self.depth = depth
self.activation = activation
self.final_activation = final_activation
def __call__(
self, x: Array, *, key: Optional["jax.random.PRNGKey"] = None
) -> Array:
"""**Arguments:**
- `x`: A JAX array with shape `(in_size,)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array with shape `(out_size,)`.
"""
for layer in self.layers[:-1]:
x = layer(x)
x = self.activation(x)
x = self.layers[-1](x)
x = self.final_activation(x)
return x
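# Illustrative sketch (not part of the library): constructing and calling an MLP
# with hypothetical sizes. Shapes follow the docstrings above.
def _mlp_example():
    mkey, xkey = jrandom.split(jrandom.PRNGKey(0))
    mlp = MLP(in_size=2, out_size=3, width_size=8, depth=2, key=mkey)
    x = jrandom.normal(xkey, (2,))
    return mlp(x)  # shape (3,)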
class Sequential(Module):
"""A sequence of [`equinox.Module`][]s applied in order."""
layers: Sequence[Module]
def __call__(self, x: Any, *, key: Optional["jax.random.PRNGKey"] = None) -> Any:
"""**Arguments:**
- `x`: Argument passed to the first member of the sequence.
- `key`: A `jax.random.PRNGKey`, which will be split and passed to every layer
to provide any desired randomness. (Optional. Keyword only argument.)
**Returns:**
The output of the last member of the sequence.
"""
if key is None:
keys = [None] * len(self.layers)
else:
keys = jrandom.split(key, len(self.layers))
for layer, key in zip(self.layers, keys):
x = layer(x, key=key)
return x
Sequential.__init__.__doc__ = """**Arguments:**
- `layers`: A sequence of [`equinox.Module`][]s.
"""
| equinox-main | equinox/nn/composed.py |
import math
from typing import Optional, TypeVar
import jax
import jax.random as jrandom
from ..custom_types import Array
from ..module import Module, static_field
class Linear(Module):
"""Performs a linear transformation."""
weight: Array
bias: Optional[Array]
in_features: int = static_field()
out_features: int = static_field()
use_bias: bool = static_field()
def __init__(
self,
in_features: int,
out_features: int,
use_bias: bool = True,
*,
key: "jax.random.PRNGKey"
):
"""**Arguments:**
- `in_features`: The input size.
- `out_features`: The output size.
- `use_bias`: Whether to add on a bias as well.
- `key`: A `jax.random.PRNGKey` used to provide randomness for parameter
initialisation. (Keyword only argument.)
"""
super().__init__()
wkey, bkey = jrandom.split(key, 2)
lim = 1 / math.sqrt(in_features)
self.weight = jrandom.uniform(
wkey, (out_features, in_features), minval=-lim, maxval=lim
)
if use_bias:
self.bias = jrandom.uniform(bkey, (out_features,), minval=-lim, maxval=lim)
else:
self.bias = None
self.in_features = in_features
self.out_features = out_features
self.use_bias = use_bias
def __call__(
self, x: Array, *, key: Optional["jax.random.PRNGKey"] = None
) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape `(in_features,)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
        A JAX array of shape `(out_features,)`.
"""
x = self.weight @ x
if self.bias is not None:
x = x + self.bias
return x
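# Illustrative sketch (not part of the library): the layer computes
# `weight @ x + bias`, mapping shape (in_features,) to (out_features,).
def _linear_example():
    lkey, xkey = jrandom.split(jrandom.PRNGKey(0))
    linear = Linear(in_features=3, out_features=4, key=lkey)
    x = jrandom.normal(xkey, (3,))
    return linear(x)  # shape (4,)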
_T = TypeVar("T")
class Identity(Module):
"""Identity operation that does nothing. Sometimes useful as a placeholder for
another Module.
"""
def __init__(self, *args, **kwargs):
"""Consumes arbitrary `*args` and `**kwargs` but ignores them."""
# Ignores args and kwargs
super().__init__()
def __call__(self, x: _T, *, key: Optional["jax.random.PRNGKey"] = None) -> _T:
"""**Arguments:**
- `x`: The input, of any type.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
The input, unchanged.
"""
return x
| equinox-main | equinox/nn/linear.py |
from .composed import MLP, Sequential
from .conv import Conv, Conv1d, Conv2d, Conv3d
from .dropout import Dropout
from .linear import Identity, Linear
from .rnn import GRUCell, LSTMCell
| equinox-main | equinox/nn/__init__.py |
from typing import Optional
import jax
import jax.numpy as jnp
import jax.random as jrandom
from ..custom_types import Array
from ..module import Module
class Dropout(Module):
"""Applies dropout."""
# Not static_fields as it makes sense to want to modify them via equinox.tree_at.
p: float = 0.5
deterministic: bool = False
def __call__(
self,
x: Array,
*,
key: Optional["jax.random.PRNGKey"] = None,
deterministic: Optional[bool] = None
) -> Array:
"""**Arguments:**
- `x`: An any-dimensional JAX array to dropout.
- `key`: A `jax.random.PRNGKey` used to provide randomness for calculating
which elements to dropout. (Keyword only argument.)
- `deterministic`: As per [`equinox.nn.Dropout.__init__`][]. If `True` or
`False` then it will take priority over `self.deterministic`. If `None`
then the value from `self.deterministic` will be used.
"""
if deterministic is None:
deterministic = self.deterministic
if deterministic:
return x
elif key is None:
raise RuntimeError(
"Dropout requires a key when running in non-deterministic mode."
)
else:
q = 1 - self.p
mask = jrandom.bernoulli(key, q, x.shape)
return jnp.where(mask, x / q, 0)
Dropout.__init__.__doc__ = """**Arguments:**
- `p`: The fraction of entries to set to zero. (On average.)
- `deterministic`: Whether to actually apply dropout at all. If `True` then dropout
is *not* applied. If `False` then dropout is applied.
!!! info
The `deterministic` flag is provided as it is common to only apply dropout
during training, but not to apply it during inference. If you want to change this
flag between training and inference, then you can either:
- Override it with the `__call__`-time `deterministic` flag, see below.
- Modify the `deterministic` flag directly -- possible to do because `Dropout` is
just a PyTree. For example this sets all `deterministic` flags to `True`:
```python
model = ... # some model featuring a Dropout layer somewhere
is_dropout = lambda x: isinstance(x, Dropout)
def find_deterministic(m):
return tuple(d.deterministic
for d in jax.tree_flatten(m, is_leaf=is_dropout)[0]
if is_dropout(d))
model = eqx.tree_at(find_deterministic, model, replace_fn=lambda _: True)
```
"""
| equinox-main | equinox/nn/dropout.py |
import collections
from itertools import repeat
from typing import Any, Callable, Optional, Sequence, Tuple, Union
import jax
import jax.numpy as jnp
import jax.random as jrandom
import numpy as np
from jax.lax import conv_general_dilated
from ..custom_types import Array
from ..module import Module, static_field
def _ntuple(n: int) -> Callable:
def parse(x: Any) -> tuple:
if isinstance(x, collections.abc.Iterable):
if len(x) == n:
return tuple(x)
else:
raise ValueError(
f"Length of {x} (length = {len(x)}) is not equal to {n}"
)
return tuple(repeat(x, n))
return parse
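# For example, `_ntuple(2)(3) == (3, 3)` and `_ntuple(2)((3, 5)) == (3, 5)`, while
# `_ntuple(2)((3, 5, 7))` raises a ValueError. This is how `kernel_size`, `stride`
# and `dilation` below are broadcast to one value per spatial dimension.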
class Conv(Module):
"""General N-dimensional convolution."""
num_spatial_dims: int = static_field()
weight: Array
bias: Optional[Array]
in_channels: int = static_field()
out_channels: int = static_field()
kernel_size: Tuple[int] = static_field()
stride: Tuple[int] = static_field()
padding: Tuple[int] = static_field()
dilation: Tuple[int] = static_field()
use_bias: bool = static_field()
def __init__(
self,
num_spatial_dims: int,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Sequence[int]],
stride: Union[int, Sequence[int]] = 1,
padding: Union[int, Sequence[int]] = 0,
dilation: Union[int, Sequence[int]] = 1,
use_bias: bool = True,
*,
key: "jax.random.PRNGKey",
**kwargs,
):
"""**Arguments:**
- `num_spatial_dims`: The number of spatial dimensions. For example traditional
convolutions for image processing have this set to `2`.
- `in_channels`: The number of input channels.
- `out_channels`: The number of output channels.
- `kernel_size`: The size of the convolutional kernel.
- `stride`: The stride of the convolution.
- `padding`: The amount of padding to apply before and after each spatial
dimension. The same amount of padding is applied both before and after.
- `dilation`: The dilation of the convolution.
- `use_bias`: Whether to add on a bias after the convolution.
- `key`: A `jax.random.PRNGKey` used to provide randomness for parameter
initialisation. (Keyword only argument.)
!!! info
All of `kernel_size`, `stride`, `padding`, `dilation` can be either an
integer or a sequence of integers. If they are a sequence then the sequence
should be of length equal to `num_spatial_dims`, and specify the value of
        each property down each spatial dimension in turn. If they are an integer
then the same kernel size / stride / padding / dilation will be used along
every spatial dimension.
"""
super().__init__(**kwargs)
self.num_spatial_dims = num_spatial_dims
parse = _ntuple(self.num_spatial_dims)
wkey, bkey = jrandom.split(key, 2)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = parse(kernel_size)
self.use_bias = use_bias
lim = 1 / np.sqrt(self.in_channels * np.prod(self.kernel_size))
self.weight = jrandom.uniform(
wkey,
(
self.out_channels,
self.in_channels,
)
+ self.kernel_size,
minval=-lim,
maxval=lim,
)
if self.use_bias:
self.bias = jrandom.uniform(
bkey,
(self.out_channels,) + (1,) * self.num_spatial_dims,
minval=-lim,
maxval=lim,
)
else:
self.bias = None
self.stride = parse(stride)
if isinstance(padding, int):
self.padding = tuple(
(padding, padding) for _ in range(self.num_spatial_dims)
)
elif isinstance(padding, Sequence) and len(padding) == self.num_spatial_dims:
self.padding = tuple((p, p) for p in padding)
else:
raise ValueError(
"`padding` must either be an int or tuple of length "
f"{self.num_spatial_dims}."
)
self.dilation = parse(dilation)
def __call__(
self, x: Array, *, key: Optional["jax.random.PRNGKey"] = None
) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape `(in_channels, dim_1, ..., dim_N)`, where
`N = num_spatial_dims`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(out_channels, new_dim_1, ..., new_dim_N)`.
"""
unbatched_rank = self.num_spatial_dims + 1
if x.ndim != unbatched_rank:
            raise ValueError(
                f"Input to `Conv` needs to have rank {unbatched_rank},"
                f" but input has shape {x.shape}."
            )
x = jnp.expand_dims(x, axis=0)
x = conv_general_dilated(
lhs=x,
rhs=self.weight,
window_strides=self.stride,
padding=self.padding,
rhs_dilation=self.dilation,
)
if self.use_bias:
x += jnp.broadcast_to(self.bias, x.shape)
x = jnp.squeeze(x, axis=0)
return x
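# Illustrative sketch (not part of the library): a 2D convolution with a 3x3
# kernel and no padding maps (1, 32, 32) -> (3, 30, 30). Hypothetical sizes.
def _conv_example():
    ckey, xkey = jrandom.split(jrandom.PRNGKey(0))
    conv = Conv(
        num_spatial_dims=2, in_channels=1, out_channels=3, kernel_size=3, key=ckey
    )
    x = jrandom.normal(xkey, (1, 32, 32))
    return conv(x)  # shape (3, 30, 30)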
class Conv1d(Conv):
"""As [`equinox.nn.Conv`][] with `num_spatial_dims=1`."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
use_bias=True,
*,
key,
**kwargs,
):
super().__init__(
num_spatial_dims=1,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
use_bias=use_bias,
key=key,
**kwargs,
)
class Conv2d(Conv):
"""As [`equinox.nn.Conv`][] with `num_spatial_dims=2`."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=(1, 1),
padding=(0, 0),
dilation=(1, 1),
use_bias=True,
*,
key,
**kwargs,
):
super().__init__(
num_spatial_dims=2,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
use_bias=use_bias,
key=key,
**kwargs,
)
class Conv3d(Conv):
"""As [`equinox.nn.Conv`][] with `num_spatial_dims=3`."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=(1, 1, 1),
padding=(0, 0, 0),
dilation=(1, 1, 1),
use_bias=True,
*,
key,
**kwargs,
):
super().__init__(
num_spatial_dims=3,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
use_bias=use_bias,
key=key,
**kwargs,
)
| equinox-main | equinox/nn/conv.py |
import math
import warnings
from typing import Optional
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from ..custom_types import Array
from ..module import Module, static_field
class GRUCell(Module):
"""A single step of a Gated Recurrent Unit (GRU).
!!! example
This is often used by wrapping it into a `jax.lax.scan`. For example:
```python
class Model(Module):
cell: GRUCell
def __init__(self, ...):
self.cell = GRUCell(...)
def __call__(self, xs):
                scan_fn = lambda state, input: (self.cell(input, state), None)
                init_state = jnp.zeros(self.cell.hidden_size)
                final_state, _ = jax.lax.scan(scan_fn, init_state, xs)
return final_state
```
"""
weight_ih: Array
weight_hh: Array
bias: Optional[Array]
bias_n: Optional[Array]
input_size: int = static_field()
hidden_size: int = static_field()
use_bias: bool = static_field()
def __init__(
self,
input_size: int,
hidden_size: int,
use_bias: bool = True,
bias=None,
*,
        key: "jax.random.PRNGKey",
**kwargs
):
"""**Arguments:**
- `input_size`: The dimensionality of the input vector at each time step.
- `hidden_size`: The dimensionality of the hidden state passed along between
time steps.
- `use_bias`: Whether to add on a bias after each update.
- `key`: A `jax.random.PRNGKey` used to provide randomness for parameter
initialisation. (Keyword only argument.)
"""
super().__init__(**kwargs)
if bias is not None:
warnings.warn("`bias` is deprecated in favour of `use_bias`.")
use_bias = bias
ihkey, hhkey, bkey, bkey2 = jrandom.split(key, 4)
lim = math.sqrt(1 / hidden_size)
self.weight_ih = jrandom.uniform(
ihkey, (3 * hidden_size, input_size), minval=-lim, maxval=lim
)
self.weight_hh = jrandom.uniform(
hhkey, (3 * hidden_size, hidden_size), minval=-lim, maxval=lim
)
if use_bias:
self.bias = jrandom.uniform(
bkey, (3 * hidden_size,), minval=-lim, maxval=lim
)
self.bias_n = jrandom.uniform(
bkey2, (hidden_size,), minval=-lim, maxval=lim
)
else:
self.bias = None
self.bias_n = None
self.input_size = input_size
self.hidden_size = hidden_size
self.use_bias = use_bias
def __call__(
self, input: Array, hidden: Array, *, key: Optional["jax.random.PRNGKey"] = None
):
"""**Arguments:**
- `input`: The input, which should be a JAX array of shape `(input_size,)`.
- `hidden`: The hidden state, which should be a JAX array of shape
`(hidden_size,)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
The updated hidden state, which is a JAX array of shape `(hidden_size,)`.
"""
if self.bias is None:
bias = 0
bias_n = 0
else:
bias = self.bias
bias_n = self.bias_n
igates = jnp.split(self.weight_ih @ input + bias, 3)
hgates = jnp.split(self.weight_hh @ hidden, 3)
reset = jnn.sigmoid(igates[0] + hgates[0])
inp = jnn.sigmoid(igates[1] + hgates[1])
new = jnn.tanh(igates[2] + reset * (hgates[2] + bias_n))
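        # Equivalent to the usual GRU convex combination, since
        # new + inp * (hidden - new) == (1 - inp) * new + inp * hidden.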
return new + inp * (hidden - new)
class LSTMCell(Module):
"""A single step of a Long-Short Term Memory unit (LSTM).
!!! example
This is often used by wrapping it into a `jax.lax.scan`. For example:
```python
class Model(Module):
cell: LSTMCell
def __init__(self, ...):
self.cell = LSTMCell(...)
def __call__(self, xs):
                scan_fn = lambda state, input: (self.cell(input, state), None)
                init_state = (jnp.zeros(self.cell.hidden_size),
                              jnp.zeros(self.cell.hidden_size))
                final_state, _ = jax.lax.scan(scan_fn, init_state, xs)
return final_state
```
"""
weight_ih: Array
weight_hh: Array
bias: Optional[Array]
input_size: int = static_field()
hidden_size: int = static_field()
use_bias: bool = static_field()
def __init__(
self,
input_size: int,
hidden_size: int,
use_bias: bool = True,
bias=None,
*,
key: "jax.random.PRNGKey",
**kwargs
):
"""**Arguments:**
- `input_size`: The dimensionality of the input vector at each time step.
- `hidden_size`: The dimensionality of the hidden state passed along between
time steps.
- `use_bias`: Whether to add on a bias after each update.
- `key`: A `jax.random.PRNGKey` used to provide randomness for parameter
initialisation. (Keyword only argument.)
"""
super().__init__(**kwargs)
if bias is not None:
warnings.warn("`bias` is deprecated in favour of `use_bias`.")
use_bias = bias
ihkey, hhkey, bkey = jrandom.split(key, 3)
lim = math.sqrt(1 / hidden_size)
self.weight_ih = jrandom.uniform(
ihkey, (4 * hidden_size, input_size), minval=-lim, maxval=lim
)
self.weight_hh = jrandom.uniform(
hhkey, (4 * hidden_size, hidden_size), minval=-lim, maxval=lim
)
        if use_bias:
self.bias = jrandom.uniform(
bkey, (4 * hidden_size,), minval=-lim, maxval=lim
)
else:
self.bias = None
self.input_size = input_size
self.hidden_size = hidden_size
self.use_bias = use_bias
def __call__(self, input, hidden, *, key=None):
"""**Arguments:**
- `input`: The input, which should be a JAX array of shape `(input_size,)`.
- `hidden`: The hidden state, which should be a 2-tuple of JAX arrays, each of
shape `(hidden_size,)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
The updated hidden state, which is a 2-tuple of JAX arrays, each of shape
`(hidden_size,)`.
"""
h, c = hidden
lin = self.weight_ih @ input + self.weight_hh @ h
if self.bias is not None:
lin = lin + self.bias
i, f, g, o = jnp.split(lin, 4)
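        # Input, forget, cell-candidate and output gates, in that order.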
i = jnn.sigmoid(i)
f = jnn.sigmoid(f)
g = jnn.tanh(g)
o = jnn.sigmoid(o)
c = f * c + i * g
h = o * jnn.tanh(c)
return (h, c)
| equinox-main | equinox/nn/rnn.py |
import random
import jax.random as jrandom
import pytest
@pytest.fixture()
def getkey():
def _getkey():
# Not sure what the maximum actually is but this will do
return jrandom.PRNGKey(random.randint(0, 2 ** 31 - 1))
return _getkey
| equinox-main | tests/conftest.py |
import jax.numpy as jnp
import jax.random as jrandom
import pytest
import equinox as eqx
def test_tree_at_replace(getkey):
key = getkey()
key1, key2 = jrandom.split(key, 2)
pytree = [1, 2, {"a": jnp.array([1.0, 2.0])}, eqx.nn.Linear(1, 2, key=key1)]
true_pytree1 = [1, 2, {"a": "hi"}, eqx.nn.Linear(1, 2, key=key1)]
true_pytree2 = [1, 2, {"a": jnp.array([1.0, 2.0])}, eqx.nn.Linear(1, 2, key=key2)]
where1 = lambda tree: tree[2]["a"]
where2 = lambda tree: (tree[3].weight, tree[3].bias)
weight2 = true_pytree2[3].weight
bias2 = true_pytree2[3].bias
pytree1 = eqx.tree_at(where1, pytree, replace="hi")
pytree2 = eqx.tree_at(where2, pytree, replace=(weight2, bias2))
assert pytree1[:-2] == true_pytree1[:-2]
assert pytree2[:-2] == true_pytree2[:-2]
assert jnp.all(pytree1[-2]["a"] == true_pytree1[-2]["a"])
assert jnp.all(pytree2[-2]["a"] == true_pytree2[-2]["a"])
assert jnp.all(pytree1[-1].weight == true_pytree1[-1].weight)
assert jnp.all(pytree1[-1].bias == true_pytree1[-1].bias)
assert jnp.all(pytree2[-1].weight == true_pytree2[-1].weight)
assert jnp.all(pytree2[-1].bias == true_pytree2[-1].bias)
true_pytree3 = ["hi", 2, {"a": 4}, eqx.nn.Linear(1, 2, key=key1)]
where3 = lambda tree: (tree[0], tree[2]["a"])
pytree3 = eqx.tree_at(where3, pytree, replace=("hi", 4))
assert pytree3[:-1] == true_pytree3[:-1]
assert jnp.all(pytree3[-1].weight == true_pytree3[-1].weight)
assert jnp.all(pytree3[-1].bias == true_pytree3[-1].bias)
with pytest.raises(TypeError):
eqx.tree_at(where3, pytree, replace=4)
with pytest.raises(ValueError):
eqx.tree_at(where3, pytree, replace=(3, 4, 5))
def test_tree_at_replace_fn(getkey):
key = getkey()
pytree = [1, 2, 3, {"a": jnp.array([1.0, 2.0])}, eqx.nn.Linear(1, 2, key=key)]
def replace_fn(x):
if isinstance(x, int):
return "found an int"
else:
return x
true_pytree1 = [
"found an int",
"found an int",
3,
{"a": jnp.array([1.0, 2.0])},
eqx.nn.Linear(1, 2, key=key),
]
where = lambda tree: (tree[0], tree[1])
pytree1 = eqx.tree_at(where, pytree, replace_fn=replace_fn)
assert pytree1[:3] == true_pytree1[:3]
assert jnp.all(pytree1[3]["a"] == true_pytree1[3]["a"])
assert jnp.all(pytree1[-1].weight == true_pytree1[-1].weight)
assert jnp.all(pytree1[-1].bias == true_pytree1[-1].bias)
with pytest.raises(ValueError):
eqx.tree_at(where, pytree, replace=(0, 1), replace_fn=replace_fn)
def test_tree_equal():
key1 = jrandom.PRNGKey(0)
key2 = jrandom.PRNGKey(1)
# Not using getkey as ever-so-in-principle two random keys could produce the same weights
# (like that's ever going to happen)
pytree1 = [1, 2, 3, {"a": jnp.array([1.0, 2.0])}, eqx.nn.Linear(1, 2, key=key1)]
pytree2 = [1, 2, 3, {"a": jnp.array([1.0, 2.0])}, eqx.nn.Linear(1, 2, key=key1)]
pytree3 = [1, 2, 3, {"a": jnp.array([1.0, 2.0])}, eqx.nn.Linear(1, 2, key=key2)]
pytree4 = [1, 2, 3, {"a": jnp.array([1.0, 4.0])}, eqx.nn.Linear(1, 2, key=key1)]
pytree5 = [1, 2, 4, {"a": jnp.array([1.0, 2.0])}, eqx.nn.Linear(1, 2, key=key1)]
assert eqx.tree_equal(pytree1, pytree1, pytree1)
assert eqx.tree_equal(pytree1, pytree2)
assert not eqx.tree_equal(pytree1, pytree3)
assert not eqx.tree_equal(pytree1, pytree4)
assert not eqx.tree_equal(pytree1, pytree5)
| equinox-main | tests/test_tree.py |
from typing import Any
import jax
import pytest
import equinox as eqx
def test_module_not_enough_attributes():
class MyModule1(eqx.Module):
weight: Any
with pytest.raises(TypeError):
MyModule1()
class MyModule2(eqx.Module):
weight: Any
def __init__(self):
pass
with pytest.raises(ValueError):
MyModule2()
with pytest.raises(TypeError):
MyModule2(1)
def test_module_too_many_attributes():
class MyModule1(eqx.Module):
weight: Any
with pytest.raises(TypeError):
MyModule1(1, 2)
class MyModule2(eqx.Module):
weight: Any
def __init__(self, weight):
self.weight = weight
self.something_else = True
with pytest.raises(AttributeError):
MyModule2(1)
def test_module_setattr_after_init():
class MyModule(eqx.Module):
weight: Any
m = MyModule(1)
with pytest.raises(AttributeError):
m.asdf = True
def test_wrong_attribute():
class MyModule(eqx.Module):
weight: Any
def __init__(self, value):
self.not_weight = value
with pytest.raises(AttributeError):
MyModule(1)
# The main part of this test is to check that __init__ works correctly.
def test_inheritance():
# no custom init / no custom init
class MyModule(eqx.Module):
weight: Any
class MyModule2(MyModule):
weight2: Any
m = MyModule2(1, 2)
assert m.weight == 1
assert m.weight2 == 2
m = MyModule2(1, weight2=2)
assert m.weight == 1
assert m.weight2 == 2
m = MyModule2(weight=1, weight2=2)
assert m.weight == 1
assert m.weight2 == 2
with pytest.raises(TypeError):
m = MyModule2(2, weight=2)
# not custom init / custom init
class MyModule3(MyModule):
weight3: Any
def __init__(self, *, weight3, **kwargs):
self.weight3 = weight3
super().__init__(**kwargs)
m = MyModule3(weight=1, weight3=3)
assert m.weight == 1
assert m.weight3 == 3
# custom init / no custom init
class MyModule4(eqx.Module):
weight4: Any
def __init__(self, value4, **kwargs):
self.weight4 = value4
super().__init__(**kwargs)
class MyModule5(MyModule4):
weight5: Any
with pytest.raises(TypeError):
m = MyModule5(value4=1, weight5=2)
class MyModule6(MyModule4):
pass
m = MyModule6(value4=1)
assert m.weight4 == 1
# custom init / custom init
class MyModule7(MyModule4):
weight7: Any
def __init__(self, value7, **kwargs):
self.weight7 = value7
super().__init__(**kwargs)
m = MyModule7(value4=1, value7=2)
assert m.weight4 == 1
assert m.weight7 == 2
def test_static_field():
class MyModule(eqx.Module):
field1: int
field2: int = eqx.static_field()
field3: int = eqx.static_field(default=3)
m = MyModule(1, 2)
flat, treedef = jax.tree_flatten(m)
assert len(flat) == 1
assert flat[0] == 1
rm = jax.tree_unflatten(treedef, flat)
assert rm.field1 == 1
assert rm.field2 == 2
assert rm.field3 == 3
def test_wrap_method():
class MyModule(eqx.Module):
a: int
def f(self, b):
return self.a + b
m = MyModule(13)
assert isinstance(m.f, jax.tree_util.Partial)
flat, treedef = jax.tree_flatten(m.f)
assert len(flat) == 1
assert flat[0] == 13
assert jax.tree_unflatten(treedef, flat)(2) == 15
def test_init_subclass():
ran = []
class MyModule(eqx.Module):
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
ran.append(True)
class AnotherModule(MyModule):
pass
assert ran == [True]
| equinox-main | tests/test_module.py |
import functools as ft
import jax
import jax.numpy as jnp
import jax.random as jrandom
import pytest
import equinox as eqx
def _eq(a, b):
return (type(a) is type(b)) and (a == b)
def test_jitf_filter_fn(getkey):
a = jrandom.normal(getkey(), (2, 3))
b = jrandom.normal(getkey(), (3,))
c = jrandom.normal(getkey(), (1, 4))
general_tree = [
1,
True,
object(),
{"a": a, "tuple": (2.0, b)},
c,
eqx.nn.MLP(2, 2, 2, 2, key=getkey()),
]
array_tree = [{"a": a, "b": b}, (c,)]
_mlp = jax.tree_map(lambda u: u if eqx.is_array_like(u) else None, general_tree[-1])
@ft.partial(eqx.jitf, filter_fn=lambda _: True)
def f(x):
return x
assert jnp.all(a == f(a))
f1 = f(array_tree)
assert jnp.all(f1[0]["a"] == a)
assert jnp.all(f1[0]["b"] == b)
assert jnp.all(f1[1][0] == c)
with pytest.raises(TypeError):
f(general_tree)
@ft.partial(eqx.jitf, filter_fn=eqx.is_inexact_array)
def g(x):
return jax.tree_map(lambda u: u if eqx.is_array_like(u) else None, x)
assert jnp.all(a == g(a))
g1 = g(array_tree)
assert jnp.all(g1[0]["a"] == a)
assert jnp.all(g1[0]["b"] == b)
assert jnp.all(g1[1][0] == c)
g2 = g(general_tree)
assert _eq(g2[0], jnp.array(1))
assert _eq(g2[1], jnp.array(True))
assert _eq(g2[2], None)
assert jnp.all(g2[3]["a"] == a)
assert _eq(g2[3]["tuple"][0], jnp.array(2.0))
assert jnp.all(g2[3]["tuple"][1] == b)
assert jnp.all(g2[4] == c)
assert _eq(g2[5], _mlp)
@ft.partial(eqx.jitf, filter_fn=eqx.is_array_like)
def h(x):
return jax.tree_map(lambda u: u if eqx.is_array_like(u) else None, x)
assert jnp.all(a == h(a))
h1 = h(array_tree)
assert jnp.all(h1[0]["a"] == a)
assert jnp.all(h1[0]["b"] == b)
assert jnp.all(h1[1][0] == c)
h2 = h(general_tree)
assert _eq(h2[0], jnp.array(1))
assert _eq(h2[1], jnp.array(True))
assert _eq(h2[2], None)
assert jnp.all(h2[3]["a"] == a)
assert _eq(g2[3]["tuple"][0], jnp.array(2.0))
assert jnp.all(g2[3]["tuple"][1] == b)
assert jnp.all(g2[4] == c)
assert _eq(g2[5], _mlp)
@ft.partial(eqx.jitf, static_argnums=1, filter_fn=eqx.is_array_like)
def i(x, y, z):
return x * y * z
assert i(1, 1, 1) == 1
@ft.partial(eqx.jitf, static_argnums=(1, 2), filter_fn=eqx.is_array_like)
def j(x, y, z):
return x * y * z
assert j(1, 1, 1) == 1
def test_jitf_filter_tree(getkey):
a = jrandom.normal(getkey(), (2, 3))
b = jrandom.normal(getkey(), (3,))
c = jrandom.normal(getkey(), (1, 4))
general_tree = [
1,
True,
object(),
{"a": a, "tuple": (2.0, b)},
c,
eqx.nn.MLP(2, 2, 2, 2, key=getkey()),
]
_mlp = jax.tree_map(lambda u: u if eqx.is_array_like(u) else None, general_tree[-1])
_filter_mlp = jax.tree_map(eqx.is_inexact_array, general_tree[-1])
@ft.partial(
eqx.jitf,
filter_tree=[
True,
True,
False,
{"a": True, "tuple": (False, True)},
True,
_filter_mlp,
],
)
def f(x):
return jax.tree_map(lambda u: u if eqx.is_array_like(u) else None, x)
f1 = f(general_tree)
assert _eq(f1[0], jnp.array(1))
assert _eq(f1[1], jnp.array(True))
assert _eq(f1[2], None)
assert jnp.all(f1[3]["a"] == a)
assert _eq(f1[3]["tuple"][0], jnp.array(2.0))
assert jnp.all(f1[3]["tuple"][1] == b)
assert jnp.all(f1[4] == c)
assert _eq(f1[5], _mlp)
@ft.partial(eqx.jitf, static_argnums=1, filter_tree=True)
def g(x, y):
return x * y
assert g(1, 1) == 1
@ft.partial(eqx.jitf, static_argnums=1, filter_tree=[True])
def g2(x, y):
return x * y
_g = g2([1], 1)
assert isinstance(_g, list)
assert len(_g) == 1
assert _g[0] == 1
with pytest.raises(ValueError):
g2(1, 1) # filter tree doesn't match up
@ft.partial(eqx.jitf, static_argnums=1, filter_tree=[True, True])
def h(x, y, z):
return x * y * z
assert h(1, 1, 1) == 1
with pytest.raises(ValueError):
h([1], 1, 1) # filter tree doesn't match up
def test_num_traces():
num_traces = 0
@ft.partial(eqx.jitf, filter_fn=lambda _: True)
def f(x):
nonlocal num_traces
num_traces += 1
f(jnp.zeros(2))
f(jnp.zeros(2))
assert num_traces == 1
f(jnp.zeros(3))
f(jnp.zeros(3))
assert num_traces == 2
f([jnp.zeros(2)])
f([jnp.zeros(2), jnp.zeros(3)])
f([jnp.zeros(2), True])
assert num_traces == 5
num_traces = 0
@ft.partial(eqx.jitf, static_argnums=1, filter_fn=eqx.is_array_like)
def g(x, y):
nonlocal num_traces
num_traces += 1
g(jnp.zeros(2), True)
g(jnp.zeros(2), False)
assert num_traces == 2
num_traces = 0
@ft.partial(
eqx.jitf, static_argnums=(0, 2), filter_tree=[{"a": True, "b": False}, False]
)
def h(x, y, z, w):
nonlocal num_traces
num_traces += 1
h(True, {"a": 1, "b": 1}, True, True)
h(False, {"a": 1, "b": 1}, True, True)
h(True, {"a": 1, "b": 0}, True, True)
h(True, {"a": 1, "b": 1}, True, 2)
h(True, {"a": 1, "b": 1}, 5, True)
assert num_traces == 5
h(True, {"a": 2, "b": 1}, True, True)
assert num_traces == 5
| equinox-main | tests/test_jitf.py |
import jax.numpy as jnp
import jax.random as jrandom
import pytest
import equinox as eqx
def test_custom_init():
with pytest.raises(TypeError):
eqx.nn.Linear(1, 1, 1) # Matches the number of dataclass fields Linear has
with pytest.raises(TypeError):
eqx.nn.Linear(3, 4)
with pytest.raises(TypeError):
eqx.nn.Linear(3)
with pytest.raises(TypeError):
eqx.nn.Linear(out_features=4)
def test_linear(getkey):
# Positional arguments
linear = eqx.nn.Linear(3, 4, key=getkey())
x = jrandom.normal(getkey(), (3,))
assert linear(x).shape == (4,)
# Some keyword arguments
linear = eqx.nn.Linear(3, out_features=4, key=getkey())
x = jrandom.normal(getkey(), (3,))
assert linear(x).shape == (4,)
# All keyword arguments
linear = eqx.nn.Linear(in_features=3, out_features=4, key=getkey())
x = jrandom.normal(getkey(), (3,))
assert linear(x).shape == (4,)
def test_identity(getkey):
identity1 = eqx.nn.Identity()
identity2 = eqx.nn.Identity(1)
identity3 = eqx.nn.Identity(2, hi=True)
identity4 = eqx.nn.Identity(eqx.nn.Identity())
assert identity1 == identity2
assert identity1 == identity3
assert identity1 == identity4
x = jrandom.normal(getkey(), (3, 5, 9))
assert jnp.all(x == identity1(x))
assert jnp.all(x == identity2(x))
assert jnp.all(x == identity3(x))
assert jnp.all(x == identity4(x))
def test_dropout(getkey):
dropout = eqx.nn.Dropout()
x = jrandom.normal(getkey(), (3, 4, 5))
y = dropout(x, key=getkey())
assert jnp.all((y == 0) | (y == x / 0.5))
z1 = dropout(x, key=getkey(), deterministic=True)
z2 = dropout(x, deterministic=True)
assert jnp.all(x == z1)
assert jnp.all(x == z2)
dropout2 = eqx.nn.Dropout(deterministic=True)
assert jnp.all(x == dropout2(x))
dropout3 = eqx.tree_at(lambda d: d.deterministic, dropout2, replace=False)
assert jnp.any(x != dropout3(x, key=jrandom.PRNGKey(0)))
def test_gru_cell(getkey):
gru = eqx.nn.GRUCell(2, 8, key=getkey())
h = jrandom.normal(getkey(), (8,))
x = jrandom.normal(getkey(), (5, 2))
for xi in x:
h = gru(xi, h)
assert h.shape == (8,)
def test_lstm_cell(getkey):
gru = eqx.nn.LSTMCell(2, 8, key=getkey())
h = jrandom.normal(getkey(), (8,)), jrandom.normal(getkey(), (8,))
x = jrandom.normal(getkey(), (5, 2))
for xi in x:
h = gru(xi, h)
h_, c_ = h
assert h_.shape == (8,)
assert c_.shape == (8,)
def test_sequential(getkey):
seq = eqx.nn.Sequential(
[
eqx.nn.Linear(2, 4, key=getkey()),
eqx.nn.Linear(4, 1, key=getkey()),
eqx.nn.Linear(1, 3, key=getkey()),
]
)
x = jrandom.normal(getkey(), (2,))
assert seq(x).shape == (3,)
def test_mlp(getkey):
mlp = eqx.nn.MLP(2, 3, 8, 2, key=getkey())
x = jrandom.normal(getkey(), (2,))
assert mlp(x).shape == (3,)
mlp = eqx.nn.MLP(in_size=2, out_size=3, width_size=8, depth=2, key=getkey())
x = jrandom.normal(getkey(), (2,))
assert mlp(x).shape == (3,)
def test_conv1d(getkey):
# Positional arguments
conv = eqx.nn.Conv1d(1, 3, 3, key=getkey())
x = jrandom.normal(getkey(), (1, 32))
assert conv(x).shape == (3, 30)
# Some keyword arguments
conv = eqx.nn.Conv1d(1, out_channels=3, kernel_size=(3,), key=getkey())
x = jrandom.normal(getkey(), (1, 32))
assert conv(x).shape == (3, 30)
# All keyword arguments
conv = eqx.nn.Conv1d(
in_channels=1,
out_channels=3,
kernel_size=(3,),
padding=1,
use_bias=False,
key=getkey(),
)
x = jrandom.normal(getkey(), (1, 32))
assert conv(x).shape == (3, 32)
# Test strides
conv = eqx.nn.Conv1d(
in_channels=3,
out_channels=1,
kernel_size=(3,),
stride=2,
padding=1,
use_bias=True,
key=getkey(),
)
x = jrandom.normal(getkey(), (3, 32))
assert conv(x).shape == (1, 16)
# Test value matches
conv = eqx.nn.Conv1d(1, 3, kernel_size=3, padding=1, key=getkey())
new_weight = jnp.arange(9).reshape(3, 1, 3)
new_bias = jnp.array([1, 2, 3]).reshape(3, 1)
data = jnp.arange(-3, 3).reshape(1, -1)
assert new_weight.shape == conv.weight.shape
assert new_bias.shape == conv.bias.shape
conv = eqx.tree_at(lambda x: (x.weight, x.bias), conv, (new_weight, new_bias))
answer = jnp.array(
[-6, -3, 0, 3, 6, 3, -20, -20, -8, 4, 16, 13, -34, -37, -16, 5, 26, 23]
).reshape(3, 6)
assert jnp.allclose(conv(data), answer)
def test_conv2d(getkey):
# Positional arguments
conv = eqx.nn.Conv2d(1, 3, 3, key=getkey())
x = jrandom.normal(getkey(), (1, 32, 32))
assert conv(x).shape == (3, 30, 30)
# Some keyword arguments
conv = eqx.nn.Conv2d(1, out_channels=3, kernel_size=(3, 3), key=getkey())
x = jrandom.normal(getkey(), (1, 32, 32))
assert conv(x).shape == (3, 30, 30)
# All keyword arguments
conv = eqx.nn.Conv2d(
in_channels=1,
out_channels=3,
kernel_size=(3, 3),
padding=1,
use_bias=False,
key=getkey(),
)
x = jrandom.normal(getkey(), (1, 32, 32))
assert conv(x).shape == (3, 32, 32)
# Test strides
conv = eqx.nn.Conv2d(
in_channels=3,
out_channels=1,
kernel_size=(3, 3),
stride=2,
padding=1,
use_bias=True,
key=getkey(),
)
x = jrandom.normal(getkey(), (3, 32, 32))
assert conv(x).shape == (1, 16, 16)
# Test value matches
conv = eqx.nn.Conv2d(1, 1, kernel_size=3, padding=1, key=getkey())
new_weight = jnp.arange(9).reshape(1, 1, 3, 3)
new_bias = jnp.array([1]).reshape(1, 1, 1)
data = jnp.arange(-4, 5).reshape(1, 3, 3)
assert new_weight.shape == conv.weight.shape
assert new_bias.shape == conv.bias.shape
conv = eqx.tree_at(lambda x: (x.weight, x.bias), conv, (new_weight, new_bias))
answer = jnp.array([-37, -31, -9, 25, 61, 49, 23, 41, 27]).reshape(1, 3, 3)
assert jnp.allclose(conv(data), answer)
def test_conv3d(getkey):
# Positional arguments
conv = eqx.nn.Conv3d(1, 3, 3, key=getkey())
x = jrandom.normal(getkey(), (1, 3, 32, 32))
assert conv(x).shape == (3, 1, 30, 30)
# Some keyword arguments
conv = eqx.nn.Conv3d(1, out_channels=3, kernel_size=(3, 3, 3), key=getkey())
x = jrandom.normal(getkey(), (1, 3, 32, 32))
assert conv(x).shape == (3, 1, 30, 30)
# All keyword arguments
conv = eqx.nn.Conv3d(
in_channels=1,
out_channels=3,
kernel_size=(3, 3, 3),
padding=1,
use_bias=False,
key=getkey(),
)
x = jrandom.normal(getkey(), (1, 3, 32, 32))
assert conv(x).shape == (3, 3, 32, 32)
# Test strides
conv = eqx.nn.Conv3d(
in_channels=3,
out_channels=1,
kernel_size=(3, 3, 3),
stride=2,
padding=1,
use_bias=True,
key=getkey(),
)
x = jrandom.normal(getkey(), (3, 3, 32, 32))
assert conv(x).shape == (1, 2, 16, 16)
# Test value matches
conv = eqx.nn.Conv3d(1, 1, kernel_size=(2, 1, 1), padding=(1, 0, 0), key=getkey())
new_weight = jnp.arange(2).reshape(1, 1, 2, 1, 1)
new_bias = jnp.array([1]).reshape(1, 1, 1, 1)
data = jnp.arange(-4, 4).reshape(1, 2, 2, 2)
assert new_weight.shape == conv.weight.shape
assert new_bias.shape == conv.bias.shape
conv = eqx.tree_at(lambda x: (x.weight, x.bias), conv, (new_weight, new_bias))
answer = jnp.array([-3, -2, -1, 0, 1, 2, 3, 4, 1, 1, 1, 1]).reshape(1, 3, 2, 2)
assert jnp.allclose(conv(data), answer)
| equinox-main | tests/test_nn.py |
import jax.numpy as jnp
import pytest
import equinox as eqx
def test_apply_updates1():
params = [jnp.array([5]), jnp.array([2])]
grads = [-1, 1]
new_params = eqx.apply_updates(params, grads)
assert new_params == [jnp.array([4]), jnp.array([3])]
def test_apply_updates2():
o = object()
params = [o, jnp.array(3.0), jnp.array(2.0)]
def f(p):
return p[1] + p[2]
grads = eqx.gradf(f, filter_fn=lambda x: x == 3)(params)
new_params = eqx.apply_updates(params, grads)
assert new_params == [o, jnp.array([4.0]), jnp.array([2.0])]
def test_apply_updates3():
o = object()
params = [o, jnp.array([2])]
grads = [0, 1]
with pytest.raises(TypeError):
eqx.apply_updates(params, grads)
| equinox-main | tests/test_update.py |
import functools as ft
import jax
import jax.numpy as jnp
import jax.random as jrandom
import numpy as np
import pytest
import equinox as eqx
def test_gradf_filter_fn(getkey):
a = jrandom.normal(getkey(), (2, 3))
b = jrandom.normal(getkey(), (2, 3))
@ft.partial(eqx.gradf, filter_fn=lambda _: True)
def f(x):
return jnp.sum(x)
grad_f = f(a)
assert jnp.all(grad_f == 1)
@ft.partial(eqx.gradf, argnums=(0, 1), filter_fn=lambda _: True)
def g1(x, y):
return jnp.sum(x)
grad_g1a, grad_g1b = g1(a, b)
assert jnp.all(grad_g1a == 1)
assert jnp.all(grad_g1b == 0)
@ft.partial(eqx.gradf, argnums=(0, 1), filter_fn=lambda _: False)
def g2(x, y):
return jnp.sum(x)
grad_g2a, grad_g2b = g2(a, b)
assert grad_g2a is None
assert grad_g2b is None
@ft.partial(eqx.gradf, argnums=1, filter_fn=lambda _: True)
def h(x, y):
return jnp.sum(x + y)
grad_h1b = h(a, b)
assert jnp.all(grad_h1b == 1)
@ft.partial(eqx.gradf, argnums=(0, 1), filter_fn=lambda _: True)
def i(x, y):
return jnp.sum(x(y))
with pytest.raises(Exception):
i(a, b) # there's no way to take a gradient wrt a
with pytest.raises(Exception):
i(lambda v: v, b) # there's no way to take a gradient wrt the lambda
@ft.partial(eqx.gradf, filter_fn=eqx.is_inexact_array)
def j(x):
sum = 0.0
for arg in jax.tree_leaves(x):
if eqx.is_array_like(arg):
sum = sum + jnp.sum(arg)
return sum
ga, gb = j([a, b])
assert jnp.all(ga == 1)
assert jnp.all(gb == 1)
gtrue, ghi, gobject, ga = j([True, "hi", object(), a])
assert gtrue is None
assert ghi is None
assert gobject is None
assert jnp.all(ga == 1)
gtrue, gdict, (g5, g1), gnp = j(
[
True,
{"hi": eqx.nn.Linear(1, 1, key=getkey())},
(5, 1.0),
np.array([2.0, 3.0]),
]
)
assert gtrue is None
assert list(gdict.keys()) == ["hi"]
assert isinstance(gdict["hi"], eqx.nn.Linear)
assert jnp.all(gdict["hi"].weight == 1)
assert jnp.all(gdict["hi"].bias == 1)
assert g5 is None
assert g1 is None
assert gnp is None
@ft.partial(eqx.gradf, filter_fn=eqx.is_array_like)
def k(x):
sum = 0.0
for arg in jax.tree_leaves(x):
if eqx.is_array_like(arg):
sum = sum + jnp.sum(arg)
return sum
gx, gy = k([a, b])
    assert jnp.all(gx == 1)
    assert jnp.all(gy == 1)
ghi, gobject, ga = k(["hi", object(), a])
assert ghi is None
assert gobject is None
assert jnp.all(ga == 1)
gdict, (g1,), gnp = k(
[{"hi": eqx.nn.Linear(1, 1, key=getkey())}, (1.0,), np.array([2.0, 3.0])]
)
assert list(gdict.keys()) == ["hi"]
assert isinstance(gdict["hi"], eqx.nn.Linear)
assert jnp.all(gdict["hi"].weight == 1)
assert jnp.all(gdict["hi"].bias == 1)
assert g1 == 1
assert gnp.shape == (2,)
assert np.all(gnp == 1)
def test_gradf_filter_tree(getkey):
a = jrandom.normal(getkey(), (2, 3))
b = jrandom.normal(getkey(), (1, 2))
c = jrandom.normal(getkey(), ())
@ft.partial(eqx.gradf, filter_tree=[True, False])
def f(x):
return jnp.sum(x[0]) + jnp.sum(x[1])
ga, gb = f([a, b])
assert jnp.all(ga == 1)
assert gb is None
@ft.partial(eqx.gradf, argnums=(0, 1), filter_tree=[True, False])
def g(x, y):
return jnp.sum(x) + jnp.sum(y)
ga, gb = g(a, b)
assert jnp.all(ga == 1)
assert gb is None
@ft.partial(eqx.gradf, argnums=0, filter_tree={"a": True, "b": False})
def h1(x, y):
return jnp.sum(x["a"]) * jnp.sum(x["b"]) * y
@ft.partial(eqx.gradf, argnums=1, filter_tree={"a": True, "b": False})
def h2(x, y):
return jnp.sum(y["a"]) * jnp.sum(y["b"]) * x
grad = h1({"a": a, "b": b}, c)
assert jnp.allclose(grad["a"], jnp.sum(b) * c)
assert grad["b"] is None
grad = h2(c, {"a": a, "b": b})
assert jnp.allclose(grad["a"], jnp.sum(b) * c)
assert grad["b"] is None
with pytest.raises(ValueError):
grad = h1(c, {"a": a, "b": b})
with pytest.raises(ValueError):
grad = h2({"a": a, "b": b}, c)
@ft.partial(eqx.gradf, argnums=(2, 0), filter_tree=(True,))
def i(x, y, z):
return jnp.sum(x) * jnp.sum(y) * jnp.sum(z)
with pytest.raises(IndexError):
i(a, b, c)
@ft.partial(eqx.gradf, argnums=(2, 0), filter_tree=(True, {"a": True, "b": False}))
def j(x, y, z):
return jnp.sum(x["a"]) * jnp.sum(x["b"]) * jnp.sum(y) * jnp.sum(z)
gradc, graddict = j({"a": a, "b": b}, 2.0, c)
assert jnp.allclose(gradc, jnp.sum(a) * jnp.sum(b) * 2)
assert jnp.allclose(graddict["a"], jnp.sum(b) * jnp.sum(c) * 2)
assert graddict["b"] is None
def test_both_filter():
with pytest.raises(ValueError):
@ft.partial(eqx.gradf, filter_tree=True, filter_fn=lambda _: True)
def f(x):
return x
def test_no_filter():
with pytest.raises(ValueError):
@eqx.gradf
def f(x):
return x
# TODO: more comprehensive tests on this.
def test_value_and_grad_f(getkey):
a = jrandom.normal(getkey(), (2, 3))
@ft.partial(eqx.value_and_grad_f, filter_fn=eqx.is_inexact_array)
def f(x):
return jnp.sum(x)
val, grad = f(a)
assert val == jnp.sum(a)
assert jnp.all(grad == 1)
def test_aux(getkey):
a = jrandom.normal(getkey(), (2, 3))
@ft.partial(eqx.gradf, has_aux=True, filter_fn=eqx.is_inexact_array)
def f(x):
return jnp.sum(x), "hi"
aux, grad = f(a)
assert aux == "hi"
assert jnp.all(grad == 1)
@ft.partial(eqx.value_and_grad_f, has_aux=True, filter_fn=eqx.is_inexact_array)
def f(x):
return jnp.sum(x), "hi"
(value, aux), grad = f(a)
assert value == jnp.sum(a)
assert aux == "hi"
assert jnp.all(grad == 1)
| equinox-main | tests/test_gradf.py |
import functools as ft
import jax
import jax.numpy as jnp
import jax.random as jrandom
import pytest
import equinox as eqx
def _eq(a, b):
return (type(a) is type(b)) and (a == b)
def test_filter_jit1(getkey):
a = jrandom.normal(getkey(), (2, 3))
b = jrandom.normal(getkey(), (3,))
c = jrandom.normal(getkey(), (1, 4))
general_tree = [
1,
True,
object(),
{"a": a, "tuple": (2.0, b)},
c,
eqx.nn.MLP(2, 2, 2, 2, key=getkey()),
]
array_tree = [{"a": a, "b": b}, (c,)]
_mlp = jax.tree_map(lambda u: u if eqx.is_array_like(u) else None, general_tree[-1])
@ft.partial(eqx.filter_jit, filter_spec=lambda _: True)
def f(x):
return x
assert jnp.all(a == f(a))
f1 = f(array_tree)
assert jnp.all(f1[0]["a"] == a)
assert jnp.all(f1[0]["b"] == b)
assert jnp.all(f1[1][0] == c)
with pytest.raises(TypeError):
f(general_tree)
@ft.partial(eqx.filter_jit, filter_spec=eqx.is_inexact_array)
def g(x):
return jax.tree_map(lambda u: u if eqx.is_array_like(u) else None, x)
assert jnp.all(a == g(a))
g1 = g(array_tree)
assert jnp.all(g1[0]["a"] == a)
assert jnp.all(g1[0]["b"] == b)
assert jnp.all(g1[1][0] == c)
g2 = g(general_tree)
assert _eq(g2[0], 1)
assert _eq(g2[1], True)
assert _eq(g2[2], None)
assert jnp.all(g2[3]["a"] == a)
assert _eq(g2[3]["tuple"][0], 2.0)
assert jnp.all(g2[3]["tuple"][1] == b)
assert jnp.all(g2[4] == c)
assert _eq(g2[5], _mlp)
@ft.partial(eqx.filter_jit, filter_spec=eqx.is_array_like)
def h(x):
return jax.tree_map(lambda u: u if eqx.is_array_like(u) else None, x)
assert jnp.all(a == h(a))
h1 = h(array_tree)
assert jnp.all(h1[0]["a"] == a)
assert jnp.all(h1[0]["b"] == b)
assert jnp.all(h1[1][0] == c)
h2 = h(general_tree)
assert _eq(h2[0], jnp.array(1))
assert _eq(h2[1], jnp.array(True))
assert _eq(h2[2], None)
assert jnp.all(h2[3]["a"] == a)
assert _eq(h2[3]["tuple"][0], jnp.array(2.0))
assert jnp.all(h2[3]["tuple"][1] == b)
assert jnp.all(h2[4] == c)
assert _eq(h2[5], _mlp)
def test_filter_jit2(getkey):
a = jrandom.normal(getkey(), (2, 3))
b = jrandom.normal(getkey(), (3,))
c = jrandom.normal(getkey(), (1, 4))
general_tree = [
1,
True,
object(),
{"a": a, "tuple": (2.0, b)},
c,
eqx.nn.MLP(2, 2, 2, 2, key=getkey()),
]
_mlp = jax.tree_map(lambda u: u if eqx.is_array_like(u) else None, general_tree[-1])
@ft.partial(
eqx.filter_jit,
filter_spec=(
(
[
True,
True,
False,
{"a": True, "tuple": (False, True)},
True,
eqx.is_inexact_array,
],
),
{},
),
)
def f(x):
return jax.tree_map(lambda u: u if eqx.is_array_like(u) else None, x)
f1 = f(general_tree)
assert _eq(f1[0], jnp.array(1))
assert _eq(f1[1], jnp.array(True))
assert _eq(f1[2], None)
assert jnp.all(f1[3]["a"] == a)
assert _eq(f1[3]["tuple"][0], 2.0)
assert jnp.all(f1[3]["tuple"][1] == b)
assert jnp.all(f1[4] == c)
assert _eq(f1[5], _mlp)
def test_num_traces():
num_traces = 0
@ft.partial(eqx.filter_jit, filter_spec=lambda _: True)
def f(x):
nonlocal num_traces
num_traces += 1
f(jnp.zeros(2))
f(jnp.zeros(2))
assert num_traces == 1
f(jnp.zeros(3))
f(jnp.zeros(3))
assert num_traces == 2
f([jnp.zeros(2)])
f([jnp.zeros(2), jnp.zeros(3)])
f([jnp.zeros(2), True])
assert num_traces == 5
num_traces = 0
@ft.partial(eqx.filter_jit, filter_spec=([eqx.is_array_like, False], {}))
def g(x, y):
nonlocal num_traces
num_traces += 1
g(jnp.zeros(2), True)
g(jnp.zeros(2), False)
assert num_traces == 2
num_traces = 0
@ft.partial(
eqx.filter_jit, filter_spec=([False, {"a": True, "b": False}, False, False], {})
)
def h(x, y, z, w):
nonlocal num_traces
num_traces += 1
h(True, {"a": 1, "b": 1}, True, True)
h(False, {"a": 1, "b": 1}, True, True)
h(True, {"a": 1, "b": 0}, True, True)
h(True, {"a": 1, "b": 1}, True, 2)
h(True, {"a": 1, "b": 1}, 5, True)
assert num_traces == 5
h(True, {"a": 2, "b": 1}, True, True)
assert num_traces == 5
| equinox-main | tests/test_filter_jit.py |
import jax
import jax.numpy as jnp
import numpy as np
import pytest
import equinox as eqx
def test_is_array(getkey):
objs = [
1,
2.0,
[2.0],
True,
object(),
jnp.array([1]),
jnp.array(1.0),
np.array(1.0),
np.array(1),
eqx.nn.Linear(1, 1, key=getkey()),
]
results = [False, False, False, False, False, True, True, False, False, False]
for o, r in zip(objs, results):
assert eqx.is_array(o) == r
def test_is_array_like(getkey):
objs = [
1,
2.0,
[2.0],
True,
object(),
jnp.array([1]),
jnp.array(1.0),
np.array(1.0),
np.array(1),
eqx.nn.Linear(1, 1, key=getkey()),
]
results = [True, True, False, True, False, True, True, True, True, False]
for o, r in zip(objs, results):
assert eqx.is_array_like(o) == r
def test_is_inexact_array(getkey):
objs = [
1,
2.0,
[2.0],
True,
object(),
jnp.array([1]),
jnp.array(1.0),
np.array(1.0),
np.array(1),
eqx.nn.Linear(1, 1, key=getkey()),
]
results = [False, False, False, False, False, False, True, False, False, False]
for o, r in zip(objs, results):
assert eqx.is_inexact_array(o) == r
def test_is_inexact_array_like(getkey):
objs = [
1,
2.0,
[2.0],
True,
object(),
jnp.array([1]),
jnp.array(1.0),
np.array(1.0),
np.array(1),
eqx.nn.Linear(1, 1, key=getkey()),
]
results = [False, True, False, False, False, False, True, True, False, False]
for o, r in zip(objs, results):
assert eqx.is_inexact_array_like(o) == r
def test_filter(getkey):
filter_fn = lambda x: isinstance(x, int)
for pytree in (
[
1,
2,
[
3,
"hi",
{"a": jnp.array(1), "b": 4, "c": eqx.nn.MLP(2, 2, 2, 2, key=getkey())},
],
],
[1, 1, 1, 1, "hi"],
):
filtered = eqx.filter(pytree, filter_spec=filter_fn)
for arg in jax.tree_leaves(filtered):
assert isinstance(arg, int)
num_int_leaves = sum(
1 for leaf in jax.tree_leaves(filtered) if isinstance(leaf, int)
)
assert len(jax.tree_leaves(filtered)) == num_int_leaves
filter_spec = [False, True, [filter_fn, True]]
sentinel = object()
pytree = [
eqx.nn.Linear(1, 1, key=getkey()),
eqx.nn.Linear(1, 1, key=getkey()),
[eqx.nn.Linear(1, 1, key=getkey()), sentinel],
]
filtered = eqx.filter(pytree, filter_spec=filter_spec)
none_linear = jax.tree_map(lambda _: None, eqx.nn.Linear(1, 1, key=getkey()))
assert filtered[0] is None
assert filtered[1] == pytree[1]
assert filtered[2][0] == none_linear
assert filtered[2][1] is sentinel
with pytest.raises(ValueError):
eqx.filter(pytree, filter_spec=filter_spec[1:])
def test_partition_and_combine(getkey):
filter_fn = lambda x: isinstance(x, int)
for pytree in (
[
1,
2,
[
3,
"hi",
{"a": jnp.array(1), "b": 4, "c": eqx.nn.MLP(2, 2, 2, 2, key=getkey())},
],
],
[1, 1, 1, 1, "hi"],
):
filtered, unfiltered = eqx.partition(pytree, filter_spec=filter_fn)
for arg in jax.tree_leaves(filtered):
assert isinstance(arg, int)
for arg in jax.tree_leaves(unfiltered):
assert not isinstance(arg, int)
assert eqx.combine(filtered, unfiltered) == pytree
assert eqx.combine(unfiltered, filtered) == pytree
def test_splitfn_and_merge(getkey):
filter_fn = lambda x: isinstance(x, int)
for pytree in (
[
1,
2,
[
3,
"hi",
{"a": jnp.array(1), "b": 4, "c": eqx.nn.MLP(2, 2, 2, 2, key=getkey())},
],
],
[1, 1, 1, 1, "hi"],
):
int_args, notint_args, which, treedef = eqx.split(pytree, filter_fn=filter_fn)
for arg in int_args:
assert isinstance(arg, int)
for arg in notint_args:
assert not isinstance(arg, int)
assert sum(which) == 4
re_pytree = eqx.merge(int_args, notint_args, which, treedef)
assert re_pytree == pytree
def test_splittree_and_merge(getkey):
linear = eqx.nn.Linear(1, 1, key=getkey())
linear_tree = jax.tree_map(lambda _: True, linear)
filter_tree = [
True,
False,
[False, False, {"a": True, "b": False, "c": linear_tree}],
]
for i, pytree in enumerate(
(
[1, 2, [3, True, {"a": jnp.array(1), "b": 4, "c": linear}]],
[1, 1, [1, 1, {"a": 1, "b": 1, "c": linear}]],
)
):
keep_args, notkeep_args, which, treedef = eqx.split(
pytree, filter_tree=filter_tree
)
if i == 0:
assert set(notkeep_args) == {2, 3, True, 4}
else:
assert notkeep_args == [1, 1, 1, 1]
assert sum(which) == 4
re_pytree = eqx.merge(keep_args, notkeep_args, which, treedef)
assert re_pytree == pytree
filter_tree = [True, [False, False]]
pytree = [True, None]
with pytest.raises(ValueError):
eqx.split(pytree, filter_tree=filter_tree)
| equinox-main | tests/test_filters.py |
import functools as ft
import jax
import jax.numpy as jnp
import jax.random as jrandom
import numpy as np
import pytest
import equinox as eqx
def test_filter_grad1(getkey):
a = jrandom.normal(getkey(), (2, 3))
@ft.partial(eqx.filter_grad, filter_spec=lambda _: True)
def f(x):
return jnp.sum(x)
grad_f = f(a)
assert jnp.all(grad_f == 1)
def test_filter_grad2(getkey):
a = jrandom.normal(getkey(), (2, 3))
b = jrandom.normal(getkey(), (2, 3))
@ft.partial(eqx.filter_grad, filter_spec=eqx.is_inexact_array)
def f(x):
sum = 0.0
for arg in jax.tree_leaves(x):
if eqx.is_array_like(arg):
sum = sum + jnp.sum(arg)
return sum
ga, gb = f([a, b])
assert jnp.all(ga == 1)
assert jnp.all(gb == 1)
gtrue, ghi, gobject, ga = f([True, "hi", object(), a])
assert gtrue is None
assert ghi is None
assert gobject is None
assert jnp.all(ga == 1)
gtrue, gdict, (g5, g1), gnp = f(
[
True,
{"hi": eqx.nn.Linear(1, 1, key=getkey())},
(5, 1.0),
np.array([2.0, 3.0]),
]
)
assert gtrue is None
assert list(gdict.keys()) == ["hi"]
assert isinstance(gdict["hi"], eqx.nn.Linear)
assert jnp.all(gdict["hi"].weight == 1)
assert jnp.all(gdict["hi"].bias == 1)
assert g5 is None
assert g1 is None
assert gnp is None
def test_filter_grad3(getkey):
a = jrandom.normal(getkey(), (2, 3))
b = jrandom.normal(getkey(), (1, 2))
c = jrandom.normal(getkey(), ())
@ft.partial(eqx.filter_grad, filter_spec=[True, False])
def f(x):
return jnp.sum(x[0]) + jnp.sum(x[1])
ga, gb = f([a, b])
assert jnp.all(ga == 1)
assert gb is None
@ft.partial(eqx.filter_grad, filter_spec={"a": True, "b": False})
def h(x, y):
return jnp.sum(x["a"]) * jnp.sum(x["b"]) * y
grad = h({"a": a, "b": b}, c)
assert jnp.allclose(grad["a"], jnp.sum(b) * c)
assert grad["b"] is None
with pytest.raises(ValueError):
grad = h(c, {"a": a, "b": b})
# TODO: more comprehensive tests on this.
def test_filter_value_and_grad_(getkey):
a = jrandom.normal(getkey(), (2, 3))
@ft.partial(eqx.filter_value_and_grad, filter_spec=eqx.is_inexact_array)
def f(x):
return jnp.sum(x)
val, grad = f(a)
assert val == jnp.sum(a)
assert jnp.all(grad == 1)
def test_aux(getkey):
a = jrandom.normal(getkey(), (2, 3))
@ft.partial(eqx.filter_grad, has_aux=True, filter_spec=eqx.is_inexact_array)
def f(x):
return jnp.sum(x), "hi"
aux, grad = f(a)
assert aux == "hi"
assert jnp.all(grad == 1)
@ft.partial(
eqx.filter_value_and_grad, has_aux=True, filter_spec=eqx.is_inexact_array
)
def f(x):
return jnp.sum(x), "hi"
(value, aux), grad = f(a)
assert value == jnp.sum(a)
assert aux == "hi"
assert jnp.all(grad == 1)
| equinox-main | tests/test_filter_grad.py |
"""
Usage: python test.py <frameworks>
1. Installs a subset of the dependencies (make sure `which pip` points to the correct location)
2. Installs current version of einops in editable mode
3. Runs the tests
"""
import os
import shutil
import sys
from subprocess import Popen, PIPE
from pathlib import Path
__author__ = "Alex Rogozhnikov"
def run(cmd, **env):
# keeps printing output when testing
cmd = cmd.split(" ") if isinstance(cmd, str) else cmd
p = Popen(cmd, cwd=str(Path(__file__).parent), env={**os.environ, **env})
p.communicate()
return p.returncode
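# For example (illustrative): `run("pip list")` executes in this file's directory,
# and keyword arguments are forwarded as environment variables, e.g.
# `run("python -m pytest tests", EINOPS_TEST_BACKENDS="numpy")`.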
# check we have nvidia-smi
have_cuda = False
if shutil.which("nvidia-smi") is not None:
output, _ = Popen("nvidia-smi".split(" "), stdout=PIPE).communicate()
if b"failed because" not in output:
have_cuda = True
def main():
_executable, *frameworks = sys.argv
framework_name2installation = {
"numpy": ["numpy"],
"torch": ["torch"],
"jax": ["jax", "jaxlib", "flax"],
"tensorflow": ["tensorflow"],
"chainer": ["chainer"],
"cupy": ["cupy"],
"paddle": ["paddlepaddle==0.0.0 -f https://www.paddlepaddle.org.cn/whl/linux/cpu-mkl/develop.html"],
"oneflow": ["oneflow==0.9.0+cpu -f https://release.oneflow.info"],
}
usage = f"""
Usage: python test.py <frameworks>
Example: python test.py numpy pytorch
Available frameworks: {list(framework_name2installation)}
"""
if len(frameworks) == 0:
print(usage)
return
else:
synonyms = {
"tf": "tensorflow",
"pytorch": "torch",
"paddlepaddle": "paddle",
}
frameworks = [synonyms.get(f, f) for f in frameworks]
wrong_frameworks = [f for f in frameworks if f not in framework_name2installation]
if wrong_frameworks:
print(usage)
raise RuntimeError(f"Unrecognized frameworks: {wrong_frameworks}")
other_dependencies = [
"nbformat",
"nbconvert",
"jupyter",
"parameterized",
"pillow",
"pytest",
]
for framework in frameworks:
print(f"Installing {framework}")
pip_instructions = framework_name2installation[framework]
assert 0 == run("pip install {} --progress-bar off".format(" ".join(pip_instructions)))
print("Install testing infra")
assert 0 == run("pip install {} --progress-bar off".format(" ".join(other_dependencies)))
# install einops
assert 0 == run("pip install -e .")
# we need to inform testing script which frameworks to use
# this is done by setting a flag EINOPS_TEST_BACKENDS
from tests import unparse_backends
flag_name, flag_value = unparse_backends(backend_names=frameworks)
return_code = run(
"python -m pytest tests",
**{flag_name: flag_value},
)
assert return_code == 0
if __name__ == "__main__":
main()
| einops-master | test.py |
import pytest
from einops import EinopsError
from einops.parsing import ParsedExpression, AnonymousAxis, _ellipsis
__author__ = 'Alex Rogozhnikov'
class AnonymousAxisPlaceholder:
def __init__(self, value: int):
self.value = value
assert isinstance(self.value, int)
def __eq__(self, other):
return isinstance(other, AnonymousAxis) and self.value == other.value
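# Why a placeholder is needed (added commentary): AnonymousAxis uses identity
# comparison, so two AnonymousAxis('2') instances are *not* equal to each other,
# and direct expected-vs-actual comparisons in tests would fail. The placeholder
# compares equal to any AnonymousAxis carrying the same value, which lets whole
# composition lists be compared, e.g.:
#   [AnonymousAxis('2')] == [AnonymousAxisPlaceholder(2)]  # True (via reflected __eq__)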
def test_anonymous_axes():
a, b = AnonymousAxis('2'), AnonymousAxis('2')
assert a != b
c, d = AnonymousAxisPlaceholder(2), AnonymousAxisPlaceholder(3)
assert a == c and b == c
assert a != d and b != d
assert [a, 2, b] == [c, 2, c]
def test_elementary_axis_name():
for name in ['a', 'b', 'h', 'dx', 'h1', 'zz', 'i9123', 'somelongname',
'Alex', 'camelCase', 'u_n_d_e_r_score', 'unreasonablyLongAxisName']:
assert ParsedExpression.check_axis_name(name)
for name in ['', '2b', '12', '_startWithUnderscore', 'endWithUnderscore_', '_', '...', _ellipsis]:
assert not ParsedExpression.check_axis_name(name)
def test_invalid_expressions():
# double ellipsis should raise an error
ParsedExpression('... a b c d')
with pytest.raises(EinopsError):
ParsedExpression('... a b c d ...')
with pytest.raises(EinopsError):
ParsedExpression('... a b c (d ...)')
with pytest.raises(EinopsError):
ParsedExpression('(... a) b c (d ...)')
# double/missing/enclosed parenthesis
ParsedExpression('(a) b c (d ...)')
with pytest.raises(EinopsError):
ParsedExpression('(a)) b c (d ...)')
with pytest.raises(EinopsError):
ParsedExpression('(a b c (d ...)')
with pytest.raises(EinopsError):
ParsedExpression('(a) (()) b c (d ...)')
with pytest.raises(EinopsError):
ParsedExpression('(a) ((b c) (d ...))')
# invalid identifiers
ParsedExpression('camelCase under_scored cApiTaLs ß ...')
with pytest.raises(EinopsError):
ParsedExpression('1a')
with pytest.raises(EinopsError):
ParsedExpression('_pre')
with pytest.raises(EinopsError):
ParsedExpression('...pre')
with pytest.raises(EinopsError):
ParsedExpression('pre...')
def test_parse_expression():
parsed = ParsedExpression('a1 b1 c1 d1')
assert parsed.identifiers == {'a1', 'b1', 'c1', 'd1'}
assert parsed.composition == [['a1'], ['b1'], ['c1'], ['d1']]
assert not parsed.has_non_unitary_anonymous_axes
assert not parsed.has_ellipsis
parsed = ParsedExpression('() () () ()')
assert parsed.identifiers == set()
assert parsed.composition == [[], [], [], []]
assert not parsed.has_non_unitary_anonymous_axes
assert not parsed.has_ellipsis
parsed = ParsedExpression('1 1 1 ()')
assert parsed.identifiers == set()
assert parsed.composition == [[], [], [], []]
assert not parsed.has_non_unitary_anonymous_axes
assert not parsed.has_ellipsis
aap = AnonymousAxisPlaceholder
parsed = ParsedExpression('5 (3 4)')
assert len(parsed.identifiers) == 3 and {i.value for i in parsed.identifiers} == {3, 4, 5}
assert parsed.composition == [[aap(5)], [aap(3), aap(4)]]
assert parsed.has_non_unitary_anonymous_axes
assert not parsed.has_ellipsis
parsed = ParsedExpression('5 1 (1 4) 1')
assert len(parsed.identifiers) == 2 and {i.value for i in parsed.identifiers} == {4, 5}
assert parsed.composition == [[aap(5)], [], [aap(4)], []]
parsed = ParsedExpression('name1 ... a1 12 (name2 14)')
assert len(parsed.identifiers) == 6
    assert len(parsed.identifiers.difference({'name1', _ellipsis, 'a1', 'name2'})) == 2
assert parsed.composition == [['name1'], _ellipsis, ['a1'], [aap(12)], ['name2', aap(14)]]
assert parsed.has_non_unitary_anonymous_axes
assert parsed.has_ellipsis
assert not parsed.has_ellipsis_parenthesized
parsed = ParsedExpression('(name1 ... a1 12) name2 14')
assert len(parsed.identifiers) == 6
    assert len(parsed.identifiers.difference({'name1', _ellipsis, 'a1', 'name2'})) == 2
assert parsed.composition == [['name1', _ellipsis, 'a1', aap(12)], ['name2'], [aap(14)]]
assert parsed.has_non_unitary_anonymous_axes
assert parsed.has_ellipsis
assert parsed.has_ellipsis_parenthesized
| einops-master | tests/test_parsing.py |
import pickle
import tempfile
from collections import namedtuple
import numpy
import pytest
from einops import rearrange, reduce
from einops.einops import _reductions
from . import collect_test_backends, is_backend_tested
__author__ = "Alex Rogozhnikov"
testcase = namedtuple("testcase", ["pattern", "axes_lengths", "input_shape", "wrong_shapes"])
rearrangement_patterns = [
testcase(
"b c h w -> b (c h w)",
dict(c=20),
(10, 20, 30, 40),
[(), (10,), (10, 10, 10), (10, 21, 30, 40), [1, 20, 1, 1, 1]],
),
testcase(
"b c (h1 h2) (w1 w2) -> b (c h2 w2) h1 w1",
dict(h2=2, w2=2),
(10, 20, 30, 40),
[(), (1, 1, 1, 1), (1, 10, 3), ()],
),
testcase(
"b ... c -> c b ...",
dict(b=10),
(10, 20, 30),
[(), (10,), (5, 10)],
),
]
def test_rearrange_imperative():
for backend in collect_test_backends(symbolic=False, layers=True):
print("Test layer for ", backend.framework_name)
for pattern, axes_lengths, input_shape, wrong_shapes in rearrangement_patterns:
x = numpy.arange(numpy.prod(input_shape), dtype="float32").reshape(input_shape)
result_numpy = rearrange(x, pattern, **axes_lengths)
layer = backend.layers().Rearrange(pattern, **axes_lengths)
for shape in wrong_shapes:
try:
layer(backend.from_numpy(numpy.zeros(shape, dtype="float32")))
                except Exception:
pass
else:
raise AssertionError("Failure expected")
# simple pickling / unpickling
layer2 = pickle.loads(pickle.dumps(layer))
result1 = backend.to_numpy(layer(backend.from_numpy(x)))
result2 = backend.to_numpy(layer2(backend.from_numpy(x)))
assert numpy.allclose(result_numpy, result1)
assert numpy.allclose(result1, result2)
just_sum = backend.layers().Reduce("...->", reduction="sum")
variable = backend.from_numpy(x)
result = just_sum(layer(variable))
result.backward()
assert numpy.allclose(backend.to_numpy(variable.grad), 1)
def test_rearrange_symbolic():
for backend in collect_test_backends(symbolic=True, layers=True):
print("Test layer for ", backend.framework_name)
for pattern, axes_lengths, input_shape, wrong_shapes in rearrangement_patterns:
x = numpy.arange(numpy.prod(input_shape), dtype="float32").reshape(input_shape)
result_numpy = rearrange(x, pattern, **axes_lengths)
layer = backend.layers().Rearrange(pattern, **axes_lengths)
input_shape_of_nones = [None] * len(input_shape)
shapes = [input_shape, input_shape_of_nones]
for shape in shapes:
symbol = backend.create_symbol(shape)
eval_inputs = [(symbol, x)]
result_symbol1 = layer(symbol)
result1 = backend.eval_symbol(result_symbol1, eval_inputs)
assert numpy.allclose(result_numpy, result1)
layer2 = pickle.loads(pickle.dumps(layer))
result_symbol2 = layer2(symbol)
result2 = backend.eval_symbol(result_symbol2, eval_inputs)
assert numpy.allclose(result1, result2)
# now testing back-propagation
just_sum = backend.layers().Reduce("...->", reduction="sum")
result_sum1 = backend.eval_symbol(just_sum(result_symbol1), eval_inputs)
result_sum2 = numpy.sum(x)
assert numpy.allclose(result_sum1, result_sum2)
reduction_patterns = rearrangement_patterns + [
testcase("b c h w -> b ()", dict(b=10), (10, 20, 30, 40), [(10,), (10, 20, 30)]),
testcase("b c (h1 h2) (w1 w2) -> b c h1 w1", dict(h1=15, h2=2, w2=2), (10, 20, 30, 40), [(10, 20, 31, 40)]),
testcase("b ... c -> b", dict(b=10), (10, 20, 30, 40), [(10,), (11, 10)]),
]
def test_reduce_imperative():
for backend in collect_test_backends(symbolic=False, layers=True):
print("Test layer for ", backend.framework_name)
for reduction in _reductions:
for pattern, axes_lengths, input_shape, wrong_shapes in reduction_patterns:
print(backend, reduction, pattern, axes_lengths, input_shape, wrong_shapes)
x = numpy.arange(1, 1 + numpy.prod(input_shape), dtype="float32").reshape(input_shape)
x /= x.mean()
result_numpy = reduce(x, pattern, reduction, **axes_lengths)
layer = backend.layers().Reduce(pattern, reduction, **axes_lengths)
for shape in wrong_shapes:
try:
layer(backend.from_numpy(numpy.zeros(shape, dtype="float32")))
                    except Exception:
pass
else:
raise AssertionError("Failure expected")
# simple pickling / unpickling
layer2 = pickle.loads(pickle.dumps(layer))
result1 = backend.to_numpy(layer(backend.from_numpy(x)))
result2 = backend.to_numpy(layer2(backend.from_numpy(x)))
assert numpy.allclose(result_numpy, result1)
assert numpy.allclose(result1, result2)
just_sum = backend.layers().Reduce("...->", reduction="sum")
variable = backend.from_numpy(x)
result = just_sum(layer(variable))
result.backward()
grad = backend.to_numpy(variable.grad)
if reduction == "sum":
assert numpy.allclose(grad, 1)
if reduction == "mean":
assert numpy.allclose(grad, grad.min())
if reduction in ["max", "min"]:
assert numpy.all(numpy.in1d(grad, [0, 1]))
assert numpy.sum(grad) > 0.5
def test_reduce_symbolic():
for backend in collect_test_backends(symbolic=True, layers=True):
print("Test layer for ", backend.framework_name)
for reduction in _reductions:
for pattern, axes_lengths, input_shape, wrong_shapes in reduction_patterns:
x = numpy.arange(1, 1 + numpy.prod(input_shape), dtype="float32").reshape(input_shape)
x /= x.mean()
result_numpy = reduce(x, pattern, reduction, **axes_lengths)
layer = backend.layers().Reduce(pattern, reduction, **axes_lengths)
input_shape_of_nones = [None] * len(input_shape)
shapes = [input_shape, input_shape_of_nones]
for shape in shapes:
symbol = backend.create_symbol(shape)
eval_inputs = [(symbol, x)]
result_symbol1 = layer(symbol)
result1 = backend.eval_symbol(result_symbol1, eval_inputs)
assert numpy.allclose(result_numpy, result1)
layer2 = pickle.loads(pickle.dumps(layer))
result_symbol2 = layer2(symbol)
result2 = backend.eval_symbol(result_symbol2, eval_inputs)
assert numpy.allclose(result1, result2)
def create_torch_model(use_reduce=False, add_scripted_layer=False):
if not is_backend_tested("torch"):
pytest.skip()
else:
from torch.nn import Sequential, Conv2d, MaxPool2d, Linear, ReLU
from einops.layers.torch import Rearrange, Reduce, EinMix
import torch.jit
return Sequential(
Conv2d(3, 6, kernel_size=(5, 5)),
Reduce("b c (h h2) (w w2) -> b c h w", "max", h2=2, w2=2) if use_reduce else MaxPool2d(kernel_size=2),
Conv2d(6, 16, kernel_size=(5, 5)),
Reduce("b c (h h2) (w w2) -> b c h w", "max", h2=2, w2=2),
torch.jit.script(Rearrange("b c h w -> b (c h w)"))
if add_scripted_layer
else Rearrange("b c h w -> b (c h w)"),
Linear(16 * 5 * 5, 120),
ReLU(),
Linear(120, 84),
ReLU(),
EinMix("b c1 -> (b c2)", weight_shape="c1 c2", bias_shape="c2", c1=84, c2=84),
EinMix("(b c2) -> b c3", weight_shape="c2 c3", bias_shape="c3", c2=84, c3=84),
Linear(84, 10),
)
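# For orientation (an observation, not from the original source): the model above
# is a LeNet-style CNN where max-pooling can be swapped for einops Reduce layers,
# flattening is expressed with Rearrange, and two EinMix layers act as
# einsum-defined linear maps before the final classifier head.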
def test_torch_layer():
if not is_backend_tested("torch"):
pytest.skip()
else:
# checked that torch present
import torch
import torch.jit
model1 = create_torch_model(use_reduce=True)
model2 = create_torch_model(use_reduce=False)
input = torch.randn([10, 3, 32, 32])
# random models have different predictions
assert not torch.allclose(model1(input), model2(input))
model2.load_state_dict(pickle.loads(pickle.dumps(model1.state_dict())))
assert torch.allclose(model1(input), model2(input))
# tracing (freezing)
model3 = torch.jit.trace(model2, example_inputs=input)
torch.testing.assert_close(model1(input), model3(input), atol=1e-3, rtol=1e-3)
torch.testing.assert_close(model1(input + 1), model3(input + 1), atol=1e-3, rtol=1e-3)
model4 = torch.jit.trace(model2, example_inputs=input)
torch.testing.assert_close(model1(input), model4(input), atol=1e-3, rtol=1e-3)
torch.testing.assert_close(model1(input + 1), model4(input + 1), atol=1e-3, rtol=1e-3)
def test_torch_layers_scripting():
if not is_backend_tested("torch"):
pytest.skip()
else:
import torch
for script_layer in [False, True]:
model1 = create_torch_model(use_reduce=True, add_scripted_layer=script_layer)
model2 = torch.jit.script(model1)
input = torch.randn([10, 3, 32, 32])
torch.testing.assert_close(model1(input), model2(input), atol=1e-3, rtol=1e-3)
def test_keras_layer():
if not is_backend_tested("tensorflow"):
pytest.skip()
else:
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D as Conv2d, Dense as Linear, ReLU
from einops.layers.keras import Rearrange, Reduce, EinMix, keras_custom_objects
def create_keras_model():
return Sequential(
[
Conv2d(6, kernel_size=5, input_shape=[32, 32, 3]),
Reduce("b c (h h2) (w w2) -> b c h w", "max", h2=2, w2=2),
Conv2d(16, kernel_size=5),
Reduce("b c (h h2) (w w2) -> b c h w", "max", h2=2, w2=2),
Rearrange("b c h w -> b (c h w)"),
Linear(120),
ReLU(),
Linear(84),
ReLU(),
EinMix("b c1 -> (b c2)", weight_shape="c1 c2", bias_shape="c2", c1=84, c2=84),
EinMix("(b c2) -> b c3", weight_shape="c2 c3", bias_shape="c3", c2=84, c3=84),
Linear(10),
]
)
model1 = create_keras_model()
model2 = create_keras_model()
input = numpy.random.normal(size=[10, 32, 32, 3]).astype("float32")
assert not numpy.allclose(model1.predict_on_batch(input), model2.predict_on_batch(input))
# get some temp filename
with tempfile.NamedTemporaryFile(mode="r+b") as f:
tmp_filename = f.name
# save arch + weights
print("temp_path_keras1", tmp_filename)
tf.keras.models.save_model(model1, tmp_filename)
model3 = tf.keras.models.load_model(tmp_filename, custom_objects=keras_custom_objects)
assert numpy.allclose(model1.predict_on_batch(input), model3.predict_on_batch(input))
# save arch as json
model4 = tf.keras.models.model_from_json(model1.to_json(), custom_objects=keras_custom_objects)
model1.save_weights(tmp_filename)
model4.load_weights(tmp_filename)
model2.load_weights(tmp_filename)
assert numpy.allclose(model1.predict_on_batch(input), model4.predict_on_batch(input))
assert numpy.allclose(model1.predict_on_batch(input), model2.predict_on_batch(input))
def test_chainer_layer():
chainer_is_present = any(
"chainer" in backend.framework_name for backend in collect_test_backends(symbolic=False, layers=True)
)
if chainer_is_present:
# checked that chainer is present
import chainer
import chainer.links as L
import chainer.functions as F
from einops.layers.chainer import Rearrange, Reduce, EinMix
from einops import asnumpy
import numpy as np
def create_model():
return chainer.Sequential(
L.Convolution2D(3, 6, ksize=(5, 5)),
Reduce("b c (h h2) (w w2) -> b c h w", "max", h2=2, w2=2),
L.Convolution2D(6, 16, ksize=(5, 5)),
Reduce("b c (h h2) (w w2) -> b c h w", "max", h2=2, w2=2),
Rearrange("b c h w -> b (c h w)"),
L.Linear(16 * 5 * 5, 120),
L.Linear(120, 84),
F.relu,
EinMix("b c1 -> (b c2)", weight_shape="c1 c2", bias_shape="c2", c1=84, c2=84),
EinMix("(b c2) -> b c3", weight_shape="c2 c3", bias_shape="c3", c2=84, c3=84),
L.Linear(84, 10),
)
model1 = create_model()
model2 = create_model()
x = np.random.normal(size=[10, 3, 32, 32]).astype("float32")
x = chainer.Variable(x)
assert not numpy.allclose(asnumpy(model1(x)), asnumpy(model2(x)))
with tempfile.TemporaryDirectory() as dir:
filename = f"{dir}/file.npz"
chainer.serializers.save_npz(filename, model1)
chainer.serializers.load_npz(filename, model2)
assert numpy.allclose(asnumpy(model1(x)), asnumpy(model2(x)))
def test_flax_layers():
"""
One-off simple tests for Flax layers.
Unfortunately, Flax layers have a different interface from other layers.
"""
if not is_backend_tested("jax"):
pytest.skip()
else:
import jax
import jax.numpy as jnp
import flax
from flax import linen as nn
from einops.layers.flax import EinMix, Reduce, Rearrange
class NN(nn.Module):
@nn.compact
def __call__(self, x):
x = EinMix(
"b (h h2) (w w2) c -> b h w c_out", "h2 w2 c c_out", "c_out", sizes=dict(h2=2, w2=3, c=4, c_out=5)
)(x)
x = Rearrange("b h w c -> b (w h c)", sizes=dict(c=5))(x)
x = Reduce("b hwc -> b", "mean", dict(hwc=2 * 3 * 5))(x)
return x
model = NN()
fixed_input = jnp.ones([10, 2 * 2, 3 * 3, 4])
params = model.init(jax.random.PRNGKey(0), fixed_input)
eval_at_point = lambda params: jnp.linalg.norm(model.apply(params, fixed_input))
vandg = jax.value_and_grad(eval_at_point)
value0 = eval_at_point(params)
value1, grad1 = vandg(params)
assert jnp.allclose(value0, value1)
params2 = jax.tree_map(lambda x1, x2: x1 - x2 * 0.001, params, grad1)
value2 = eval_at_point(params2)
assert value0 >= value2, (value0, value2)
# check serialization
fbytes = flax.serialization.to_bytes(params)
_loaded = flax.serialization.from_bytes(params, fbytes)
| einops-master | tests/test_layers.py |
import logging
import os
from functools import lru_cache
from typing import List, Tuple
from einops import _backends
import warnings
__author__ = "Alex Rogozhnikov"
# minimize noise in test logging
logging.getLogger("tensorflow").disabled = True
logging.getLogger("matplotlib").disabled = True
def find_names_of_all_frameworks() -> List[str]:
backend_subclasses = []
backends = _backends.AbstractBackend.__subclasses__()
while backends:
backend = backends.pop()
backends += backend.__subclasses__()
backend_subclasses.append(backend)
return [b.framework_name for b in backend_subclasses]
FLAG_NAME = "EINOPS_TEST_BACKENDS"
@lru_cache(maxsize=1)
def parse_backends_to_test() -> List[str]:
if FLAG_NAME not in os.environ:
raise RuntimeError(f"Testing frameworks were not specified, flag {FLAG_NAME} not set")
parsed_backends = os.environ[FLAG_NAME].split(",")
_known_backends = find_names_of_all_frameworks()
for backend_name in parsed_backends:
if backend_name not in _known_backends:
raise RuntimeError(f"Unknown framework: {backend_name}")
return parsed_backends
def is_backend_tested(backend: str) -> bool:
if backend not in find_names_of_all_frameworks():
raise RuntimeError(f'Unknown framework {backend}')
return backend in parse_backends_to_test()
def unparse_backends(backend_names: List[str]) -> Tuple[str, str]:
_known_backends = find_names_of_all_frameworks()
for backend_name in backend_names:
if backend_name not in _known_backends:
raise RuntimeError(f"Unknown framework: {backend_name}")
return FLAG_NAME, ",".join(backend_names)
def collect_test_backends(symbolic=False, layers=False) -> List[_backends.AbstractBackend]:
"""
:param symbolic: symbolic or imperative frameworks?
:param layers: layers or operations?
:return: list of backends satisfying set conditions
"""
if not symbolic:
if not layers:
backend_types = [
_backends.NumpyBackend,
_backends.JaxBackend,
_backends.TorchBackend,
_backends.ChainerBackend,
_backends.TensorflowBackend,
_backends.OneFlowBackend,
_backends.PaddleBackend,
_backends.CupyBackend,
]
else:
backend_types = [
_backends.TorchBackend,
_backends.ChainerBackend,
_backends.OneFlowBackend,
_backends.PaddleBackend,
]
else:
if not layers:
backend_types = []
else:
backend_types = [
_backends.KerasBackend,
]
backend_names_to_test = parse_backends_to_test()
result = []
for backend_type in backend_types:
if backend_type.framework_name not in backend_names_to_test:
continue
try:
result.append(backend_type())
except ImportError:
            # a backend that fails to import breaks its dedicated installation test,
            # but is silently skipped by every other test case
warnings.warn("backend could not be initialized for tests: {}".format(backend_type))
return result
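# Minimal usage sketch (added; assumes the EINOPS_TEST_BACKENDS env var is set,
# e.g. to "numpy,torch"):
#   for backend in collect_test_backends(symbolic=False, layers=False):
#       x = backend.from_numpy(numpy.zeros([2, 3], dtype="float32"))
#       print(backend.framework_name, backend.to_numpy(x).shape)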
| einops-master | tests/__init__.py |
from typing import Dict
from io import StringIO
from tests import parse_backends_to_test, is_backend_tested
__author__ = "Alex Rogozhnikov"
from pathlib import Path
import nbformat
import pytest
from nbconvert.preprocessors import ExecutePreprocessor
def render_notebook(filename: Path, replacements: Dict[str, str]) -> str:
"""Takes path to the notebook, returns executed and rendered version
:param filename: notebook
:param replacements: dictionary with text replacements done before executing
:return: notebook, rendered as string
"""
with filename.open("r") as f:
nb_as_str = f.read()
for original, replacement in replacements.items():
nb_as_str = nb_as_str.replace(original, replacement)
nb = nbformat.read(StringIO(nb_as_str), nbformat.NO_CONVERT)
ep = ExecutePreprocessor(timeout=60, kernel_name="python3")
ep.preprocess(nb, {"metadata": {"path": str(filename.parent.absolute())}})
result_as_stream = StringIO()
nbformat.write(nb, result_as_stream)
return result_as_stream.getvalue()
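# Usage sketch (hypothetical path and replacement, mirroring the tests below):
#   rendered = render_notebook(
#       Path("docs/2-einops-for-deep-learning.ipynb"),
#       replacements={"flavour = 'pytorch'": "flavour = 'tensorflow'"},
#   )
#   assert "selected tensorflow backend" in rendered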
def test_notebook_1():
[notebook] = Path(__file__).parent.with_name("docs").glob("1-*.ipynb")
render_notebook(notebook, replacements={})
def test_notebook_2_with_all_backends():
[notebook] = Path(__file__).parent.with_name("docs").glob("2-*.ipynb")
backends = []
if is_backend_tested("torch"):
        # the notebook refers to the torch backend as 'pytorch'
backends.append("pytorch")
if is_backend_tested("tensorflow"):
backends.append("tensorflow")
if is_backend_tested("chainer"):
backends.append("chainer")
if len(backends) == 0:
pytest.skip()
for backend in backends:
print("Testing {} with backend {}".format(notebook, backend))
replacements = {"flavour = 'pytorch'": "flavour = '{}'".format(backend)}
expected_string = "selected {} backend".format(backend)
result = render_notebook(notebook, replacements=replacements)
assert expected_string in result
def test_notebook_3():
[notebook] = Path(__file__).parent.with_name("docs").glob("3-*.ipynb")
if not is_backend_tested("torch"):
pytest.skip()
render_notebook(notebook, replacements={})
def test_notebook_4():
[notebook] = Path(__file__).parent.with_name("docs").glob("4-*.ipynb")
if not is_backend_tested("torch"):
pytest.skip()
render_notebook(notebook, replacements={})
| einops-master | tests/test_notebooks.py |
import dataclasses
import typing
import numpy as np
import pytest
from einops import EinopsError, asnumpy, pack, unpack
from tests import collect_test_backends
def pack_unpack(xs, pattern):
x, ps = pack(xs, pattern)
unpacked = unpack(xs, ps, pattern)
assert len(unpacked) == len(xs)
for a, b in zip(unpacked, xs):
assert np.allclose(asnumpy(a), asnumpy(b))
def unpack_and_pack(x, ps, pattern: str):
unpacked = unpack(x, ps, pattern)
packed, ps2 = pack(unpacked, pattern=pattern)
assert np.allclose(asnumpy(packed), asnumpy(x))
return unpacked
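# Both helpers above check the same round-trip invariant from opposite sides:
# pack followed by unpack (and vice versa) must reproduce the original tensors,
# with `ps` (the per-tensor packed shapes returned by pack) recording how the
# '*' axis is split apart again. A minimal sketch (added; not collected by
# pytest, as the name lacks the 'test_' prefix):
def _pack_roundtrip_demo():
    r, g, b = np.zeros((4, 5)), np.zeros((4, 5)), np.zeros((4, 5))
    # each input contributes no extra dims under '*', so they are stacked
    packed, ps = pack([r, g, b], "h w *")
    assert packed.shape == (4, 5, 3) and ps == [(), (), ()]
    r2, g2, b2 = unpack(packed, ps, "h w *")
    assert r2.shape == (4, 5)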
def unpack_and_pack_against_numpy(x, ps, pattern: str):
capturer_backend = CaptureException()
capturer_numpy = CaptureException()
with capturer_backend:
unpacked = unpack(x, ps, pattern)
packed, ps2 = pack(unpacked, pattern=pattern)
with capturer_numpy:
x_np = asnumpy(x)
unpacked_np = unpack(x_np, ps, pattern)
packed_np, ps3 = pack(unpacked_np, pattern=pattern)
assert type(capturer_numpy.exception) == type(capturer_backend.exception)
if capturer_numpy.exception is not None:
# both failed
return
else:
# neither failed, check results are identical
assert np.allclose(asnumpy(packed), asnumpy(x))
assert np.allclose(asnumpy(packed_np), asnumpy(x))
assert len(unpacked) == len(unpacked_np)
for a, b in zip(unpacked, unpacked_np):
assert np.allclose(asnumpy(a), b)
class CaptureException:
def __enter__(self):
self.exception = None
def __exit__(self, exc_type, exc_val, exc_tb):
self.exception = exc_val
return True
def test_numpy_trivial(H=13, W=17):
def rand(*shape):
return np.random.random(shape)
def check(a, b):
assert a.dtype == b.dtype
assert a.shape == b.shape
assert np.all(a == b)
r, g, b = rand(3, H, W)
embeddings = rand(H, W, 32)
check(
np.stack([r, g, b], axis=2),
pack([r, g, b], "h w *")[0],
)
check(
np.stack([r, g, b], axis=1),
pack([r, g, b], "h * w")[0],
)
check(
np.stack([r, g, b], axis=0),
pack([r, g, b], "* h w")[0],
)
check(
np.concatenate([r, g, b], axis=1),
pack([r, g, b], "h *")[0],
)
check(
np.concatenate([r, g, b], axis=0),
pack([r, g, b], "* w")[0],
)
i = np.index_exp[:, :, None]
check(
np.concatenate([r[i], g[i], b[i], embeddings], axis=2),
pack([r, g, b, embeddings], "h w *")[0],
)
with pytest.raises(EinopsError):
pack([r, g, b, embeddings], "h w nonexisting_axis *")
pack([r, g, b], "some_name_for_H some_name_for_w1 *")
with pytest.raises(EinopsError):
pack([r, g, b, embeddings], "h _w *") # no leading underscore
with pytest.raises(EinopsError):
pack([r, g, b, embeddings], "h_ w *") # no trailing underscore
with pytest.raises(EinopsError):
pack([r, g, b, embeddings], "1h_ w *")
with pytest.raises(EinopsError):
pack([r, g, b, embeddings], "1 w *")
with pytest.raises(EinopsError):
pack([r, g, b, embeddings], "h h *")
# capital and non-capital are different
pack([r, g, b, embeddings], "h H *")
@dataclasses.dataclass
class UnpackTestCase:
shape: typing.Tuple[int, ...]
pattern: str
def dim(self):
return self.pattern.split().index("*")
def selfcheck(self):
assert self.shape[self.dim()] == 5
cases = [
    # NB: in every case the axis being unpacked has length 5;
    # the tests below rely on this.
UnpackTestCase((5,), "*"),
UnpackTestCase((5, 7), "* seven"),
UnpackTestCase((7, 5), "seven *"),
UnpackTestCase((5, 3, 4), "* three four"),
UnpackTestCase((4, 5, 3), "four * three"),
UnpackTestCase((3, 4, 5), "three four *"),
]
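# Illustration of the `ps` bookkeeping for the "seven *" case above
# (a sketch, assuming numpy arrays of the stated shapes):
#   packed, ps = pack([np.zeros((7, 2)), np.zeros((7, 1)), np.zeros((7, 2))], "seven *")
#   # packed.shape == (7, 5); ps == [(2,), (1,), (2,)]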
def test_pack_unpack_with_numpy():
case: UnpackTestCase
for case in cases:
shape = case.shape
pattern = case.pattern
x = np.random.random(shape)
# all correct, no minus 1
unpack_and_pack(x, [[2], [1], [2]], pattern)
# no -1, asking for wrong shapes
with pytest.raises(BaseException):
unpack_and_pack(x, [[2], [1], [2]], pattern + " non_existent_axis")
with pytest.raises(BaseException):
unpack_and_pack(x, [[2], [1], [1]], pattern)
with pytest.raises(BaseException):
unpack_and_pack(x, [[4], [1], [1]], pattern)
# all correct, with -1
unpack_and_pack(x, [[2], [1], [-1]], pattern)
unpack_and_pack(x, [[2], [-1], [2]], pattern)
unpack_and_pack(x, [[-1], [1], [2]], pattern)
_, _, last = unpack_and_pack(x, [[2], [3], [-1]], pattern)
assert last.shape[case.dim()] == 0
# asking for more elements than available
with pytest.raises(BaseException):
unpack(x, [[2], [4], [-1]], pattern)
# this one does not raise, because indexing x[2:1] just returns zero elements
# with pytest.raises(BaseException):
# unpack(x, [[2], [-1], [4]], pattern)
with pytest.raises(BaseException):
unpack(x, [[-1], [1], [5]], pattern)
# all correct, -1 nested
rs = unpack_and_pack(x, [[1, 2], [1, 1], [-1, 1]], pattern)
assert all(len(r.shape) == len(x.shape) + 1 for r in rs)
rs = unpack_and_pack(x, [[1, 2], [1, -1], [1, 1]], pattern)
assert all(len(r.shape) == len(x.shape) + 1 for r in rs)
rs = unpack_and_pack(x, [[2, -1], [1, 2], [1, 1]], pattern)
assert all(len(r.shape) == len(x.shape) + 1 for r in rs)
# asking for more elements, -1 nested
with pytest.raises(BaseException):
unpack(x, [[-1, 2], [1], [5]], pattern)
with pytest.raises(BaseException):
unpack(x, [[2, 2], [2], [5, -1]], pattern)
# asking for non-divisible number of elements
with pytest.raises(BaseException):
unpack(x, [[2, 1], [1], [3, -1]], pattern)
with pytest.raises(BaseException):
unpack(x, [[2, 1], [3, -1], [1]], pattern)
with pytest.raises(BaseException):
unpack(x, [[3, -1], [2, 1], [1]], pattern)
# -1 takes zero
unpack_and_pack(x, [[0], [5], [-1]], pattern)
unpack_and_pack(x, [[0], [-1], [5]], pattern)
unpack_and_pack(x, [[-1], [5], [0]], pattern)
# -1 takes zero, -1
unpack_and_pack(x, [[2, -1], [1, 5]], pattern)
def test_pack_unpack_against_numpy():
for backend in collect_test_backends(symbolic=False, layers=False):
print(f"test packing against numpy for {backend.framework_name}")
check_zero_len = True
for case in cases:
unpack_and_pack = unpack_and_pack_against_numpy
shape = case.shape
pattern = case.pattern
x = np.random.random(shape)
x = backend.from_numpy(x)
# all correct, no minus 1
unpack_and_pack(x, [[2], [1], [2]], pattern)
# no -1, asking for wrong shapes
with pytest.raises(BaseException):
unpack(x, [[2], [1], [1]], pattern)
with pytest.raises(BaseException):
unpack(x, [[4], [1], [1]], pattern)
# all correct, with -1
unpack_and_pack(x, [[2], [1], [-1]], pattern)
unpack_and_pack(x, [[2], [-1], [2]], pattern)
unpack_and_pack(x, [[-1], [1], [2]], pattern)
# asking for more elements than available
with pytest.raises(BaseException):
unpack(x, [[2], [4], [-1]], pattern)
# this one does not raise, because indexing x[2:1] just returns zero elements
# with pytest.raises(BaseException):
# unpack(x, [[2], [-1], [4]], pattern)
with pytest.raises(BaseException):
unpack(x, [[-1], [1], [5]], pattern)
# all correct, -1 nested
unpack_and_pack(x, [[1, 2], [1, 1], [-1, 1]], pattern)
unpack_and_pack(x, [[1, 2], [1, -1], [1, 1]], pattern)
unpack_and_pack(x, [[2, -1], [1, 2], [1, 1]], pattern)
# asking for more elements, -1 nested
with pytest.raises(BaseException):
unpack(x, [[-1, 2], [1], [5]], pattern)
with pytest.raises(BaseException):
unpack(x, [[2, 2], [2], [5, -1]], pattern)
# asking for non-divisible number of elements
with pytest.raises(BaseException):
unpack(x, [[2, 1], [1], [3, -1]], pattern)
with pytest.raises(BaseException):
unpack(x, [[2, 1], [3, -1], [1]], pattern)
with pytest.raises(BaseException):
unpack(x, [[3, -1], [2, 1], [1]], pattern)
if check_zero_len:
# -1 takes zero
unpack_and_pack(x, [[2], [3], [-1]], pattern)
unpack_and_pack(x, [[0], [5], [-1]], pattern)
unpack_and_pack(x, [[0], [-1], [5]], pattern)
unpack_and_pack(x, [[-1], [5], [0]], pattern)
# -1 takes zero, -1
unpack_and_pack(x, [[2, -1], [1, 5]], pattern)
def test_pack_unpack_array_api():
from einops import array_api as AA
import numpy.array_api as xp
for case in cases:
shape = case.shape
pattern = case.pattern
x_np = np.random.random(shape)
x_xp = xp.from_dlpack(x_np)
for ps in [
[[2], [1], [2]],
[[1], [1], [-1]],
[[1], [1], [-1, 3]],
[[2, 1], [1, 1, 1], [-1]],
]:
x_np_split = unpack(x_np, ps, pattern)
x_xp_split = AA.unpack(x_xp, ps, pattern)
for a, b in zip(x_np_split, x_xp_split):
assert np.allclose(a, AA.asnumpy(b + 0))
x_agg_np, ps1 = pack(x_np_split, pattern)
x_agg_xp, ps2 = AA.pack(x_xp_split, pattern)
assert ps1 == ps2
assert np.allclose(x_agg_np, AA.asnumpy(x_agg_xp))
for ps in [
[[2, 3]],
[[1], [5]],
[[1], [5], [-1]],
[[1], [2, 3]],
[[1], [5], [-1, 2]],
]:
with pytest.raises(BaseException):
unpack(x_np, ps, pattern)
| einops-master | tests/test_packing.py |
from typing import Any, Callable
from . import collect_test_backends
from einops.einops import _compactify_pattern_for_einsum, einsum, EinopsError
import numpy as np
import pytest
import string
class Arguments:
def __init__(self, *args: Any, **kargs: Any):
self.args = args
self.kwargs = kargs
def __call__(self, function: Callable):
return function(*self.args, **self.kwargs)
test_layer_cases = [
(
Arguments('b c_in h w -> w c_out h b', 'c_in c_out', bias_shape=None, c_out=13, c_in=12),
(2, 12, 3, 4),
(4, 13, 3, 2),
),
(
Arguments('b c_in h w -> w c_out h b', 'c_in c_out', bias_shape='c_out', c_out=13, c_in=12),
(2, 12, 3, 4),
(4, 13, 3, 2),
),
(
Arguments('b c_in h w -> w c_in h b', '', bias_shape=None, c_in=12),
(2, 12, 3, 4),
(4, 12, 3, 2),
),
(
Arguments('b c_in h w -> b c_out', 'c_in h w c_out', bias_shape=None, c_in=12, h=3, w=4, c_out=5),
(2, 12, 3, 4),
(2, 5),
),
(
Arguments('b t head c_in -> b t head c_out', 'head c_in c_out', bias_shape=None, head=4, c_in=5, c_out=6),
(2, 3, 4, 5),
(2, 3, 4, 6),
),
]
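# Reading the first case (a paraphrase, not from the original source):
#   EinMix('b c_in h w -> w c_out h b', weight_shape='c_in c_out', c_in=12, c_out=13)
# learns a (12, 13) weight contracted with the channel axis at every (b, h, w)
# position while the remaining axes are only permuted; passing
# bias_shape='c_out' additionally adds a learned per-output-channel bias.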
# Each of the form:
# (einops_pattern, true_einsum_pattern, in_shapes, out_shape)
test_functional_cases = [
(
# Basic:
"b c h w, b w -> b h",
"abcd,ad->ac",
((2, 3, 4, 5), (2, 5)),
(2, 4),
),
(
# Three tensors:
"b c h w, b w, b c -> b h",
"abcd,ad,ab->ac",
((2, 3, 40, 5), (2, 5), (2, 3)),
(2, 40),
),
(
# Ellipsis, and full names:
"... one two three, three four five -> ... two five",
"...abc,cde->...be",
((32, 5, 2, 3, 4), (4, 5, 6)),
(32, 5, 3, 6),
),
(
# Ellipsis at the end:
"one two three ..., three four five -> two five ...",
"abc...,cde->be...",
((2, 3, 4, 32, 5), (4, 5, 6)),
(3, 6, 32, 5),
),
(
# Ellipsis on multiple tensors:
"... one two three, ... three four five -> ... two five",
"...abc,...cde->...be",
((32, 5, 2, 3, 4), (32, 5, 4, 5, 6)),
(32, 5, 3, 6),
),
(
# One tensor, and underscores:
"first_tensor second_tensor -> first_tensor",
"ab->a",
((5, 4),),
(5,),
),
(
# Trace (repeated index)
"i i -> ",
"aa->",
((5, 5),),
(),
),
(
# Too many spaces in string:
" one two , three four->two four ",
"ab,cd->bd",
((2, 3), (4, 5)),
(3, 5),
),
# The following tests were inspired by numpy's einsum tests
# https://github.com/numpy/numpy/blob/v1.23.0/numpy/core/tests/test_einsum.py
(
# Trace with other indices
"i middle i -> middle",
"aba->b",
((5, 10, 5),),
(10,),
),
(
# Ellipsis in the middle:
"i ... i -> ...",
"a...a->...",
((5, 3, 2, 1, 4, 5),),
(3, 2, 1, 4),
),
(
# Product of first and last axes:
"i ... i -> i ...",
"a...a->a...",
((5, 3, 2, 1, 4, 5),),
(5, 3, 2, 1, 4),
),
(
# Triple diagonal
"one one one -> one",
"aaa->a",
((5, 5, 5),),
(5,),
),
(
# Axis swap:
"i j k -> j i k",
"abc->bac",
((1, 2, 3),),
(2, 1, 3),
),
(
# Identity:
"... -> ...",
"...->...",
((5, 4, 3, 2, 1),),
(5, 4, 3, 2, 1),
),
(
# Elementwise product of three tensors
"..., ..., ... -> ...",
"...,...,...->...",
((3, 2), (3, 2), (3, 2)),
(3, 2),
),
(
# Basic summation:
"index ->",
"a->",
        ((10,),),
        (),
),
]
def test_layer():
for backend in collect_test_backends(layers=True, symbolic=False):
if backend.framework_name in ['tensorflow', 'torch', 'chainer', 'oneflow', 'paddle']:
layer_type = backend.layers().EinMix
for args, in_shape, out_shape in test_layer_cases:
layer = args(layer_type)
print('Running', layer.einsum_pattern, 'for', backend.framework_name)
input = np.random.uniform(size=in_shape).astype('float32')
input_framework = backend.from_numpy(input)
output_framework = layer(input_framework)
output = backend.to_numpy(output_framework)
assert output.shape == out_shape
valid_backends_functional = ['tensorflow', 'torch', 'jax', 'numpy',
'chainer', 'oneflow', 'cupy', 'tensorflow.keras', 'paddle']
def test_functional():
# Functional tests:
backends = filter(lambda x: x.framework_name in valid_backends_functional,
collect_test_backends())
for backend in backends:
for einops_pattern, true_pattern, in_shapes, out_shape in test_functional_cases:
print(f"Running '{einops_pattern}' for {backend.framework_name}")
# Create pattern:
predicted_pattern = _compactify_pattern_for_einsum(einops_pattern)
assert predicted_pattern == true_pattern
# Generate example data:
rstate = np.random.RandomState(0)
in_arrays = [
rstate.uniform(size=shape).astype('float32')
for shape in in_shapes
]
in_arrays_framework = [
backend.from_numpy(array) for array in in_arrays
]
# Loop over whether we call it manually with the backend,
# or whether we use `einops.einsum`.
for do_manual_call in [True, False]:
# Actually run einsum:
if do_manual_call:
out_array = backend.einsum(predicted_pattern, *in_arrays_framework)
else:
out_array = einsum(*in_arrays_framework, einops_pattern)
# Check shape:
if tuple(out_array.shape) != out_shape:
raise ValueError(
f"Expected output shape {out_shape} but got {out_array.shape}"
)
# Check values:
true_out_array = np.einsum(true_pattern, *in_arrays)
predicted_out_array = backend.to_numpy(out_array)
np.testing.assert_array_almost_equal(predicted_out_array,
true_out_array,
decimal=5)
def test_functional_symbolic():
backends = filter(lambda x: x.framework_name in valid_backends_functional,
collect_test_backends(symbolic=True, layers=False))
for backend in backends:
for einops_pattern, true_pattern, in_shapes, out_shape in test_functional_cases:
print(f"Running '{einops_pattern}' for symbolic {backend.framework_name}")
# Create pattern:
predicted_pattern = _compactify_pattern_for_einsum(einops_pattern)
assert predicted_pattern == true_pattern
rstate = np.random.RandomState(0)
in_syms = [backend.create_symbol(in_shape) for in_shape in in_shapes]
in_data = [rstate.uniform(size=in_shape).astype('float32') for in_shape in in_shapes]
expected_out_data = np.einsum(true_pattern, *in_data)
for do_manual_call in [True, False]:
if do_manual_call:
predicted_out_symbol = backend.einsum(predicted_pattern, *in_syms)
else:
predicted_out_symbol = einsum(*in_syms, einops_pattern)
predicted_out_data = backend.eval_symbol(
predicted_out_symbol,
list(zip(in_syms, in_data)),
)
if predicted_out_data.shape != out_shape:
raise ValueError(
f"Expected output shape {out_shape} but got {predicted_out_data.shape}"
)
                np.testing.assert_array_almost_equal(predicted_out_data,
                                                     expected_out_data,
                                                     decimal=5)
def test_functional_errors():
# Specific backend does not matter, as errors are raised
# during the pattern creation.
rstate = np.random.RandomState(0)
create_tensor = lambda *shape: rstate.uniform(size=shape).astype('float32')
# raise NotImplementedError("Singleton () axes are not yet supported in einsum.")
with pytest.raises(NotImplementedError, match="^Singleton"):
einsum(
create_tensor(5, 1),
"i () -> i",
)
# raise NotImplementedError("Shape rearrangement is not yet supported in einsum.")
with pytest.raises(NotImplementedError, match="^Shape rearrangement"):
einsum(
create_tensor(5, 1),
"a b -> (a b)",
)
with pytest.raises(NotImplementedError, match="^Shape rearrangement"):
einsum(
create_tensor(10, 1),
"(a b) -> a b",
)
# raise RuntimeError("Encountered empty axis name in einsum.")
# raise RuntimeError("Axis name in einsum must be a string.")
# ^ Not tested, these are just a failsafe in case an unexpected error occurs.
# raise NotImplementedError("Anonymous axes are not yet supported in einsum.")
with pytest.raises(NotImplementedError, match="^Anonymous axes"):
einsum(
create_tensor(5, 1),
"i 2 -> i",
)
# ParsedExpression error:
with pytest.raises(EinopsError, match="^Invalid axis identifier"):
einsum(
create_tensor(5, 1),
"i 2j -> i",
)
# raise ValueError("Einsum pattern must contain '->'.")
with pytest.raises(ValueError, match="^Einsum pattern"):
einsum(
create_tensor(5, 3, 2),
"i j k",
)
# raise RuntimeError("Too many axes in einsum.")
with pytest.raises(RuntimeError, match="^Too many axes"):
einsum(
create_tensor(1),
" ".join(string.ascii_letters) + " extra ->",
)
# raise RuntimeError("Unknown axis on right side of einsum.")
with pytest.raises(RuntimeError, match="^Unknown axis"):
einsum(
create_tensor(5, 1),
"i j -> k",
)
# raise ValueError(
# "The last argument passed to `einops.einsum` must be a string,"
# " representing the einsum pattern."
# )
with pytest.raises(ValueError, match="^The last argument"):
einsum(
"i j k -> i",
create_tensor(5, 4, 3),
)
# raise ValueError(
# "`einops.einsum` takes at minimum two arguments: the tensors,"
# " followed by the pattern."
# )
with pytest.raises(ValueError, match="^`einops.einsum` takes"):
einsum(
"i j k -> i",
)
with pytest.raises(ValueError, match="^`einops.einsum` takes"):
einsum(
create_tensor(5, 1),
)
# TODO: Include check for giving normal einsum pattern rather than einops.
| einops-master | tests/test_einsum.py |
import itertools
import numpy
import pytest
from einops import EinopsError
from einops.einops import rearrange, reduce, repeat, _enumerate_directions, _reductions
from . import collect_test_backends, is_backend_tested
imp_op_backends = collect_test_backends(symbolic=False, layers=False)
sym_op_backends = collect_test_backends(symbolic=True, layers=False)
identity_patterns = [
"...->...",
"a b c d e-> a b c d e",
"a b c d e ...-> ... a b c d e",
"a b c d e ...-> a ... b c d e",
"... a b c d e -> ... a b c d e",
"a ... e-> a ... e",
"a ... -> a ... ",
"a ... c d e -> a (...) c d e",
]
equivalent_rearrange_patterns = [
("a b c d e -> (a b) c d e", "a b ... -> (a b) ... "),
("a b c d e -> a b (c d) e", "... c d e -> ... (c d) e"),
("a b c d e -> a b c d e", "... -> ... "),
("a b c d e -> (a b c d e)", "... -> (...)"),
("a b c d e -> b (c d e) a", "a b ... -> b (...) a"),
("a b c d e -> b (a c d) e", "a b ... e -> b (a ...) e"),
]
equivalent_reduction_patterns = [
("a b c d e -> ", " ... -> "),
("a b c d e -> (e a)", "a ... e -> (e a)"),
("a b c d e -> d (a e)", " a b c d e ... -> d (a e) "),
("a b c d e -> (a b)", " ... c d e -> (...) "),
]
def test_collapsed_ellipsis_errors_out():
x = numpy.zeros([1, 1, 1, 1, 1])
rearrange(x, "a b c d ... -> a b c ... d")
with pytest.raises(EinopsError):
rearrange(x, "a b c d (...) -> a b c ... d")
rearrange(x, "... -> (...)")
with pytest.raises(EinopsError):
rearrange(x, "(...) -> (...)")
def test_ellipsis_ops_numpy():
x = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
for pattern in identity_patterns:
assert numpy.array_equal(x, rearrange(x, pattern)), pattern
for pattern1, pattern2 in equivalent_rearrange_patterns:
assert numpy.array_equal(rearrange(x, pattern1), rearrange(x, pattern2))
for reduction in ["min", "max", "sum"]:
for pattern1, pattern2 in equivalent_reduction_patterns:
assert numpy.array_equal(reduce(x, pattern1, reduction=reduction), reduce(x, pattern2, reduction=reduction))
# now just check coincidence with numpy
all_rearrange_patterns = [*identity_patterns]
for pattern_pairs in equivalent_rearrange_patterns:
all_rearrange_patterns.extend(pattern_pairs)
def check_op_against_numpy(backend, numpy_input, pattern, axes_lengths, reduction="rearrange", is_symbolic=False):
"""
    Helper to test the result of an operation (rearrange or reduce) against numpy:
    if reduction == 'rearrange', the rearrange op is tested, otherwise reduce.
"""
def operation(x):
if reduction == "rearrange":
return rearrange(x, pattern, **axes_lengths)
else:
return reduce(x, pattern, reduction, **axes_lengths)
numpy_result = operation(numpy_input)
check_equal = numpy.array_equal
p_none_dimension = 0.5
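    # (added note) with probability 0.5 each dimension of the symbolic
    # placeholder is declared as None, so unknown-shape handling is exercised
    # alongside fully specified shapes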
if is_symbolic:
symbol_shape = [d if numpy.random.random() >= p_none_dimension else None for d in numpy_input.shape]
symbol = backend.create_symbol(shape=symbol_shape)
result_symbol = operation(symbol)
backend_result = backend.eval_symbol(result_symbol, [(symbol, numpy_input)])
else:
backend_result = operation(backend.from_numpy(numpy_input))
backend_result = backend.to_numpy(backend_result)
    assert check_equal(numpy_result, backend_result)
def test_ellipsis_ops_imperative():
"""Checking various patterns against numpy"""
x = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
for is_symbolic in [True, False]:
for backend in collect_test_backends(symbolic=is_symbolic, layers=False):
for pattern in identity_patterns + list(itertools.chain(*equivalent_rearrange_patterns)):
check_op_against_numpy(
backend, x, pattern, axes_lengths={}, reduction="rearrange", is_symbolic=is_symbolic
)
for reduction in ["min", "max", "sum"]:
for pattern in itertools.chain(*equivalent_reduction_patterns):
check_op_against_numpy(
backend, x, pattern, axes_lengths={}, reduction=reduction, is_symbolic=is_symbolic
)
def test_rearrange_array_api():
import numpy.array_api as xp
from einops import array_api as AA
x = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
for pattern in identity_patterns + list(itertools.chain(*equivalent_rearrange_patterns)):
expected = rearrange(x, pattern)
result = AA.rearrange(xp.from_dlpack(x), pattern)
assert numpy.array_equal(AA.asnumpy(result + 0), expected)
def test_reduce_array_api():
import numpy.array_api as xp
from einops import array_api as AA
x = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
for pattern in itertools.chain(*equivalent_reduction_patterns):
for reduction in ["min", "max", "sum"]:
expected = reduce(x, pattern, reduction=reduction)
result = AA.reduce(xp.from_dlpack(x), pattern, reduction=reduction)
assert numpy.array_equal(AA.asnumpy(result + 0), expected)
def test_rearrange_consistency_numpy():
shape = [1, 2, 3, 5, 7, 11]
x = numpy.arange(numpy.prod(shape)).reshape(shape)
for pattern in [
"a b c d e f -> a b c d e f",
"b a c d e f -> a b d e f c",
"a b c d e f -> f e d c b a",
"a b c d e f -> (f e) d (c b a)",
"a b c d e f -> (f e d c b a)",
]:
result = rearrange(x, pattern)
assert len(numpy.setdiff1d(x, result)) == 0
assert result.dtype == x.dtype
result = rearrange(x, "a b c d e f -> a (b) (c d e) f")
assert numpy.array_equal(x.flatten(), result.flatten())
result = rearrange(x, "a aa aa1 a1a1 aaaa a11 -> a aa aa1 a1a1 aaaa a11")
assert numpy.array_equal(x, result)
result1 = rearrange(x, "a b c d e f -> f e d c b a")
result2 = rearrange(x, "f e d c b a -> a b c d e f")
assert numpy.array_equal(result1, result2)
result = rearrange(rearrange(x, "a b c d e f -> (f d) c (e b) a"), "(f d) c (e b) a -> a b c d e f", b=2, d=5)
assert numpy.array_equal(x, result)
sizes = dict(zip("abcdef", shape))
temp = rearrange(x, "a b c d e f -> (f d) c (e b) a", **sizes)
result = rearrange(temp, "(f d) c (e b) a -> a b c d e f", **sizes)
assert numpy.array_equal(x, result)
x2 = numpy.arange(2 * 3 * 4).reshape([2, 3, 4])
result = rearrange(x2, "a b c -> b c a")
assert x2[1, 2, 3] == result[2, 3, 1]
assert x2[0, 1, 2] == result[1, 2, 0]
def test_rearrange_permutations_numpy():
# tests random permutation of axes against two independent numpy ways
for n_axes in range(1, 10):
input = numpy.arange(2**n_axes).reshape([2] * n_axes)
permutation = numpy.random.permutation(n_axes)
left_expression = " ".join("i" + str(axis) for axis in range(n_axes))
right_expression = " ".join("i" + str(axis) for axis in permutation)
expression = left_expression + " -> " + right_expression
result = rearrange(input, expression)
for pick in numpy.random.randint(0, 2, [10, n_axes]):
assert input[tuple(pick)] == result[tuple(pick[permutation])]
for n_axes in range(1, 10):
input = numpy.arange(2**n_axes).reshape([2] * n_axes)
permutation = numpy.random.permutation(n_axes)
left_expression = " ".join("i" + str(axis) for axis in range(n_axes)[::-1])
right_expression = " ".join("i" + str(axis) for axis in permutation[::-1])
expression = left_expression + " -> " + right_expression
result = rearrange(input, expression)
assert result.shape == input.shape
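        # (added note) every element of `input` equals its own flat index, i.e.
        # the binary word spelled by its coordinates (all axes have length 2);
        # permuting axes therefore permutes bits, which the loop below
        # reproduces independently of einops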
expected_result = numpy.zeros_like(input)
for original_axis, result_axis in enumerate(permutation):
expected_result |= ((input >> original_axis) & 1) << result_axis
assert numpy.array_equal(result, expected_result)
def test_reduction_imperatives():
for backend in imp_op_backends:
print("Reduction tests for ", backend.framework_name)
for reduction in _reductions:
            # slightly redundant (the numpy reference results are recomputed per reduction), but keeps the loop ordering simple
input = numpy.arange(2 * 3 * 4 * 5 * 6, dtype="int64").reshape([2, 3, 4, 5, 6])
if reduction in ["mean", "prod"]:
input = input / input.astype("float64").mean()
test_cases = [
["a b c d e -> ", {}, getattr(input, reduction)()],
["a ... -> ", {}, getattr(input, reduction)()],
["(a1 a2) ... (e1 e2) -> ", dict(a1=1, e2=2), getattr(input, reduction)()],
[
"a b c d e -> (e c) a",
{},
getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2]),
],
[
"a ... c d e -> (e c) a",
{},
getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2]),
],
[
"a b c d e ... -> (e c) a",
{},
getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2]),
],
["a b c d e -> (e c a)", {}, getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1])],
["(a a2) ... -> (a2 a) ...", dict(a2=1), input],
]
for pattern, axes_lengths, expected_result in test_cases:
result = reduce(backend.from_numpy(input.copy()), pattern, reduction=reduction, **axes_lengths)
result = backend.to_numpy(result)
assert numpy.allclose(result, expected_result), f"Failed at {pattern}"
def test_reduction_symbolic():
for backend in sym_op_backends:
print("Reduction tests for ", backend.framework_name)
for reduction in _reductions:
input = numpy.arange(2 * 3 * 4 * 5 * 6, dtype="int64").reshape([2, 3, 4, 5, 6])
input = input / input.astype("float64").mean()
            # slightly redundant (the numpy reference results are recomputed per reduction), but keeps the loop ordering simple
test_cases = [
["a b c d e -> ", {}, getattr(input, reduction)()],
["a ... -> ", {}, getattr(input, reduction)()],
["(a a2) ... (e e2) -> ", dict(a2=1, e2=1), getattr(input, reduction)()],
[
"a b c d e -> (e c) a",
{},
getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2]),
],
[
"a ... c d e -> (e c) a",
{},
getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2]),
],
[
"a b c d e ... -> (e c) a",
{},
getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2]),
],
["a b c d e -> (e c a)", {}, getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1])],
["(a a2) ... -> (a2 a) ...", dict(a2=1), input],
]
for pattern, axes_lengths, expected_numpy_result in test_cases:
shapes = [input.shape, [None for _ in input.shape]]
for shape in shapes:
sym = backend.create_symbol(shape)
result_sym = reduce(sym, pattern, reduction=reduction, **axes_lengths)
result = backend.eval_symbol(result_sym, [(sym, input)])
assert numpy.allclose(result, expected_numpy_result)
                shape = []
                _axes_lengths = {**axes_lengths}
                for axis, length in zip("abcde", input.shape):
                    # filling as much as possible with Nones
                    if axis in pattern:
                        shape.append(None)
                        _axes_lengths[axis] = length
                    else:
                        shape.append(length)
                sym = backend.create_symbol(shape)
                result_sym = reduce(sym, pattern, reduction=reduction, **_axes_lengths)
                result = backend.eval_symbol(result_sym, [(sym, input)])
                assert numpy.allclose(result, expected_numpy_result)
def test_reduction_stress_imperatives():
for backend in imp_op_backends:
print("Stress-testing reduction for ", backend.framework_name)
for reduction in _reductions + ("rearrange",):
dtype = "int64"
coincide = numpy.array_equal
if reduction in ["mean", "prod"]:
dtype = "float64"
coincide = numpy.allclose
max_dim = 11
if "oneflow" in backend.framework_name:
max_dim = 7
if "paddle" in backend.framework_name:
max_dim = 9
for n_axes in range(max_dim):
shape = numpy.random.randint(2, 4, size=n_axes)
permutation = numpy.random.permutation(n_axes)
skipped = 0 if reduction == "rearrange" else numpy.random.randint(n_axes + 1)
left = " ".join("x" + str(i) for i in range(n_axes))
right = " ".join("x" + str(i) for i in permutation[skipped:])
pattern = left + "->" + right
x = numpy.arange(1, 1 + numpy.prod(shape), dtype=dtype).reshape(shape)
if reduction == "prod":
x /= x.mean() # to avoid overflows
result1 = reduce(x, pattern, reduction=reduction)
result2 = x.transpose(permutation)
if skipped > 0:
result2 = getattr(result2, reduction)(axis=tuple(range(skipped)))
assert coincide(result1, result2)
check_op_against_numpy(backend, x, pattern, reduction=reduction, axes_lengths={}, is_symbolic=False)
def test_reduction_with_callable_imperatives():
x_numpy = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6]).astype("float32")
x_numpy /= x_numpy.max()
def logsumexp_torch(x, tuple_of_axes):
return x.logsumexp(tuple_of_axes)
def logsumexp_tf(x, tuple_of_axes):
import tensorflow as tf
return tf.reduce_logsumexp(x, tuple_of_axes)
def logsumexp_chainer(x, tuple_of_axes):
import chainer
return chainer.functions.logsumexp(x, tuple_of_axes)
def logsumexp_keras(x, tuple_of_axes):
import tensorflow.keras.backend as k
return k.logsumexp(x, tuple_of_axes)
def logsumexp_numpy(x, tuple_of_axes):
# very naive logsumexp to compare to
minused = x.max(tuple_of_axes)
y = x - x.max(tuple_of_axes, keepdims=True)
y = numpy.exp(y)
y = numpy.sum(y, axis=tuple_of_axes)
return numpy.log(y) + minused
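    # (added note) this is the standard stabilized identity
    #   logsumexp(x) = max(x) + log(sum(exp(x - max(x))))
    # which avoids overflow in exp for large inputs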
from einops._backends import TorchBackend, ChainerBackend, TensorflowBackend, KerasBackend, NumpyBackend
backend2callback = {
TorchBackend.framework_name: logsumexp_torch,
ChainerBackend.framework_name: logsumexp_chainer,
TensorflowBackend.framework_name: logsumexp_tf,
KerasBackend.framework_name: logsumexp_keras,
NumpyBackend.framework_name: logsumexp_numpy,
}
for backend in imp_op_backends:
if backend.framework_name not in backend2callback:
continue
backend_callback = backend2callback[backend.framework_name]
x_backend = backend.from_numpy(x_numpy)
for pattern1, pattern2 in equivalent_reduction_patterns:
print("Test reduction with callable for ", backend.framework_name, pattern1, pattern2)
output_numpy = reduce(x_numpy, pattern1, reduction=logsumexp_numpy)
output_backend = reduce(x_backend, pattern1, reduction=backend_callback)
assert numpy.allclose(
output_numpy,
backend.to_numpy(output_backend),
)
def test_enumerating_directions():
for backend in imp_op_backends:
print("testing directions for", backend.framework_name)
for shape in [[], [1], [1, 1, 1], [2, 3, 5, 7]]:
x = numpy.arange(numpy.prod(shape)).reshape(shape)
axes1 = _enumerate_directions(x)
axes2 = _enumerate_directions(backend.from_numpy(x))
assert len(axes1) == len(axes2) == len(shape)
for ax1, ax2 in zip(axes1, axes2):
ax2 = backend.to_numpy(ax2)
assert ax1.shape == ax2.shape
assert numpy.allclose(ax1, ax2)
def test_concatenations_and_stacking():
for backend in imp_op_backends:
print("testing shapes for ", backend.framework_name)
for n_arrays in [1, 2, 5]:
shapes = [[], [1], [1, 1], [2, 3, 5, 7], [1] * 6]
for shape in shapes:
arrays1 = [numpy.arange(i, i + numpy.prod(shape)).reshape(shape) for i in range(n_arrays)]
arrays2 = [backend.from_numpy(array) for array in arrays1]
result0 = numpy.asarray(arrays1)
result1 = rearrange(arrays1, "...->...")
result2 = rearrange(arrays2, "...->...")
assert numpy.array_equal(result0, result1)
assert numpy.array_equal(result1, backend.to_numpy(result2))
result1 = rearrange(arrays1, "b ... -> ... b")
result2 = rearrange(arrays2, "b ... -> ... b")
assert numpy.array_equal(result1, backend.to_numpy(result2))
def test_gradients_imperatives():
# lazy - just checking reductions
for reduction in _reductions:
x = numpy.arange(1, 1 + 2 * 3 * 4).reshape([2, 3, 4]).astype("float32")
results = {}
for backend in imp_op_backends:
y0 = backend.from_numpy(x)
if not hasattr(y0, "grad"):
continue
y1 = reduce(y0, "a b c -> c a", reduction=reduction)
y2 = reduce(y1, "c a -> a c", reduction=reduction)
y3 = reduce(y2, "a (c1 c2) -> a", reduction=reduction, c1=2)
y4 = reduce(y3, "... -> ", reduction=reduction)
y4.backward()
grad = backend.to_numpy(y0.grad)
results[backend.framework_name] = grad
print("comparing gradients for", results.keys())
for name1, grad1 in results.items():
for name2, grad2 in results.items():
assert numpy.allclose(grad1, grad2), [name1, name2, "provided different gradients"]
def test_tiling_imperatives():
for backend in imp_op_backends:
print("Tiling tests for ", backend.framework_name)
input = numpy.arange(2 * 3 * 5, dtype="int64").reshape([2, 1, 3, 1, 5])
test_cases = [
(1, 1, 1, 1, 1),
(1, 2, 1, 3, 1),
(3, 1, 1, 4, 1),
]
for repeats in test_cases:
expected = numpy.tile(input, repeats)
converted = backend.from_numpy(input)
repeated = backend.tile(converted, repeats)
result = backend.to_numpy(repeated)
assert numpy.array_equal(result, expected)
def test_tiling_symbolic():
for backend in sym_op_backends:
print("Tiling tests for ", backend.framework_name)
input = numpy.arange(2 * 3 * 5, dtype="int64").reshape([2, 1, 3, 1, 5])
test_cases = [
(1, 1, 1, 1, 1),
(1, 2, 1, 3, 1),
(3, 1, 1, 4, 1),
]
for repeats in test_cases:
expected = numpy.tile(input, repeats)
sym = backend.create_symbol(input.shape)
result = backend.eval_symbol(backend.tile(sym, repeats), [[sym, input]])
assert numpy.array_equal(result, expected)
sym = backend.create_symbol([None] * len(input.shape))
result = backend.eval_symbol(backend.tile(sym, repeats), [[sym, input]])
assert numpy.array_equal(result, expected)
repeat_test_cases = [
# all assume that input has shape [2, 3, 5]
("a b c -> c a b", dict()),
("a b c -> (c copy a b)", dict(copy=2, a=2, b=3, c=5)),
("a b c -> (a copy) b c ", dict(copy=1)),
("a b c -> (c a) (copy1 b copy2)", dict(a=2, copy1=1, copy2=2)),
("a ... -> a ... copy", dict(copy=4)),
("... c -> ... (copy1 c copy2)", dict(copy1=1, copy2=2)),
("... -> ... ", dict()),
(" ... -> copy1 ... copy2 ", dict(copy1=2, copy2=3)),
("a b c -> copy1 a copy2 b c () ", dict(copy1=2, copy2=1)),
]
def check_reversion(x, repeat_pattern, **sizes):
"""Checks repeat pattern by running reduction"""
left, right = repeat_pattern.split("->")
reduce_pattern = right + "->" + left
repeated = repeat(x, repeat_pattern, **sizes)
reduced_min = reduce(repeated, reduce_pattern, reduction="min", **sizes)
reduced_max = reduce(repeated, reduce_pattern, reduction="max", **sizes)
assert numpy.array_equal(x, reduced_min)
assert numpy.array_equal(x, reduced_max)
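# Why min and max (added commentary): if reducing the copies with 'min' and with
# 'max' both return the original tensor, every copy must equal the original
# value, so the repeat placed the right value in every cell.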
def test_repeat_numpy():
# check repeat vs reduce. Repeat works ok if reverse reduction with min and max work well
x = numpy.arange(2 * 3 * 5).reshape([2, 3, 5])
x1 = repeat(x, "a b c -> copy a b c ", copy=1)
assert numpy.array_equal(x[None], x1)
for pattern, axis_dimensions in repeat_test_cases:
check_reversion(x, pattern, **axis_dimensions)
def test_repeat_imperatives():
x = numpy.arange(2 * 3 * 5).reshape([2, 3, 5])
for backend in imp_op_backends:
print("Repeat tests for ", backend.framework_name)
for pattern, axis_dimensions in repeat_test_cases:
expected = repeat(x, pattern, **axis_dimensions)
converted = backend.from_numpy(x)
repeated = repeat(converted, pattern, **axis_dimensions)
result = backend.to_numpy(repeated)
assert numpy.array_equal(result, expected)
def test_repeat_symbolic():
x = numpy.arange(2 * 3 * 5).reshape([2, 3, 5])
for backend in sym_op_backends:
print("Repeat tests for ", backend.framework_name)
for pattern, axis_dimensions in repeat_test_cases:
expected = repeat(x, pattern, **axis_dimensions)
sym = backend.create_symbol(x.shape)
result = backend.eval_symbol(repeat(sym, pattern, **axis_dimensions), [[sym, x]])
assert numpy.array_equal(result, expected)
def test_repeat_array_api():
import numpy.array_api as xp
from einops import array_api as AA
x = numpy.arange(2 * 3 * 5).reshape([2, 3, 5])
for pattern, axis_dimensions in repeat_test_cases:
expected = repeat(x, pattern, **axis_dimensions)
result = AA.repeat(xp.from_dlpack(x), pattern, **axis_dimensions)
assert numpy.array_equal(AA.asnumpy(result + 0), expected)
test_cases_repeat_anonymous = [
# all assume that input has shape [1, 2, 4, 6]
("a b c d -> c a d b", dict()),
("a b c d -> (c 2 d a b)", dict(a=1, c=4, d=6)),
("1 b c d -> (d copy 1) 3 b c ", dict(copy=3)),
("1 ... -> 3 ... ", dict()),
("() ... d -> 1 (copy1 d copy2) ... ", dict(copy1=2, copy2=3)),
("1 b c d -> (1 1) (1 b) 2 c 3 d (1 1)", dict()),
]
def test_anonymous_axes():
x = numpy.arange(1 * 2 * 4 * 6).reshape([1, 2, 4, 6])
for pattern, axis_dimensions in test_cases_repeat_anonymous:
check_reversion(x, pattern, **axis_dimensions)
def test_list_inputs():
x = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
assert numpy.array_equal(
rearrange(list(x), "... -> (...)"),
rearrange(x, "... -> (...)"),
)
assert numpy.array_equal(
reduce(list(x), "a ... e -> (...)", "min"),
reduce(x, "a ... e -> (...)", "min"),
)
assert numpy.array_equal(
repeat(list(x), "... -> b (...)", b=3),
repeat(x, "... -> b (...)", b=3),
)
def test_torch_compile_with_dynamic_shape():
if not is_backend_tested("torch"):
pytest.skip()
import torch
# somewhat reasonable debug messages
torch._dynamo.config.verbose = True
def func1(x):
# test contains ellipsis
a, b, c, *other = x.shape
x = rearrange(x, '(a a2) b c ... -> b (c a2) (a ...)', a2=2)
# test contains passing expression as axis length
x = reduce(x, 'b ca2 A -> b A', 'sum', ca2=c * 2)
return x
    # it seems static and dynamic compilation can't be tested in the same test run.
# func1_compiled_static = torch.compile(func1, dynamic=False, fullgraph=True, backend='aot_eager')
func1_compiled_dynamic = torch.compile(func1, dynamic=True, fullgraph=True, backend='aot_eager')
x = torch.randn(size=[4, 5, 6, 3])
assert torch.equal(func1_compiled_dynamic(x), func1(x))
# check with input of different dimensionality, and with all shape elements changed
x = torch.randn(size=[6, 3, 4, 2, 3])
assert torch.equal(func1_compiled_dynamic(x), func1(x))
| einops-master | tests/test_ops.py |
import sys
import unittest
from doctest import testmod
from typing import Dict, List, Optional
import numpy
import pytest
from parameterized import parameterized, parameterized_class
import einops
import einops.layers
import einops.parsing
from einops._backends import AbstractBackend
from einops.einops import rearrange, parse_shape, _optimize_transformation
from . import collect_test_backends, is_backend_tested
__author__ = "Alex Rogozhnikov"
def test_doctests_examples():
if sys.version_info >= (3, 6):
        # Python 3.5 and lower do not keep dictionaries ordered
testmod(einops.layers, raise_on_error=True, extraglobs=dict(np=numpy))
testmod(einops.einops, raise_on_error=True, extraglobs=dict(np=numpy))
def test_backends_installed():
"""
This test will fail if some of backends are not installed or can't be imported
Other tests will just work and only test installed backends.
"""
from . import parse_backends_to_test
backends_to_test = parse_backends_to_test()
errors = []
for backend_type in AbstractBackend.__subclasses__():
if backend_type.framework_name not in backends_to_test:
continue
try:
# instantiate
backend_type()
except Exception as e:
errors.append(e)
assert len(errors) == 0, errors
def test_optimize_transformations_numpy():
print("Testing optimizations")
shapes = [[2] * n_dimensions for n_dimensions in range(14)]
shapes += [[3] * n_dimensions for n_dimensions in range(6)]
shapes += [[2, 3, 5, 7]]
shapes += [[2, 3, 5, 7, 11, 17]]
for shape in shapes:
for attempt in range(5):
n_dimensions = len(shape)
x = numpy.random.randint(0, 2**12, size=shape).reshape([-1])
init_shape = shape[:]
n_reduced = numpy.random.randint(0, n_dimensions + 1)
reduced_axes = tuple(numpy.random.permutation(n_dimensions)[:n_reduced])
axes_reordering = numpy.random.permutation(n_dimensions - n_reduced)
final_shape = numpy.random.randint(0, 1024, size=333) # just random
init_shape2, reduced_axes2, axes_reordering2, final_shape2 = combination2 = _optimize_transformation(
init_shape, reduced_axes, axes_reordering, final_shape
)
assert numpy.array_equal(final_shape, final_shape2)
result1 = x.reshape(init_shape).sum(axis=reduced_axes).transpose(axes_reordering).reshape([-1])
result2 = x.reshape(init_shape2).sum(axis=reduced_axes2).transpose(axes_reordering2).reshape([-1])
assert numpy.array_equal(result1, result2)
            # check that the optimized formula can't be optimized further
combination3 = _optimize_transformation(*combination2)
for a, b in zip(combination2, combination3):
assert numpy.array_equal(a, b)
_IMPERATIVE_BACKENDS = [
{"backend": backend}
for backend in (
collect_test_backends(symbolic=False, layers=False) + collect_test_backends(symbolic=False, layers=True)
)
]
@parameterized_class(_IMPERATIVE_BACKENDS)
class TestParseShapeImperative(unittest.TestCase):
backend: AbstractBackend
def setUp(self):
self.x = numpy.zeros([10, 20, 30, 40])
def test_parse_shape_imperative(self):
print("Shape parsing for ", self.backend.framework_name)
parsed1 = parse_shape(self.x, "a b c d")
parsed2 = parse_shape(self.backend.from_numpy(self.x), "a b c d")
assert parsed1 == parsed2 == dict(a=10, b=20, c=30, d=40)
assert parsed1 != dict(a=1, b=20, c=30, d=40) != parsed2
def test_underscore(self):
parsed1 = parse_shape(self.x, "_ _ _ _")
parsed2 = parse_shape(self.backend.from_numpy(self.x), "_ _ _ _")
assert parsed1 == parsed2 == dict()
def test_underscore_one(self):
parsed1 = parse_shape(self.x, "_ _ _ hello")
parsed2 = parse_shape(self.backend.from_numpy(self.x), "_ _ _ hello")
assert parsed1 == parsed2 == dict(hello=40)
def test_underscore_several(self):
parsed1 = parse_shape(self.x, "_ _ a1 a1a111a")
parsed2 = parse_shape(self.backend.from_numpy(self.x), "_ _ a1 a1a111a")
assert parsed1 == parsed2 == dict(a1=30, a1a111a=40)
def test_repeating(self):
with pytest.raises(einops.EinopsError):
parse_shape(self.x, "a a b b")
with pytest.raises(einops.EinopsError):
parse_shape(self.backend.from_numpy(self.x), "a a b b")
@parameterized.expand(
[
([10, 20], "...", dict()),
([10], "... a", dict(a=10)),
([10, 20], "... a", dict(a=20)),
([10, 20, 30], "... a", dict(a=30)),
([10, 20, 30, 40], "... a", dict(a=40)),
([10], "a ...", dict(a=10)),
([10, 20], "a ...", dict(a=10)),
([10, 20, 30], "a ...", dict(a=10)),
([10, 20, 30, 40], "a ...", dict(a=10)),
([10, 20, 30, 40], " a ... b", dict(a=10, b=40)),
([10, 40], " a ... b", dict(a=10, b=40)),
]
)
def test_ellipsis(self, shape: List[int], pattern: str, expected: Dict[str, int]):
x = numpy.ones(shape)
parsed1 = parse_shape(x, pattern)
parsed2 = parse_shape(self.backend.from_numpy(x), pattern)
assert parsed1 == parsed2 == expected
_SYMBOLIC_BACKENDS = [
{"backend": backend}
for backend in (
collect_test_backends(symbolic=True, layers=False) + collect_test_backends(symbolic=True, layers=True)
)
if backend.framework_name != "tensorflow.keras"
]
# tensorflow.keras needs a special way to compile;
# shape variables can be used only inside layers, not as outputs
@parameterized_class(_SYMBOLIC_BACKENDS)
class TestParseShapeSymbolic(unittest.TestCase):
backend: AbstractBackend
@parameterized.expand(
[
([10, 20, 30, 40],),
([10, 20, None, None],),
([None, None, None, None],),
]
)
def test_parse_shape_symbolic(self, shape):
print("special shape parsing for", self.backend.framework_name)
input_symbol = self.backend.create_symbol(shape)
shape_placeholder = parse_shape(input_symbol, "a b c d")
shape = {}
for name, symbol in shape_placeholder.items():
shape[name] = (
symbol
if isinstance(symbol, int)
else self.backend.eval_symbol(symbol, [(input_symbol, numpy.zeros([10, 20, 30, 40]))])
)
print(shape)
result_placeholder = rearrange(
input_symbol, "a b (c1 c2) (d1 d2) -> (a b d1) c1 (c2 d2)", **parse_shape(input_symbol, "a b c1 _"), d2=2
)
result = self.backend.eval_symbol(result_placeholder, [(input_symbol, numpy.zeros([10, 20, 30, 40]))])
print(result.shape)
assert result.shape == (10 * 20 * 20, 30, 1 * 2)
assert numpy.allclose(result, 0)
@parameterized.expand(
[
([10, 20], [None, None], "...", dict()),
([10], [None], "... a", dict(a=10)),
([10, 20], [None], "... a", dict(a=20)),
([10, 20, 30], [None, None, None], "... a", dict(a=30)),
([10, 20, 30, 40], [None, None, None, None], "... a", dict(a=40)),
([10], [None], "a ...", dict(a=10)),
([10, 20], [None, None], "a ...", dict(a=10)),
([10, 20, 30], [None, None, None], "a ...", dict(a=10)),
([10, 20, 30, 40], [None, None, None, None], "a ...", dict(a=10)),
([10, 20, 30, 40], [None, None, None, None], " a ... b", dict(a=10, b=40)),
([10, 40], [None, None], " a ... b", dict(a=10, b=40)),
]
)
def test_ellipsis(
self, static_shape: List[int], shape: List[Optional[int]], pattern: str, expected: Dict[str, int]
):
input_symbol = self.backend.create_symbol(shape)
shape_placeholder = parse_shape(input_symbol, pattern)
out_shape = {}
for name, symbol in shape_placeholder.items():
if isinstance(symbol, int):
out_shape[name] = symbol
else:
out_shape[name] = self.backend.eval_symbol(symbol, [(input_symbol, numpy.zeros(static_shape))])
assert out_shape == expected
def test_is_float_type():
backends = collect_test_backends(symbolic=False, layers=False)
backends += collect_test_backends(symbolic=False, layers=True)
for backend in backends:
for dtype in ["int32", "int64", "float32", "float64"]:
is_float = "float" in dtype
input = numpy.zeros([3, 4, 5], dtype=dtype)
input = backend.from_numpy(input)
if "chainer" in backend.framework_name and not is_float:
continue # chainer doesn't allow non-floating tensors
assert backend.is_float_type(input) == is_float, (dtype, backend, input.dtype)
def test_torch_compile():
"""
Test ensures that allow_ops_in_compiled_graph allows compiling in a single graph
Additionally we ensure that after compilation cache works properly
(by changing shapes and patterns)
We additionally check that pack/unpack still can be handled
despite variable number of inputs/outputs
"""
if not is_backend_tested('torch'):
pytest.skip()
import torch
from torch import nn
from einops import repeat, reduce, pack, unpack, einsum
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
class TorchModuleWithOperations(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x_abc, suffix=''):
a, b, c = x_abc.shape
def suf(pattern):
parts = pattern.split()
return ' '.join([p if p[-1] not in 'acd' else p + suffix for p in parts])
            # patterns look a bit strange because the suf function modifies
            # the names a, c, d on every run
x_abcd = repeat(x_abc, suf('a b c -> a b c 4'))
x_abc = reduce(x_abcd, suf('a b c d -> a b c'), 'min')
x_abdc, ps = pack([x_abc] * (2 + len(suffix)), suf('a b * c'))
x_array = unpack(rearrange(x_abdc, suf('a b d c -> (a b ) 1 c d')), ps, 'ab one1 c *')
x1 = x_array[0] + len(x_array)
x1 = rearrange(x1, suf('(a b ) 1 c -> a b c'), b=b)
addition = einsum(x_abc, x_abcd, suf('a b c , a b c d -> d'))[0]
return x1 + addition
original = TorchModuleWithOperations()
compiled = torch.compile(original, fullgraph=True, backend='aot_eager')
for size in [10, 20, 40]:
x = torch.rand([size, size + 1, size + 2])
for suffix in ['', 'suf1', 'other_suffix']:
result1 = compiled(x, suffix)
result2 = original(x, suffix)
assert torch.allclose(result1, result2)
| einops-master | tests/test_other.py |
import numpy
import pytest
from einops import rearrange, parse_shape, reduce
from tests import is_backend_tested
from tests.test_ops import imp_op_backends
def test_rearrange_examples():
def test1(x):
# transpose
y = rearrange(x, 'b c h w -> b h w c')
assert tuple(y.shape) == (10, 30, 40, 20)
return y
def test2(x):
# view / reshape
y = rearrange(x, 'b c h w -> b (c h w)')
assert tuple(y.shape) == (10, 20 * 30 * 40)
return y
def test3(x):
# depth-to-space
y = rearrange(x, 'b (c h1 w1) h w -> b c (h h1) (w w1)', h1=2, w1=2)
assert tuple(y.shape) == (10, 5, 30 * 2, 40 * 2)
return y
def test4(x):
# space-to-depth
y = rearrange(x, 'b c (h h1) (w w1) -> b (h1 w1 c) h w', h1=2, w1=2)
assert tuple(y.shape) == (10, 20 * 4, 30 // 2, 40 // 2)
return y
def test5(x):
# simple transposition
y = rearrange(x, 'b1 sound b2 letter -> b1 b2 sound letter')
assert tuple(y.shape) == (10, 30, 20, 40)
return y
def test6(x):
# parsing parameters
t = rearrange(x, 'b c h w -> (b h w) c')
t = t[:, ::2] # replacement for dot-product, just changes size of second axis
assert tuple(t.shape) == (10 * 30 * 40, 10)
y = rearrange(t, '(b h w) c2 -> b c2 h w', **parse_shape(x, 'b _ h w'))
assert tuple(y.shape) == (10, 10, 30, 40)
return y
def test7(x):
# split of embedding into groups
y1, y2 = rearrange(x, 'b (c g) h w -> g b c h w', g=2)
assert tuple(y1.shape) == (10, 10, 30, 40)
assert tuple(y2.shape) == (10, 10, 30, 40)
return y1 + y2 # only one tensor is expected in output
def test8(x):
# max-pooling
y = reduce(x, 'b c (h h1) (w w1) -> b c h w', reduction='max', h1=2, w1=2)
assert tuple(y.shape) == (10, 20, 30 // 2, 40 // 2)
return y
def test9(x):
# squeeze - unsqueeze
y = reduce(x, 'b c h w -> b c () ()', reduction='max')
assert tuple(y.shape) == (10, 20, 1, 1)
y = rearrange(y, 'b c () () -> c b')
assert tuple(y.shape) == (20, 10)
return y
def test10(x):
# stack
tensors = list(x + 0) # 0 is needed https://github.com/tensorflow/tensorflow/issues/23185
tensors = rearrange(tensors, 'b c h w -> b h w c')
assert tuple(tensors.shape) == (10, 30, 40, 20)
return tensors
def test11(x):
# concatenate
tensors = list(x + 0) # 0 is needed https://github.com/tensorflow/tensorflow/issues/23185
tensors = rearrange(tensors, 'b c h w -> h (b w) c')
assert tuple(tensors.shape) == (30, 10 * 40, 20)
return tensors
def shufflenet(x, convolve, c1, c2):
# shufflenet reordering example
x = convolve(x)
x = rearrange(x, 'b (c1 c2) h w-> b (c2 c1) h w', c1=c1, c2=c2)
x = convolve(x)
return x
def convolve_strided_1d(x, stride, usual_convolution):
x = rearrange(x, 'b c t1 t2 -> b c (t1 t2)') # reduce dimensionality
x = rearrange(x, 'b c (t stride) -> (stride b) c t', stride=stride)
x = usual_convolution(x)
x = rearrange(x, '(stride b) c t -> b c (t stride)', stride=stride)
return x
def convolve_strided_2d(x, h_stride, w_stride, usual_convolution):
x = rearrange(x, 'b c (h hs) (w ws) -> (hs ws b) c h w', hs=h_stride, ws=w_stride)
x = usual_convolution(x)
x = rearrange(x, '(hs ws b) c h w -> b c (h hs) (w ws)', hs=h_stride, ws=w_stride)
return x
def unet_like_1d(x, usual_convolution):
# u-net like steps for increasing / reducing dimensionality
x = rearrange(x, 'b c t1 t2 -> b c (t1 t2)') # reduce dimensionality
y = rearrange(x, 'b c (t dt) -> b (dt c) t', dt=2)
y = usual_convolution(y)
x = x + rearrange(y, 'b (dt c) t -> b c (t dt)', dt=2)
return x
# mock for convolution (works for all backends)
convolve_mock = lambda x: x
tests = [test1, test2, test3, test4, test5, test6, test7, test8, test9, test10, test11,
lambda x: shufflenet(x, convolve=convolve_mock, c1=4, c2=5),
lambda x: convolve_strided_1d(x, stride=2, usual_convolution=convolve_mock),
lambda x: convolve_strided_2d(x, h_stride=2, w_stride=2, usual_convolution=convolve_mock),
lambda x: unet_like_1d(x, usual_convolution=convolve_mock),
]
for backend in imp_op_backends:
print('testing source_examples for ', backend.framework_name)
for test in tests:
x = numpy.arange(10 * 20 * 30 * 40).reshape([10, 20, 30, 40])
result1 = test(x)
result2 = backend.to_numpy(test(backend.from_numpy(x)))
assert numpy.array_equal(result1, result2)
# now with strides
x = numpy.arange(10 * 2 * 20 * 3 * 30 * 1 * 40).reshape([10 * 2, 20 * 3, 30 * 1, 40 * 1])
# known torch bug - torch doesn't support negative steps
last_step = -1 if (backend.framework_name != 'torch' and backend.framework_name != 'oneflow') else 1
indexing_expression = numpy.index_exp[::2, ::3, ::1, ::last_step]
result1 = test(x[indexing_expression])
result2 = backend.to_numpy(test(backend.from_numpy(x)[indexing_expression]))
assert numpy.array_equal(result1, result2)
def tensor_train_example_numpy():
    # kept here just for the collection; only tested with numpy
# https://arxiv.org/pdf/1509.06569.pdf, (5)
x = numpy.ones([3, 4, 5, 6])
rank = 4
    if tuple(map(int, numpy.__version__.split('.')[:2])) < (1, 15):
        # numpy.einsum fails on older versions, skip test
        # (parsing version components avoids a lexicographic string-comparison bug)
        return
# creating appropriate Gs
Gs = [numpy.ones([d, d, rank, rank]) for d in x.shape]
Gs[0] = Gs[0][:, :, :1, :]
Gs[-1] = Gs[-1][:, :, :, :1]
# einsum way
y = x.reshape((1,) + x.shape)
for G in Gs:
# taking partial results left-to-right
# y = numpy.einsum('i j alpha beta, alpha i ... -> beta ... j', G, y)
y = numpy.einsum('i j a b, a i ... -> b ... j', G, y)
y1 = y.reshape(-1)
# alternative way
y = x.reshape(-1)
for G in Gs:
i, j, alpha, beta = G.shape
y = rearrange(y, '(i rest alpha) -> rest (alpha i)', alpha=alpha, i=i)
y = y @ rearrange(G, 'i j alpha beta -> (alpha i) (j beta)')
y = rearrange(y, 'rest (beta j) -> (beta rest j)', beta=beta, j=j)
y2 = y
assert numpy.allclose(y1, y2)
# yet another way
y = x
for G in Gs:
i, j, alpha, beta = G.shape
y = rearrange(y, 'i ... (j alpha) -> ... j (alpha i)', alpha=alpha, i=i)
y = y @ rearrange(G, 'i j alpha beta -> (alpha i) (j beta)')
y3 = y.reshape(-1)
assert numpy.allclose(y1, y3)
def test_pytorch_yolo_fragment():
if not is_backend_tested('torch'):
pytest.skip()
import torch
def old_way(input, num_classes, num_anchors, anchors, stride_h, stride_w):
# https://github.com/BobLiu20/YOLOv3_PyTorch/blob/c6b483743598b5f64d520d81e7e5f47ba936d4c9/nets/yolo_loss.py#L28-L44
bs = input.size(0)
in_h = input.size(2)
in_w = input.size(3)
scaled_anchors = [(a_w / stride_w, a_h / stride_h) for a_w, a_h in anchors]
prediction = input.view(bs, num_anchors,
5 + num_classes, in_h, in_w).permute(0, 1, 3, 4, 2).contiguous()
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
conf = torch.sigmoid(prediction[..., 4]) # Conf
pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.
# https://github.com/BobLiu20/YOLOv3_PyTorch/blob/c6b483743598b5f64d520d81e7e5f47ba936d4c9/nets/yolo_loss.py#L70-L92
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
# Calculate offsets for each grid
grid_x = torch.linspace(0, in_w - 1, in_w).repeat(in_w, 1).repeat(
bs * num_anchors, 1, 1).view(x.shape).type(FloatTensor)
grid_y = torch.linspace(0, in_h - 1, in_h).repeat(in_h, 1).t().repeat(
bs * num_anchors, 1, 1).view(y.shape).type(FloatTensor)
# Calculate anchor w, h
anchor_w = FloatTensor(scaled_anchors).index_select(1, LongTensor([0]))
anchor_h = FloatTensor(scaled_anchors).index_select(1, LongTensor([1]))
anchor_w = anchor_w.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(w.shape)
anchor_h = anchor_h.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(h.shape)
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + grid_x
pred_boxes[..., 1] = y.data + grid_y
pred_boxes[..., 2] = torch.exp(w.data) * anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * anchor_h
# Results
_scale = torch.Tensor([stride_w, stride_h] * 2).type(FloatTensor)
output = torch.cat((pred_boxes.view(bs, -1, 4) * _scale,
conf.view(bs, -1, 1), pred_cls.view(bs, -1, num_classes)), -1)
return output
def new_way(input, num_classes, num_anchors, anchors, stride_h, stride_w):
raw_predictions = rearrange(input, ' b (anchor prediction) h w -> prediction b anchor h w', anchor=num_anchors)
anchors = torch.FloatTensor(anchors).to(input.device)
anchor_sizes = rearrange(anchors, 'anchor dim -> dim () anchor () ()')
_, _, _, in_h, in_w = raw_predictions.shape
grid_h = rearrange(torch.arange(in_h).float(), 'h -> () () h ()').to(input.device)
grid_w = rearrange(torch.arange(in_w).float(), 'w -> () () () w').to(input.device)
predicted_bboxes = torch.zeros_like(raw_predictions)
predicted_bboxes[0] = (raw_predictions[0].sigmoid() + grid_h) * stride_h # center y
predicted_bboxes[1] = (raw_predictions[1].sigmoid() + grid_w) * stride_w # center x
predicted_bboxes[2:4] = (raw_predictions[2:4].exp()) * anchor_sizes # bbox width and height
predicted_bboxes[4] = raw_predictions[4].sigmoid() # confidence
predicted_bboxes[5:] = raw_predictions[5:].sigmoid() # class predictions
# only to match results of original code, not needed
return rearrange(predicted_bboxes, 'prediction b anchor h w -> b anchor h w prediction')
stride_h = 4
stride_w = 4
batch_size = 5
num_classes = 12
anchors = [[50, 100], [100, 50], [75, 75]]
num_anchors = len(anchors)
input = torch.randn([batch_size, num_anchors * (5 + num_classes), 1, 1])
result1 = old_way(input=input, num_anchors=num_anchors, num_classes=num_classes,
stride_h=stride_h, stride_w=stride_w, anchors=anchors)
result2 = new_way(input=input, num_anchors=num_anchors, num_classes=num_classes,
stride_h=stride_h, stride_w=stride_w, anchors=anchors)
result1 = result1.reshape(result2.shape)
    assert torch.allclose(result1, result2)
| einops-master | tests/test_examples.py |
"""
Just run this script with `python converter.py`.
It converts Pytorch.ipynb to the HTML page docs/pytorch-examples.html.
"""
import nbformat
import markdown
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
notebook = nbformat.read('Pytorch.ipynb', as_version=nbformat.NO_CONVERT)
content = ''
cache = ''
for cell in notebook['cells']:
if cell['cell_type'] == 'code':
source = cell['source']
if source.startswith('#left') or source.startswith('#right'):
trimmed_source = source[source.index('\n') + 1:]
cache += "<div>{}</div>".format(highlight(trimmed_source, PythonLexer(), HtmlFormatter()))
if source.startswith('#right'):
content += "<div class='leftright-wrapper'><div class='leftright-cells'>{}</div></div> ".format(cache)
cache = ''
elif cell['cell_type'] == 'markdown':
content += "<div class='markdown-cell'>{}</div>".format(markdown.markdown(cell['source']))
else:
        raise RuntimeError('unexpected cell type: ' + cell['cell_type'])
styles = HtmlFormatter().get_style_defs('.highlight')
styles += '''
body {
padding: 50px 10px;
}
.leftright-wrapper {
text-align: center;
overflow-x: auto;
}
.leftright-cells {
display: inline-flex;
text-align: left;
}
.leftright-cells > div {
padding: 0px 10px;
min-width: 350px;
}
.markdown-cell{
max-width: 700px;
margin: 0px auto;
}
h1 {
text-align: center;
padding: 10px 0px 0px;
}
'''
meta_tags = '''
<meta property="og:title" content="Writing better code with pytorch and einops">
<meta property="og:description" content="Learning by example: rewriting and fixing popular code fragments">
<meta property="og:image" content="http://arogozhnikov.github.io/images/einops/einops_video.gif">
<meta property="og:video" content="http://arogozhnikov.github.io/images/einops/einops_video.mp4" />
<meta property="og:url" content="https://arogozhnikov.github.io/einops/pytorch-examples.html">
<meta name="twitter:card" content="summary_large_image">
<!-- Non-Essential, But Recommended -->
<meta property="og:site_name" content="Writing better code with pytorch and einops">
<meta name="twitter:image:alt" content="Learning by example: rewriting and fixing popular code fragments">
'''
github_ribbon = '''
<a href="https://github.com/arogozhnikov/einops" class="github-corner" aria-label="View source on GitHub">
<svg width="80" height="80" viewBox="0 0 250 250" style="fill:#151513; color:#fff; position: absolute; top: 0; border: 0; right: 0;" aria-hidden="true">
<path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path><path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path>
<path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z" fill="currentColor" class="octo-body"></path>
</svg></a>
<style>.github-corner:hover .octo-arm{animation:octocat-wave 560ms ease-in-out}@keyframes octocat-wave{0%,100%{transform:rotate(0)}20%,60%{transform:rotate(-25deg)}40%,80%{transform:rotate(10deg)}}@media (max-width:500px){.github-corner:hover .octo-arm{animation:none}.github-corner .octo-arm{animation:octocat-wave 560ms ease-in-out}}</style>
'''
result = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
{meta_tags}
<title>Writing better code with pytorch+einops</title>
<style>{styles}</style>
</head>
<body>
{github_ribbon}
{content}
</body>
</html>
'''
with open('../pytorch-examples.html', 'w') as f:
f.write(result)
| einops-master | docs/source_examples/converter.py |
import numpy as np
from PIL.Image import fromarray
from IPython import get_ipython
def display_np_arrays_as_images():
def np_to_png(a):
if 2 <= len(a.shape) <= 3:
return fromarray(np.array(np.clip(a, 0, 1) * 255, dtype='uint8'))._repr_png_()
else:
return fromarray(np.zeros([1, 1], dtype='uint8'))._repr_png_()
def np_to_text(obj, p, cycle):
if len(obj.shape) < 2:
print(repr(obj))
if 2 <= len(obj.shape) <= 3:
pass
else:
print('<array of shape {}>'.format(obj.shape))
get_ipython().display_formatter.formatters['image/png'].for_type(np.ndarray, np_to_png)
get_ipython().display_formatter.formatters['text/plain'].for_type(np.ndarray, np_to_text)
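# Illustrative usage sketch (assumed): call display_np_arrays_as_images() once at
# the top of a notebook; afterwards 2d/3d numpy arrays render as PNG images,
# while arrays of other ranks fall back to a short textual summary.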
from IPython.display import display_html
_style_inline = """<style>
.einops-answer {
color: transparent;
padding: 5px 15px;
background-color: #def;
}
.einops-answer:hover { color: blue; }
</style>
"""
def guess(x):
display_html(
_style_inline
+ "<h4>Answer is: <span class='einops-answer'>{x}</span> (hover to see)</h4>".format(x=tuple(x)),
raw=True)
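# Illustrative usage sketch (assumed): guess(np.zeros([2, 3, 4]).shape) renders
# "Answer is: (2, 3, 4)" hidden until hovered, presumably for self-check
# exercises in the tutorial notebooks.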
| einops-master | docs/utils/__init__.py |
"""
This is a fake script; it is not used.
It seems GitHub does not count contributions unless the repo has a setup.py.
"""
__author__ = "Alex Rogozhnikov"
from setuptools import setup
setup(
name="einops",
version="0.7.0rc2",
description="A new flavour of deep learning operations",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
url="https://github.com/arogozhnikov/einops",
author="Alex Rogozhnikov",
classifiers=[
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3 ",
"License :: OSI Approved :: MIT License",
],
keywords="deep learning, neural networks, tensor manipulation, machine learning, "
"scientific computations, einops",
install_requires=[
# no run-time or installation-time dependencies
],
)
| einops-master | scripts/setup.py |
"""
Converts the README from the GitHub repo page to a mkdocs-friendly version.
"""
from pathlib import Path
original_text = Path(__file__).parent.parent.joinpath('README.md').read_text(encoding='utf-8')
def replace_with_video_tag(line: str):
if line.startswith('https://') and line.endswith('.mp4') and ' ' not in line:
        # treat as a link to an mp4 file; video embeds are currently dropped
return ""
# return f"""
# <video width="800" controls><source src="{line}" type="video/mp4">
# Your browser does not support the video </video>\n\n<br />\n\n<br />
# """.strip()
else:
# other lines are not touched
return line
new_content = '\n'.join([
replace_with_video_tag(line)
for line in original_text.splitlines()
])
# save contents
docs_index = Path(__file__).parent.parent.joinpath('docs_src', 'index.md')
assert docs_index.parent.exists()
docs_index.write_bytes(
new_content.encode('utf-8')
)
print('Converted README.md')
| einops-master | scripts/convert_readme.py |
import functools
import itertools
import string
import typing
from collections import OrderedDict
from typing import Set, Tuple, List, Dict, Union, Callable, Optional, TypeVar, cast, Any
if typing.TYPE_CHECKING:
# for docstrings in pycharm
import numpy as np
from . import EinopsError
from ._backends import get_backend
from .parsing import ParsedExpression, _ellipsis, AnonymousAxis
Tensor = TypeVar("Tensor")
ReductionCallable = Callable[[Tensor, Tuple[int, ...]], Tensor]
Reduction = Union[str, ReductionCallable]
_reductions = ("min", "max", "sum", "mean", "prod")
# magic integers are required to stay within
# the traceable subset of the language
_unknown_axis_length = -999999
_expected_axis_length = -99999
def _product(sequence: List[int]) -> int:
"""minimalistic product that works both with numbers and symbols. Supports empty lists"""
result = 1
for element in sequence:
result *= element
return result
def _reduce_axes(tensor, reduction_type: Reduction, reduced_axes: List[int], backend):
if callable(reduction_type):
# custom callable
return reduction_type(tensor, tuple(reduced_axes))
else:
# one of built-in operations
assert reduction_type in _reductions
if reduction_type == "mean":
if not backend.is_float_type(tensor):
raise NotImplementedError("reduce_mean is not available for non-floating tensors")
return backend.reduce(tensor, reduction_type, tuple(reduced_axes))
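# Illustrative sketch (example assumed): _reduce_axes(x, "max", [0, 2], backend)
# dispatches to backend.reduce(x, "max", (0, 2)); a callable reduction such as
# lambda t, axes: t.std(axis=axes) bypasses the built-in table and is called directly.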
def _optimize_transformation(init_shapes, reduced_axes, axes_reordering, final_shapes):
# 'collapses' neighboring axes if those participate in the result pattern in the same order
# TODO add support for added_axes
assert len(axes_reordering) + len(reduced_axes) == len(init_shapes)
# joining consecutive axes that will be reduced
# possibly we can skip this if all backends can optimize this (not sure)
reduced_axes = tuple(sorted(reduced_axes))
for i in range(len(reduced_axes) - 1)[::-1]:
if reduced_axes[i] + 1 == reduced_axes[i + 1]:
removed_axis = reduced_axes[i + 1]
removed_length = init_shapes[removed_axis]
init_shapes = init_shapes[:removed_axis] + init_shapes[removed_axis + 1 :]
init_shapes[removed_axis - 1] *= removed_length
reduced_axes = reduced_axes[: i + 1] + tuple(axis - 1 for axis in reduced_axes[i + 2 :])
# removing axes that are moved together during reshape
def build_mapping():
init_to_final = {}
for axis in range(len(init_shapes)):
if axis in reduced_axes:
init_to_final[axis] = None
else:
after_reduction = sum(x is not None for x in init_to_final.values())
init_to_final[axis] = list(axes_reordering).index(after_reduction)
return init_to_final
init_axis_to_final_axis = build_mapping()
for init_axis in range(len(init_shapes) - 1)[::-1]:
if init_axis_to_final_axis[init_axis] is None:
continue
if init_axis_to_final_axis[init_axis + 1] is None:
continue
if init_axis_to_final_axis[init_axis] + 1 == init_axis_to_final_axis[init_axis + 1]:
removed_axis = init_axis + 1
removed_length = init_shapes[removed_axis]
removed_axis_after_reduction = sum(x not in reduced_axes for x in range(removed_axis))
reduced_axes = tuple(axis if axis < removed_axis else axis - 1 for axis in reduced_axes)
init_shapes = init_shapes[:removed_axis] + init_shapes[removed_axis + 1 :]
init_shapes[removed_axis - 1] *= removed_length
old_reordering = axes_reordering
axes_reordering = []
for axis in old_reordering:
if axis == removed_axis_after_reduction:
pass
elif axis < removed_axis_after_reduction:
axes_reordering.append(axis)
else:
axes_reordering.append(axis - 1)
init_axis_to_final_axis = build_mapping()
return init_shapes, reduced_axes, axes_reordering, final_shapes
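# Illustrative sketch of the optimization above (example values assumed):
# for init_shapes=[2, 3, 5], reduced_axes=(1, 2), axes_reordering=[0],
# the two neighboring reduced axes are merged, yielding
# init_shapes=[2, 15], reduced_axes=(1,), axes_reordering=[0],
# so backends perform a single reshape + reduction instead of two.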
CookedRecipe = Tuple[Optional[List[int]], Optional[List[int]], List[int], Dict[int, int], Optional[List[int]], int]
# Actual type is tuple[tuple[str, int], ...]
# However torch.jit.script does not "understand" the correct type,
# and torch_specific will use the list version.
HashableAxesLengths = Tuple[Tuple[str, int], ...]
FakeHashableAxesLengths = List[Tuple[str, int]]
class TransformRecipe:
"""
    A recipe describes the actual computation pathway.
    A recipe can be applied to a tensor or variable.
    """
    # structure is immutable. In the future, this can be an immutable dataclass (python 3.7+)
# update: pytorch 2.0 torch.jit.script seems to have problems with dataclasses unless they were explicitly provided
def __init__(
self,
# list of sizes (or just sizes) for elementary axes as they appear in left expression.
# this is what (after computing unknown parts) will be a shape after first transposition.
# This does not include any ellipsis dimensions.
elementary_axes_lengths: List[int],
# if additional axes are provided, they should be set in prev array
# This shows mapping from name to position
axis_name2elementary_axis: Dict[str, int],
        # each dimension in the input can help to reconstruct the length of one elementary axis
        # or to verify one of the dimensions. Each element points to an element of elementary_axes_lengths.
input_composition_known_unknown: List[Tuple[List[int], List[int]]],
# permutation applied to elementary axes, if ellipsis is absent
axes_permutation: List[int],
# permutation puts reduced axes in the end, we only need to know the first position.
first_reduced_axis: int,
# at which positions which of elementary axes should appear. Axis position -> axis index.
added_axes: Dict[int, int],
# ids of axes as they appear in result, again pointers to elementary_axes_lengths,
# only used to infer result dimensions
output_composite_axes: List[List[int]],
):
self.elementary_axes_lengths: List[int] = elementary_axes_lengths
self.axis_name2elementary_axis: Dict[str, int] = axis_name2elementary_axis
self.input_composition_known_unknown: List[Tuple[List[int], List[int]]] = input_composition_known_unknown
self.axes_permutation: List[int] = axes_permutation
self.first_reduced_axis: int = first_reduced_axis
self.added_axes: Dict[int, int] = added_axes
self.output_composite_axes: List[List[int]] = output_composite_axes
def _reconstruct_from_shape_uncached(
self: TransformRecipe, shape: List[int], axes_dims: FakeHashableAxesLengths
) -> CookedRecipe:
"""
    Reconstruct all actual parameters using the shape.
    Shape is a tuple that may contain integers, shape symbols (tf, theano) and UnknownSize (tf, previously mxnet).
    Known axes can be integers or symbols, but not None.
"""
# magic number
need_init_reshape = False
# last axis is allocated for collapsed ellipsis
axes_lengths: List[int] = list(self.elementary_axes_lengths)
for axis, dim in axes_dims:
axes_lengths[self.axis_name2elementary_axis[axis]] = dim
for input_axis, (known_axes, unknown_axes) in enumerate(self.input_composition_known_unknown):
length = shape[input_axis]
if len(known_axes) == 0 and len(unknown_axes) == 1:
# shortcut for the most common case
axes_lengths[unknown_axes[0]] = length
continue
known_product = 1
for axis in known_axes:
known_product *= axes_lengths[axis]
if len(unknown_axes) == 0:
if isinstance(length, int) and isinstance(known_product, int) and length != known_product:
raise EinopsError(f"Shape mismatch, {length} != {known_product}")
else:
# assert len(unknown_axes) == 1, 'this is enforced when recipe is created, so commented out'
if isinstance(length, int) and isinstance(known_product, int) and length % known_product != 0:
raise EinopsError(f"Shape mismatch, can't divide axis of length {length} in chunks of {known_product}")
unknown_axis = unknown_axes[0]
inferred_length: int = length // known_product
axes_lengths[unknown_axis] = inferred_length
if len(known_axes) + len(unknown_axes) != 1:
need_init_reshape = True
# at this point all axes_lengths are computed (either have values or variables, but not Nones)
# elementary axes are ordered as they appear in input, then all added axes
init_shapes: Optional[List[int]] = axes_lengths[: len(self.axes_permutation)] if need_init_reshape else None
need_final_reshape = False
final_shapes: List[int] = []
for grouping in self.output_composite_axes:
lengths = [axes_lengths[elementary_axis] for elementary_axis in grouping]
final_shapes.append(_product(lengths))
if len(lengths) != 1:
need_final_reshape = True
added_axes: Dict[int, int] = {
pos: axes_lengths[pos_in_elementary] for pos, pos_in_elementary in self.added_axes.items()
}
# this list can be empty
reduced_axes = list(range(self.first_reduced_axis, len(self.axes_permutation)))
n_axes_after_adding_axes = len(added_axes) + len(self.axes_permutation)
axes_reordering: Optional[List[int]] = self.axes_permutation
if self.axes_permutation == list(range(len(self.axes_permutation))):
axes_reordering = None
_final_shapes = final_shapes if need_final_reshape else None
return init_shapes, axes_reordering, reduced_axes, added_axes, _final_shapes, n_axes_after_adding_axes
_reconstruct_from_shape = functools.lru_cache(1024)(_reconstruct_from_shape_uncached)
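# Illustrative sketch (example assumed, not from the source): for the recipe of
# rearrange 'a b -> b a' applied to a tensor of shape (2, 3), the cooked recipe is
#   (None, [1, 0], [], {}, None, 2)
# i.e. no initial reshape, a (1, 0) transposition, no reduced axes, no added axes,
# no final reshape, and 2 axes after adding axes.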
def _apply_recipe(
backend, recipe: TransformRecipe, tensor: Tensor, reduction_type: Reduction, axes_lengths: HashableAxesLengths
) -> Tensor:
# this method implements actual work for all backends for 3 operations
try:
init_shapes, axes_reordering, reduced_axes, added_axes, final_shapes, n_axes_w_added = _reconstruct_from_shape(
recipe, backend.shape(tensor), axes_lengths
)
except TypeError:
# shape or one of passed axes lengths is not hashable (i.e. they are symbols)
_result = _reconstruct_from_shape_uncached(recipe, backend.shape(tensor), axes_lengths)
(init_shapes, axes_reordering, reduced_axes, added_axes, final_shapes, n_axes_w_added) = _result
if init_shapes is not None:
tensor = backend.reshape(tensor, init_shapes)
if axes_reordering is not None:
tensor = backend.transpose(tensor, axes_reordering)
if len(reduced_axes) > 0:
tensor = _reduce_axes(tensor, reduction_type=reduction_type, reduced_axes=reduced_axes, backend=backend)
if len(added_axes) > 0:
tensor = backend.add_axes(tensor, n_axes=n_axes_w_added, pos2len=added_axes)
if final_shapes is not None:
tensor = backend.reshape(tensor, final_shapes)
return tensor
def _apply_recipe_array_api(
xp, recipe: TransformRecipe, tensor: Tensor, reduction_type: Reduction, axes_lengths: HashableAxesLengths
) -> Tensor:
# completely-inline implementation
init_shapes, axes_reordering, reduced_axes, added_axes, final_shapes, n_axes_w_added = _reconstruct_from_shape(
recipe, tensor.shape, axes_lengths
)
if init_shapes is not None:
tensor = xp.reshape(tensor, init_shapes)
if axes_reordering is not None:
tensor = xp.permute_dims(tensor, axes_reordering)
if len(reduced_axes) > 0:
if callable(reduction_type):
# custom callable
tensor = reduction_type(tensor, tuple(reduced_axes))
else:
# one of built-in operations
assert reduction_type in _reductions
tensor = getattr(xp, reduction_type)(tensor, axis=tuple(reduced_axes))
if len(added_axes) > 0:
# we use broadcasting
for axis_position, axis_length in added_axes.items():
tensor = xp.expand_dims(tensor, axis=axis_position)
final_shape = list(tensor.shape)
for axis_position, axis_length in added_axes.items():
final_shape[axis_position] = axis_length
tensor = xp.broadcast_to(tensor, final_shape)
if final_shapes is not None:
tensor = xp.reshape(tensor, final_shapes)
return tensor
@functools.lru_cache(256)
def _prepare_transformation_recipe(
pattern: str,
operation: Reduction,
axes_names: Tuple[str, ...],
ndim: int,
) -> TransformRecipe:
"""Perform initial parsing of pattern and provided supplementary info
axes_lengths is a tuple of tuples (axis_name, axis_length)
"""
left_str, rght_str = pattern.split("->")
left = ParsedExpression(left_str)
rght = ParsedExpression(rght_str)
# checking that axes are in agreement - new axes appear only in repeat, while disappear only in reduction
if not left.has_ellipsis and rght.has_ellipsis:
raise EinopsError("Ellipsis found in right side, but not left side of a pattern {}".format(pattern))
if left.has_ellipsis and left.has_ellipsis_parenthesized:
raise EinopsError("Ellipsis inside parenthesis in the left side is not allowed: {}".format(pattern))
if operation == "rearrange":
if left.has_non_unitary_anonymous_axes or rght.has_non_unitary_anonymous_axes:
raise EinopsError("Non-unitary anonymous axes are not supported in rearrange (exception is length 1)")
difference = set.symmetric_difference(left.identifiers, rght.identifiers)
if len(difference) > 0:
raise EinopsError("Identifiers only on one side of expression (should be on both): {}".format(difference))
elif operation == "repeat":
difference = set.difference(left.identifiers, rght.identifiers)
if len(difference) > 0:
raise EinopsError("Unexpected identifiers on the left side of repeat: {}".format(difference))
axes_without_size = set.difference(
{ax for ax in rght.identifiers if not isinstance(ax, AnonymousAxis)},
{*left.identifiers, *axes_names},
)
if len(axes_without_size) > 0:
raise EinopsError("Specify sizes for new axes in repeat: {}".format(axes_without_size))
elif operation in _reductions or callable(operation):
difference = set.difference(rght.identifiers, left.identifiers)
if len(difference) > 0:
raise EinopsError("Unexpected identifiers on the right side of reduce {}: {}".format(operation, difference))
else:
raise EinopsError("Unknown reduction {}. Expect one of {}.".format(operation, _reductions))
if left.has_ellipsis:
n_other_dims = len(left.composition) - 1
if ndim < n_other_dims:
raise EinopsError(f"Wrong shape: expected >={n_other_dims} dims. Received {ndim}-dim tensor.")
ellipsis_ndim = ndim - n_other_dims
ell_axes = [_ellipsis + str(i) for i in range(ellipsis_ndim)]
left_composition = []
for composite_axis in left.composition:
if composite_axis == _ellipsis:
for axis in ell_axes:
left_composition.append([axis])
else:
left_composition.append(composite_axis)
rght_composition = []
for composite_axis in rght.composition:
if composite_axis == _ellipsis:
for axis in ell_axes:
rght_composition.append([axis])
else:
group = []
for axis in composite_axis:
if axis == _ellipsis:
group.extend(ell_axes)
else:
group.append(axis)
rght_composition.append(group)
left.identifiers.update(ell_axes)
left.identifiers.remove(_ellipsis)
if rght.has_ellipsis:
rght.identifiers.update(ell_axes)
rght.identifiers.remove(_ellipsis)
else:
if ndim != len(left.composition):
raise EinopsError(f"Wrong shape: expected {len(left.composition)} dims. Received {ndim}-dim tensor.")
left_composition = left.composition
rght_composition = rght.composition
# parsing all dimensions to find out lengths
axis_name2known_length: Dict[Union[str, AnonymousAxis], int] = OrderedDict()
for composite_axis in left_composition:
for axis_name in composite_axis:
if isinstance(axis_name, AnonymousAxis):
axis_name2known_length[axis_name] = axis_name.value
else:
axis_name2known_length[axis_name] = _unknown_axis_length
# axis_ids_after_first_reshape = range(len(axis_name2known_length)) at this point
repeat_axes_names = []
for axis_name in rght.identifiers:
if axis_name not in axis_name2known_length:
if isinstance(axis_name, AnonymousAxis):
axis_name2known_length[axis_name] = axis_name.value
else:
axis_name2known_length[axis_name] = _unknown_axis_length
repeat_axes_names.append(axis_name)
axis_name2position = {name: position for position, name in enumerate(axis_name2known_length)}
# axes provided as kwargs
for elementary_axis in axes_names:
if not ParsedExpression.check_axis_name(elementary_axis):
raise EinopsError("Invalid name for an axis", elementary_axis)
if elementary_axis not in axis_name2known_length:
raise EinopsError("Axis {} is not used in transform".format(elementary_axis))
axis_name2known_length[elementary_axis] = _expected_axis_length
input_axes_known_unknown = []
# some shapes are inferred later - all information is prepared for faster inference
for i, composite_axis in enumerate(left_composition):
known: Set[str] = {axis for axis in composite_axis if axis_name2known_length[axis] != _unknown_axis_length}
unknown: Set[str] = {axis for axis in composite_axis if axis_name2known_length[axis] == _unknown_axis_length}
if len(unknown) > 1:
raise EinopsError("Could not infer sizes for {}".format(unknown))
assert len(unknown) + len(known) == len(composite_axis)
input_axes_known_unknown.append(
([axis_name2position[axis] for axis in known], [axis_name2position[axis] for axis in unknown])
)
axis_position_after_reduction: Dict[str, int] = {}
for axis_name in itertools.chain(*left_composition):
if axis_name in rght.identifiers:
axis_position_after_reduction[axis_name] = len(axis_position_after_reduction)
result_axes_grouping: List[List[int]] = [
[axis_name2position[axis] for axis in composite_axis] for i, composite_axis in enumerate(rght_composition)
]
ordered_axis_left = list(itertools.chain(*left_composition))
ordered_axis_rght = list(itertools.chain(*rght_composition))
reduced_axes = [axis for axis in ordered_axis_left if axis not in rght.identifiers]
order_after_transposition = [axis for axis in ordered_axis_rght if axis in left.identifiers] + reduced_axes
axes_permutation = [ordered_axis_left.index(axis) for axis in order_after_transposition]
added_axes = {
i: axis_name2position[axis_name]
for i, axis_name in enumerate(ordered_axis_rght)
if axis_name not in left.identifiers
}
first_reduced_axis = len(order_after_transposition) - len(reduced_axes)
return TransformRecipe(
elementary_axes_lengths=list(axis_name2known_length.values()),
axis_name2elementary_axis={axis: axis_name2position[axis] for axis in axes_names},
input_composition_known_unknown=input_axes_known_unknown,
axes_permutation=axes_permutation,
first_reduced_axis=first_reduced_axis,
added_axes=added_axes,
output_composite_axes=result_axes_grouping,
)
def _prepare_recipes_for_all_dims(
pattern: str, operation: Reduction, axes_names: Tuple[str, ...]
) -> Dict[int, TransformRecipe]:
"""
Internal function, used in layers.
Layer makes all recipe creation when it is initialized, thus to keep recipes simple we pre-compute for all dims
"""
left_str, rght_str = pattern.split("->")
left = ParsedExpression(left_str)
dims = [len(left.composition)]
if left.has_ellipsis:
dims = [len(left.composition) - 1 + ellipsis_dims for ellipsis_dims in range(8)]
return {ndim: _prepare_transformation_recipe(pattern, operation, axes_names, ndim=ndim) for ndim in dims}
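# Illustrative sketch: a fixed-rank pattern like 'b c h w -> b (c h w)' yields a
# single 4-dim recipe, while an ellipsis pattern like 'b ... -> (b ...)'
# pre-builds recipes for ndim 1 through 8, so layers never build recipes at call time.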
def reduce(tensor: Union[Tensor, List[Tensor]], pattern: str, reduction: Reduction, **axes_lengths: int) -> Tensor:
"""
    einops.reduce provides a combination of reordering and reduction using reader-friendly notation.
Examples for reduce operation:
```python
>>> x = np.random.randn(100, 32, 64)
# perform max-reduction on the first axis
>>> y = reduce(x, 't b c -> b c', 'max')
# same as previous, but with clearer axes meaning
>>> y = reduce(x, 'time batch channel -> batch channel', 'max')
>>> x = np.random.randn(10, 20, 30, 40)
# 2d max-pooling with kernel size = 2 * 2 for image processing
>>> y1 = reduce(x, 'b c (h1 h2) (w1 w2) -> b c h1 w1', 'max', h2=2, w2=2)
# if one wants to go back to the original height and width, depth-to-space trick can be applied
>>> y2 = rearrange(y1, 'b (c h2 w2) h1 w1 -> b c (h1 h2) (w1 w2)', h2=2, w2=2)
>>> assert parse_shape(x, 'b _ h w') == parse_shape(y2, 'b _ h w')
# Adaptive 2d max-pooling to 3 * 4 grid
>>> reduce(x, 'b c (h1 h2) (w1 w2) -> b c h1 w1', 'max', h1=3, w1=4).shape
(10, 20, 3, 4)
# Global average pooling
>>> reduce(x, 'b c h w -> b c', 'mean').shape
(10, 20)
# Subtracting mean over batch for each channel
>>> y = x - reduce(x, 'b c h w -> () c () ()', 'mean')
# Subtracting per-image mean for each channel
>>> y = x - reduce(x, 'b c h w -> b c () ()', 'mean')
```
Parameters:
        tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch).
list of tensors is also accepted, those should be of the same type and shape
pattern: string, reduction pattern
reduction: one of available reductions ('min', 'max', 'sum', 'mean', 'prod'), case-sensitive
alternatively, a callable f(tensor, reduced_axes) -> tensor can be provided.
This allows using various reductions, examples: np.max, tf.reduce_logsumexp, torch.var, etc.
axes_lengths: any additional specifications for dimensions
Returns:
tensor of the same type as input
"""
try:
if isinstance(tensor, list):
if len(tensor) == 0:
raise TypeError("Rearrange/Reduce/Repeat can't be applied to an empty list")
backend = get_backend(tensor[0])
tensor = backend.stack_on_zeroth_dimension(tensor)
else:
backend = get_backend(tensor)
hashable_axes_lengths = tuple(axes_lengths.items())
shape = backend.shape(tensor)
recipe = _prepare_transformation_recipe(pattern, reduction, axes_names=tuple(axes_lengths), ndim=len(shape))
return _apply_recipe(
backend, recipe, cast(Tensor, tensor), reduction_type=reduction, axes_lengths=hashable_axes_lengths
)
except EinopsError as e:
message = ' Error while processing {}-reduction pattern "{}".'.format(reduction, pattern)
if not isinstance(tensor, list):
message += "\n Input tensor shape: {}. ".format(shape)
else:
message += "\n Input is list. "
message += "Additional info: {}.".format(axes_lengths)
raise EinopsError(message + "\n {}".format(e))
def rearrange(tensor: Union[Tensor, List[Tensor]], pattern: str, **axes_lengths) -> Tensor:
"""
einops.rearrange is a reader-friendly smart element reordering for multidimensional tensors.
This operation includes functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze,
stack, concatenate and other operations.
Examples for rearrange operation:
```python
# suppose we have a set of 32 images in "h w c" format (height-width-channel)
>>> images = [np.random.randn(30, 40, 3) for _ in range(32)]
# stack along first (batch) axis, output is a single array
>>> rearrange(images, 'b h w c -> b h w c').shape
(32, 30, 40, 3)
# concatenate images along height (vertical axis), 960 = 32 * 30
>>> rearrange(images, 'b h w c -> (b h) w c').shape
(960, 40, 3)
    # concatenate images along horizontal axis, 1280 = 32 * 40
>>> rearrange(images, 'b h w c -> h (b w) c').shape
(30, 1280, 3)
# reordered axes to "b c h w" format for deep learning
>>> rearrange(images, 'b h w c -> b c h w').shape
(32, 3, 30, 40)
# flattened each image into a vector, 3600 = 30 * 40 * 3
>>> rearrange(images, 'b h w c -> b (c h w)').shape
(32, 3600)
# split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2
>>> rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape
(128, 15, 20, 3)
# space-to-depth operation
>>> rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape
(32, 15, 20, 12)
```
    When composing axes, C-order enumeration is used (consecutive elements have different last axis).
Find more examples in einops tutorial.
Parameters:
tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch).
list of tensors is also accepted, those should be of the same type and shape
pattern: string, rearrangement pattern
axes_lengths: any additional specifications for dimensions
Returns:
tensor of the same type as input. If possible, a view to the original tensor is returned.
"""
return reduce(tensor, pattern, reduction="rearrange", **axes_lengths)
def repeat(tensor: Union[Tensor, List[Tensor]], pattern: str, **axes_lengths) -> Tensor:
"""
einops.repeat allows reordering elements and repeating them in arbitrary combinations.
This operation includes functionality of repeat, tile, broadcast functions.
Examples for repeat operation:
```python
# a grayscale image (of shape height x width)
>>> image = np.random.randn(30, 40)
# change it to RGB format by repeating in each channel
>>> repeat(image, 'h w -> h w c', c=3).shape
(30, 40, 3)
# repeat image 2 times along height (vertical axis)
>>> repeat(image, 'h w -> (repeat h) w', repeat=2).shape
(60, 40)
    # repeat image 2 times along height and 3 times along width
>>> repeat(image, 'h w -> (h2 h) (w3 w)', h2=2, w3=3).shape
(60, 120)
# convert each pixel to a small square 2x2. Upsample image by 2x
>>> repeat(image, 'h w -> (h h2) (w w2)', h2=2, w2=2).shape
(60, 80)
# pixelate image first by downsampling by 2x, then upsampling
>>> downsampled = reduce(image, '(h h2) (w w2) -> h w', 'mean', h2=2, w2=2)
>>> repeat(downsampled, 'h w -> (h h2) (w w2)', h2=2, w2=2).shape
(30, 40)
```
    When composing axes, C-order enumeration is used (consecutive elements have different last axis).
Find more examples in einops tutorial.
Parameters:
tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch).
list of tensors is also accepted, those should be of the same type and shape
pattern: string, rearrangement pattern
axes_lengths: any additional specifications for dimensions
Returns:
Tensor of the same type as input. If possible, a view to the original tensor is returned.
"""
return reduce(tensor, pattern, reduction="repeat", **axes_lengths)
def parse_shape(x, pattern: str) -> dict:
"""
Parse a tensor shape to dictionary mapping axes names to their lengths.
```python
# Use underscore to skip the dimension in parsing.
>>> x = np.zeros([2, 3, 5, 7])
>>> parse_shape(x, 'batch _ h w')
{'batch': 2, 'h': 5, 'w': 7}
# `parse_shape` output can be used to specify axes_lengths for other operations:
>>> y = np.zeros([700])
>>> rearrange(y, '(b c h w) -> b c h w', **parse_shape(x, 'b _ h w')).shape
(2, 10, 5, 7)
```
    For symbolic frameworks, this may return symbols rather than integers.
Parameters:
x: tensor of any supported framework
pattern: str, space separated names for axes, underscore means skip axis
Returns:
dict, maps axes names to their lengths
"""
exp = ParsedExpression(pattern, allow_underscore=True)
shape = get_backend(x).shape(x)
if exp.has_composed_axes():
raise RuntimeError(f"Can't parse shape with composite axes: {pattern} {shape}")
if len(shape) != len(exp.composition):
if exp.has_ellipsis:
if len(shape) < len(exp.composition) - 1:
raise RuntimeError(f"Can't parse shape with this number of dimensions: {pattern} {shape}")
else:
raise RuntimeError(f"Can't parse shape with different number of dimensions: {pattern} {shape}")
if exp.has_ellipsis:
ellipsis_idx = exp.composition.index(_ellipsis)
composition = (
exp.composition[:ellipsis_idx]
+ ["_"] * (len(shape) - len(exp.composition) + 1)
+ exp.composition[ellipsis_idx + 1 :]
)
else:
composition = exp.composition
result = {}
for (axis_name,), axis_length in zip(composition, shape): # type: ignore
if axis_name != "_":
result[axis_name] = axis_length
return result
# _enumerate_directions is not exposed in the public API
def _enumerate_directions(x):
"""
For an n-dimensional tensor, returns tensors to enumerate each axis.
```python
x = np.zeros([2, 3, 4]) # or any other tensor
i, j, k = _enumerate_directions(x)
result = i + 2*j + 3*k
```
    `result[i, j, k] = i + 2 * j + 3 * k`, and `result` has the same shape as `x`
Works very similarly to numpy.ogrid (open indexing grid)
"""
backend = get_backend(x)
shape = backend.shape(x)
result = []
for axis_id, axis_length in enumerate(shape):
shape = [1] * len(shape)
shape[axis_id] = axis_length
result.append(backend.reshape(backend.arange(0, axis_length), shape))
return result
# to avoid importing numpy
np_ndarray = Any
def asnumpy(tensor) -> np_ndarray:
"""
Convert a tensor of an imperative framework (i.e. numpy/cupy/torch/jax/etc.) to `numpy.ndarray`
Parameters:
tensor: tensor of any known imperative framework
Returns:
`numpy.ndarray`, converted to numpy
"""
return get_backend(tensor).to_numpy(tensor)
def _validate_einsum_axis_name(axis_name):
if len(axis_name) == 0:
raise NotImplementedError("Singleton () axes are not yet supported in einsum.")
if len(axis_name) > 1:
raise NotImplementedError("Shape rearrangement is not yet supported in einsum.")
axis_name = axis_name[0]
if isinstance(axis_name, AnonymousAxis):
raise NotImplementedError("Anonymous axes are not yet supported in einsum.")
if len(axis_name) == 0:
raise RuntimeError("Encountered empty axis name in einsum.")
if not isinstance(axis_name, str):
raise RuntimeError("Axis name in einsum must be a string.")
@functools.lru_cache(256)
def _compactify_pattern_for_einsum(pattern: str) -> str:
if "->" not in pattern:
# numpy allows this, so make sure users
# don't accidentally do something like this.
raise ValueError("Einsum pattern must contain '->'.")
lefts_str, right_str = pattern.split("->")
lefts = [ParsedExpression(left, allow_underscore=True, allow_duplicates=True) for left in lefts_str.split(",")]
right = ParsedExpression(right_str, allow_underscore=True)
# Start from 'a' and go up to 'Z'
output_axis_names = string.ascii_letters
i = 0
axis_name_mapping = {}
left_patterns = []
for left in lefts:
left_pattern = ""
for raw_axis_name in left.composition:
if raw_axis_name == _ellipsis:
left_pattern += "..."
continue
_validate_einsum_axis_name(raw_axis_name)
axis_name = raw_axis_name[0]
if axis_name not in axis_name_mapping:
if i >= len(output_axis_names):
raise RuntimeError("Too many axes in einsum.")
axis_name_mapping[axis_name] = output_axis_names[i]
i += 1
left_pattern += axis_name_mapping[axis_name]
left_patterns.append(left_pattern)
compact_pattern = ",".join(left_patterns) + "->"
for raw_axis_name in right.composition:
if raw_axis_name == _ellipsis:
compact_pattern += "..."
continue
_validate_einsum_axis_name(raw_axis_name)
axis_name = raw_axis_name[0]
if axis_name not in axis_name_mapping:
raise EinopsError(f"Unknown axis {axis_name} on right side of einsum {pattern}.")
compact_pattern += axis_name_mapping[axis_name]
return compact_pattern
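# Illustrative sketch of the compactification above: named axes are remapped
# to single letters in order of first appearance, while '...' passes through:
#   >>> _compactify_pattern_for_einsum('batch channel, channel out -> batch out')
#   'ab,bc->ac'
#   >>> _compactify_pattern_for_einsum('... in_dim, out_dim in_dim -> ... out_dim')
#   '...a,ba->...b'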
@typing.overload
def einsum(tensor: Tensor, pattern: str, /) -> Tensor:
...
@typing.overload
def einsum(tensor1: Tensor, tensor2: Tensor, pattern: str, /) -> Tensor:
...
@typing.overload
def einsum(tensor1: Tensor, tensor2: Tensor, tensor3: Tensor, pattern: str, /) -> Tensor:
...
@typing.overload
def einsum(tensor1: Tensor, tensor2: Tensor, tensor3: Tensor, tensor4: Tensor, pattern: str, /) -> Tensor:
...
def einsum(*tensors_and_pattern: Union[Tensor, str]) -> Tensor:
"""
einops.einsum calls einsum operations with einops-style named
axes indexing, computing tensor products with an arbitrary
number of tensors. Unlike typical einsum syntax, here you must
pass tensors first, and then the pattern.
Also, note that rearrange operations such as `"(batch chan) out"`,
or singleton axes `()`, are not currently supported.
Examples:
For a given pattern such as:
```python
>>> x, y, z = np.random.randn(3, 20, 20, 20)
>>> output = einsum(x, y, z, "a b c, c b d, a g k -> a b k")
```
the following formula is computed:
```tex
output[a, b, k] =
\sum_{c, d, g} x[a, b, c] * y[c, b, d] * z[a, g, k]
```
where the summation over `c`, `d`, and `g` is performed
because those axis names do not appear on the right-hand side.
Let's see some additional examples:
```python
# Filter a set of images:
>>> batched_images = np.random.randn(128, 16, 16)
>>> filters = np.random.randn(16, 16, 30)
>>> result = einsum(batched_images, filters,
... "batch h w, h w channel -> batch channel")
>>> result.shape
(128, 30)
# Matrix multiplication, with an unknown input shape:
>>> batch_shape = (50, 30)
>>> data = np.random.randn(*batch_shape, 20)
>>> weights = np.random.randn(10, 20)
>>> result = einsum(weights, data,
... "out_dim in_dim, ... in_dim -> ... out_dim")
>>> result.shape
(50, 30, 10)
# Matrix trace on a single tensor:
>>> matrix = np.random.randn(10, 10)
>>> result = einsum(matrix, "i i ->")
>>> result.shape
()
```
Parameters:
tensors_and_pattern:
tensors: tensors of any supported library (numpy, tensorflow, pytorch, jax).
pattern: string, einsum pattern, with commas
separating specifications for each tensor.
pattern should be provided after all tensors.
Returns:
Tensor of the same type as input, after processing with einsum.
"""
if len(tensors_and_pattern) <= 1:
raise ValueError(
"`einops.einsum` takes at minimum two arguments: the tensors (at least one), followed by the pattern."
)
pattern = tensors_and_pattern[-1]
if not isinstance(pattern, str):
raise ValueError(
"The last argument passed to `einops.einsum` must be a string, representing the einsum pattern."
)
tensors = tensors_and_pattern[:-1]
pattern = _compactify_pattern_for_einsum(pattern)
return get_backend(tensors[0]).einsum(pattern, *tensors)
| einops-master | einops/einops.py |
from typing import List, Tuple, Sequence
from .einops import Tensor, Reduction, EinopsError, _prepare_transformation_recipe, _apply_recipe_array_api
from .packing import analyze_pattern, prod
def reduce(tensor: Tensor, pattern: str, reduction: Reduction, **axes_lengths: int) -> Tensor:
if isinstance(tensor, list):
if len(tensor) == 0:
raise TypeError("Einops can't be applied to an empty list")
xp = tensor[0].__array_namespace__()
tensor = xp.stack(tensor)
else:
xp = tensor.__array_namespace__()
try:
hashable_axes_lengths = tuple(axes_lengths.items())
recipe = _prepare_transformation_recipe(pattern, reduction, axes_names=tuple(axes_lengths), ndim=tensor.ndim)
return _apply_recipe_array_api(
xp,
recipe=recipe, tensor=tensor, reduction_type=reduction, axes_lengths=hashable_axes_lengths,
)
except EinopsError as e:
message = ' Error while processing {}-reduction pattern "{}".'.format(reduction, pattern)
if not isinstance(tensor, list):
message += "\n Input tensor shape: {}. ".format(tensor.shape)
else:
message += "\n Input is list. "
message += "Additional info: {}.".format(axes_lengths)
raise EinopsError(message + "\n {}".format(e))
def repeat(tensor: Tensor, pattern: str, **axes_lengths) -> Tensor:
return reduce(tensor, pattern, reduction="repeat", **axes_lengths)
def rearrange(tensor: Tensor, pattern: str, **axes_lengths) -> Tensor:
return reduce(tensor, pattern, reduction="rearrange", **axes_lengths)
def asnumpy(tensor: Tensor):
import numpy as np
return np.from_dlpack(tensor)
Shape = Tuple
def pack(tensors: Sequence[Tensor], pattern: str) -> Tuple[Tensor, List[Shape]]:
n_axes_before, n_axes_after, min_axes = analyze_pattern(pattern, 'pack')
xp = tensors[0].__array_namespace__()
reshaped_tensors: List[Tensor] = []
packed_shapes: List[Shape] = []
for i, tensor in enumerate(tensors):
shape = tensor.shape
if len(shape) < min_axes:
raise EinopsError(f'packed tensor #{i} (enumeration starts with 0) has shape {shape}, '
f'while pattern {pattern} assumes at least {min_axes} axes')
axis_after_packed_axes = len(shape) - n_axes_after
packed_shapes.append(shape[n_axes_before:axis_after_packed_axes])
reshaped_tensors.append(xp.reshape(tensor, (*shape[:n_axes_before], -1, *shape[axis_after_packed_axes:])))
return xp.concat(reshaped_tensors, axis=n_axes_before), packed_shapes
def unpack(tensor: Tensor, packed_shapes: List[Shape], pattern: str) -> List[Tensor]:
xp = tensor.__array_namespace__()
n_axes_before, n_axes_after, min_axes = analyze_pattern(pattern, opname='unpack')
# backend = get_backend(tensor)
input_shape = tensor.shape
if len(input_shape) != n_axes_before + 1 + n_axes_after:
raise EinopsError(f'unpack(..., {pattern}) received input of wrong dim with shape {input_shape}')
unpacked_axis: int = n_axes_before
lengths_of_composed_axes: List[int] = [
-1 if -1 in p_shape else prod(p_shape)
for p_shape in packed_shapes
]
n_unknown_composed_axes = sum(x == -1 for x in lengths_of_composed_axes)
if n_unknown_composed_axes > 1:
raise EinopsError(
f"unpack(..., {pattern}) received more than one -1 in {packed_shapes} and can't infer dimensions"
)
# the following manipulations allow skipping some shape verifications
# and leaving them to backends
# [[], [2, 3], [4], [-1, 5], [6]] < examples of packed_axis
# split positions when computed should be
# [0, 1, 7, 11, N-6 , N ], where N = length of axis
split_positions = [0] * len(packed_shapes) + [input_shape[unpacked_axis]]
if n_unknown_composed_axes == 0:
for i, x in enumerate(lengths_of_composed_axes[:-1]):
split_positions[i + 1] = split_positions[i] + x
else:
unknown_composed_axis: int = lengths_of_composed_axes.index(-1)
for i in range(unknown_composed_axis):
split_positions[i + 1] = split_positions[i] + lengths_of_composed_axes[i]
for j in range(unknown_composed_axis + 1, len(lengths_of_composed_axes))[::-1]:
split_positions[j] = split_positions[j + 1] - lengths_of_composed_axes[j]
shape_start = input_shape[:unpacked_axis]
shape_end = input_shape[unpacked_axis + 1:]
slice_filler = (slice(None, None),) * unpacked_axis
try:
return [
xp.reshape(
# shortest way slice arbitrary axis
tensor[(*slice_filler, slice(split_positions[i], split_positions[i + 1]), ...)],
(*shape_start, *element_shape, *shape_end)
)
for i, element_shape in enumerate(packed_shapes)
]
except BaseException:
# this hits if there is an error during reshapes, which means passed shapes were incorrect
raise RuntimeError(f'Error during unpack(..., "{pattern}"): could not split axis of size {split_positions[-1]}'
f' into requested {packed_shapes}')
| einops-master | einops/array_api.py |
from einops import EinopsError
import keyword
import warnings
from typing import List, Optional, Set, Tuple, Union
_ellipsis: str = '…'  # NB: this is a single unicode symbol. A string is used (rather than a list) since it can still be iterated
class AnonymousAxis(object):
"""Important thing: all instances of this class are not equal to each other """
def __init__(self, value: str):
self.value = int(value)
if self.value <= 1:
if self.value == 1:
raise EinopsError('No need to create anonymous axis of length 1. Report this as an issue')
else:
raise EinopsError('Anonymous axis should have positive length, not {}'.format(self.value))
def __repr__(self):
return "{}-axis".format(str(self.value))
class ParsedExpression:
"""
non-mutable structure that contains information about one side of expression (e.g. 'b c (h w)')
and keeps some information important for downstream
"""
def __init__(self, expression: str, *, allow_underscore: bool = False,
allow_duplicates: bool = False):
self.has_ellipsis: bool = False
self.has_ellipsis_parenthesized: Optional[bool] = None
self.identifiers: Set[str] = set()
# that's axes like 2, 3, 4 or 5. Axes with size 1 are exceptional and replaced with empty composition
self.has_non_unitary_anonymous_axes: bool = False
# composition keeps structure of composite axes, see how different corner cases are handled in tests
self.composition: List[Union[List[str], str]] = []
if '.' in expression:
if '...' not in expression:
raise EinopsError('Expression may contain dots only inside ellipsis (...)')
if str.count(expression, '...') != 1 or str.count(expression, '.') != 3:
raise EinopsError(
'Expression may contain dots only inside ellipsis (...); only one ellipsis per tensor is allowed')
expression = expression.replace('...', _ellipsis)
self.has_ellipsis = True
bracket_group: Optional[List[str]] = None
def add_axis_name(x):
if x in self.identifiers:
if not (allow_underscore and x == "_") and not allow_duplicates:
raise EinopsError('Indexing expression contains duplicate dimension "{}"'.format(x))
if x == _ellipsis:
self.identifiers.add(_ellipsis)
if bracket_group is None:
self.composition.append(_ellipsis)
self.has_ellipsis_parenthesized = False
else:
bracket_group.append(_ellipsis)
self.has_ellipsis_parenthesized = True
else:
is_number = str.isdecimal(x)
if is_number and int(x) == 1:
# handling the case of anonymous axis of length 1
if bracket_group is None:
self.composition.append([])
else:
pass # no need to think about 1s inside parenthesis
return
is_axis_name, reason = self.check_axis_name_return_reason(x, allow_underscore=allow_underscore)
if not (is_number or is_axis_name):
raise EinopsError('Invalid axis identifier: {}\n{}'.format(x, reason))
if is_number:
x = AnonymousAxis(x)
self.identifiers.add(x)
if is_number:
self.has_non_unitary_anonymous_axes = True
if bracket_group is None:
self.composition.append([x])
else:
bracket_group.append(x)
current_identifier = None
for char in expression:
if char in '() ':
if current_identifier is not None:
add_axis_name(current_identifier)
current_identifier = None
if char == '(':
if bracket_group is not None:
raise EinopsError("Axis composition is one-level (brackets inside brackets not allowed)")
bracket_group = []
elif char == ')':
if bracket_group is None:
raise EinopsError('Brackets are not balanced')
self.composition.append(bracket_group)
bracket_group = None
elif str.isalnum(char) or char in ['_', _ellipsis]:
if current_identifier is None:
current_identifier = char
else:
current_identifier += char
else:
raise EinopsError("Unknown character '{}'".format(char))
if bracket_group is not None:
raise EinopsError('Imbalanced parentheses in expression: "{}"'.format(expression))
if current_identifier is not None:
add_axis_name(current_identifier)
def flat_axes_order(self) -> List:
result = []
for composed_axis in self.composition:
assert isinstance(composed_axis, list), 'does not work with ellipsis'
for axis in composed_axis:
result.append(axis)
return result
def has_composed_axes(self) -> bool:
# this will ignore 1 inside brackets
for axes in self.composition:
if isinstance(axes, list) and len(axes) > 1:
return True
return False
@staticmethod
def check_axis_name_return_reason(name: str, allow_underscore: bool = False) -> Tuple[bool, str]:
if not str.isidentifier(name):
return False, 'not a valid python identifier'
elif name[0] == '_' or name[-1] == '_':
if name == '_' and allow_underscore:
return True, ''
return False, 'axis name should not start or end with underscore'
else:
if keyword.iskeyword(name):
warnings.warn("It is discouraged to use axes names that are keywords: {}".format(name), RuntimeWarning)
if name in ['axis']:
warnings.warn("It is discouraged to use 'axis' as an axis name "
"and will raise an error in future", FutureWarning)
return True, ''
@staticmethod
def check_axis_name(name: str) -> bool:
"""
Valid axes names are python identifiers except keywords,
and additionally should not start or end with underscore
"""
is_valid, _reason = ParsedExpression.check_axis_name_return_reason(name)
return is_valid
| einops-master | einops/parsing.py |
__author__ = 'Alex Rogozhnikov'
__version__ = '0.7.0rc2'
class EinopsError(RuntimeError):
""" Runtime error thrown by einops """
pass
__all__ = ['rearrange', 'reduce', 'repeat', 'einsum',
'pack', 'unpack',
'parse_shape', 'asnumpy', 'EinopsError']
from .einops import rearrange, reduce, repeat, einsum, parse_shape, asnumpy
from .packing import pack, unpack | einops-master | einops/__init__.py |
from functools import lru_cache
from typing import List, Union, TypeVar, Tuple, Sequence
from einops import EinopsError
from einops._backends import get_backend
from einops.parsing import ParsedExpression
Tensor = TypeVar('Tensor')
Shape = Union[Tuple[int, ...], List[int]]
@lru_cache(maxsize=128)
def analyze_pattern(pattern: str, opname: str) -> Tuple[int, int, int]:
# Maybe some validation of identifiers?
axes = pattern.split()
axes_set = set(axes)
if len(axes) != len(axes_set):
raise EinopsError(f'Duplicates in axes names in {opname}(..., "{pattern}")')
if '*' not in axes_set:
raise EinopsError(f'No *-axis in {opname}(..., "{pattern}")')
for axis in axes:
if axis != '*':
is_valid, reason = ParsedExpression.check_axis_name_return_reason(axis)
if not is_valid:
raise EinopsError(f'Invalid axis name {axis} in {opname}(..., "{pattern}")')
n_axes_before = axes.index('*')
n_axes_after = len(axes) - n_axes_before - 1
min_axes = n_axes_before + n_axes_after
return n_axes_before, n_axes_after, min_axes
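# Illustrative sketch: the pattern is split around the single '*'-axis,
# yielding the number of axes before it, after it, and the minimal
# dimensionality an input tensor must have:
#   >>> analyze_pattern('i j * k', 'pack')
#   (2, 1, 3)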
def pack(tensors: Sequence[Tensor], pattern: str) -> Tuple[Tensor, List[Shape]]:
"""
Packs several tensors into one.
See einops tutorial for introduction into packing (and how it replaces stack and concatenation).
Parameters:
tensors: tensors to be packed, can be of different dimensionality
pattern: pattern that is shared for all inputs and output, e.g. "i j * k" or "batch seq *"
Returns:
(packed_tensor, packed_shapes aka PS)
Example:
```python
>>> from numpy import zeros as Z
>>> inputs = [Z([2, 3, 5]), Z([2, 3, 7, 5]), Z([2, 3, 7, 9, 5])]
>>> packed, ps = pack(inputs, 'i j * k')
>>> packed.shape, ps
((2, 3, 71, 5), [(), (7,), (7, 9)])
```
In this example, axes were matched to: i=2, j=3, k=5 based on order (first, second, and last).
All other axes were 'packed' and concatenated.
PS (packed shapes) contains information about axes that were matched to '*' in every input.
Resulting tensor has as many elements as all inputs in total.
Packing can be reversed with unpack, which additionally needs PS (packed shapes) to reconstruct order.
```python
>>> inputs_unpacked = unpack(packed, ps, 'i j * k')
>>> [x.shape for x in inputs_unpacked]
[(2, 3, 5), (2, 3, 7, 5), (2, 3, 7, 9, 5)]
```
Read the tutorial for introduction and application scenarios.
"""
n_axes_before, n_axes_after, min_axes = analyze_pattern(pattern, 'pack')
# packing zero tensors is illegal
backend = get_backend(tensors[0])
reshaped_tensors: List[Tensor] = []
packed_shapes: List[Shape] = []
for i, tensor in enumerate(tensors):
shape = backend.shape(tensor)
if len(shape) < min_axes:
raise EinopsError(f'packed tensor #{i} (enumeration starts with 0) has shape {shape}, '
f'while pattern {pattern} assumes at least {min_axes} axes')
axis_after_packed_axes = len(shape) - n_axes_after
packed_shapes.append(shape[n_axes_before:axis_after_packed_axes])
reshaped_tensors.append(
backend.reshape(tensor, (*shape[:n_axes_before], -1, *shape[axis_after_packed_axes:]))
)
return backend.concat(reshaped_tensors, axis=n_axes_before), packed_shapes
def prod(x: Shape) -> int:
result = 1
for i in x:
result *= i
return result
def unpack(tensor: Tensor, packed_shapes: List[Shape], pattern: str) -> List[Tensor]:
"""
Unpacks a single tensor into several by splitting over a selected axes.
See einops tutorial for introduction into packing (and how it replaces stack and concatenation).
Parameters:
tensor: tensor to be unpacked
packed_shapes: packed_shapes (aka PS) is a list of shapes that take place of '*' in each output.
output will contain a single tensor for every provided shape
pattern: pattern that is shared for input and all outputs, e.g. "i j * k" or "batch seq *",
where * designates an axis to be unpacked
Returns:
list of tensors
If framework supports views, results are views to the original tensor.
Example:
```python
>>> from numpy import zeros as Z
>>> inputs = [Z([2, 3, 5]), Z([2, 3, 7, 5]), Z([2, 3, 7, 9, 5])]
>>> packed, ps = pack(inputs, 'i j * k')
>>> packed.shape, ps
((2, 3, 71, 5), [(), (7,), (7, 9)])
```
In this example, axes were matched to: i=2, j=3, k=5 based on order (first, second, and last).
All other axes were 'packed' and concatenated.
PS (packed shapes) contains information about axes that were matched to '*' in every input.
Resulting tensor has as many elements as all inputs in total.
Packing can be reversed with unpack, which additionally needs PS (packed shapes) to reconstruct order.
```python
>>> inputs_unpacked = unpack(packed, ps, 'i j * k')
>>> [x.shape for x in inputs_unpacked]
[(2, 3, 5), (2, 3, 7, 5), (2, 3, 7, 9, 5)]
```
Read the tutorial for introduction and application scenarios.
"""
n_axes_before, n_axes_after, min_axes = analyze_pattern(pattern, opname='unpack')
backend = get_backend(tensor)
input_shape = backend.shape(tensor)
if len(input_shape) != n_axes_before + 1 + n_axes_after:
raise EinopsError(f'unpack(..., {pattern}) received input of wrong dim with shape {input_shape}')
unpacked_axis: int = n_axes_before
lengths_of_composed_axes: List[int] = [
-1 if -1 in p_shape else prod(p_shape)
for p_shape in packed_shapes
]
n_unknown_composed_axes = sum(x == -1 for x in lengths_of_composed_axes)
if n_unknown_composed_axes > 1:
raise EinopsError(
f"unpack(..., {pattern}) received more than one -1 in {packed_shapes} and can't infer dimensions"
)
# the following manipulations allow skipping some shape verifications
# and leaving them to backends
# [[], [2, 3], [4], [-1, 5], [6]] < examples of packed_axis
# split positions when computed should be
# [0, 1, 7, 11, N-6 , N ], where N = length of axis
split_positions = [0] * len(packed_shapes) + [input_shape[unpacked_axis]]
if n_unknown_composed_axes == 0:
for i, x in enumerate(lengths_of_composed_axes[:-1]):
split_positions[i + 1] = split_positions[i] + x
else:
unknown_composed_axis: int = lengths_of_composed_axes.index(-1)
for i in range(unknown_composed_axis):
split_positions[i + 1] = split_positions[i] + lengths_of_composed_axes[i]
for j in range(unknown_composed_axis + 1, len(lengths_of_composed_axes))[::-1]:
split_positions[j] = split_positions[j + 1] - lengths_of_composed_axes[j]
shape_start = input_shape[:unpacked_axis]
shape_end = input_shape[unpacked_axis + 1:]
slice_filler = (slice(None, None),) * unpacked_axis
try:
return [
backend.reshape(
# shortest way slice arbitrary axis
tensor[(*slice_filler, slice(split_positions[i], split_positions[i + 1]))],
(*shape_start, *element_shape, *shape_end)
)
for i, element_shape in enumerate(packed_shapes)
]
except BaseException:
# this hits if there is an error during reshapes, which means passed shapes were incorrect
raise RuntimeError(f'Error during unpack(..., "{pattern}"): could not split axis of size {split_positions[-1]}'
f' into requested {packed_shapes}')
| einops-master | einops/packing.py |
"""
Specialization of einops for torch.
Unfortunately, torch's jit scripting mechanism isn't strong enough,
so a number of additional moves is needed
to support scripting, at least for layers.
The design of the main operations (dynamic resolution by lookup) is unlikely
to be supported by torch.jit.script,
but torch.compile seems to work with the operations just fine.
"""
import warnings
from typing import Dict, List, Tuple
import torch
from einops.einops import TransformRecipe, _reconstruct_from_shape_uncached
class TorchJitBackend:
"""
Completely static backend that mimics part of normal backend functionality
but restricted to be within torchscript.
"""
@staticmethod
def reduce(x: torch.Tensor, operation: str, reduced_axes: List[int]):
if operation == "min":
return x.amin(dim=reduced_axes)
elif operation == "max":
return x.amax(dim=reduced_axes)
elif operation == "sum":
return x.sum(dim=reduced_axes)
elif operation == "mean":
return x.mean(dim=reduced_axes)
elif operation == "prod":
for i in list(sorted(reduced_axes))[::-1]:
x = x.prod(dim=i)
return x
else:
raise NotImplementedError("Unknown reduction ", operation)
@staticmethod
def transpose(x, axes: List[int]):
return x.permute(axes)
@staticmethod
def stack_on_zeroth_dimension(tensors: List[torch.Tensor]):
return torch.stack(tensors)
@staticmethod
def tile(x, repeats: List[int]):
return x.repeat(repeats)
@staticmethod
def add_axes(x, n_axes: int, pos2len: Dict[int, int]):
repeats = [-1] * n_axes
for axis_position, axis_length in pos2len.items():
x = torch.unsqueeze(x, axis_position)
repeats[axis_position] = axis_length
return x.expand(repeats)
@staticmethod
def is_float_type(x):
return x.dtype in [torch.float16, torch.float32, torch.float64, torch.bfloat16]
@staticmethod
def shape(x):
return x.shape
@staticmethod
def reshape(x, shape: List[int]):
return x.reshape(shape)
# mirrors einops.einops._apply_recipe
def apply_for_scriptable_torch(
recipe: TransformRecipe, tensor: torch.Tensor, reduction_type: str, axes_dims: List[Tuple[str, int]]
) -> torch.Tensor:
backend = TorchJitBackend
(
init_shapes,
axes_reordering,
reduced_axes,
added_axes,
final_shapes,
n_axes_w_added,
) = _reconstruct_from_shape_uncached(recipe, backend.shape(tensor), axes_dims=axes_dims)
if init_shapes is not None:
tensor = backend.reshape(tensor, init_shapes)
if axes_reordering is not None:
tensor = backend.transpose(tensor, axes_reordering)
if len(reduced_axes) > 0:
tensor = backend.reduce(tensor, operation=reduction_type, reduced_axes=reduced_axes)
if len(added_axes) > 0:
tensor = backend.add_axes(tensor, n_axes=n_axes_w_added, pos2len=added_axes)
if final_shapes is not None:
tensor = backend.reshape(tensor, final_shapes)
return tensor
def allow_ops_in_compiled_graph():
if hasattr(torch, "__version__") and torch.__version__[0] < "2":
# torch._dynamo and torch.compile appear in pytorch 2.0
return
try:
from torch._dynamo import allow_in_graph
except ImportError:
warnings.warn("allow_ops_in_compiled_graph failed to import torch: ensure pytorch >=2.0", ImportWarning)
return
from .einops import rearrange, reduce, repeat, einsum
from .packing import pack, unpack
allow_in_graph(rearrange)
allow_in_graph(reduce)
allow_in_graph(repeat)
allow_in_graph(einsum)
allow_in_graph(pack)
allow_in_graph(unpack)
# CF: https://github.com/pytorch/pytorch/blob/2df939aacac68e9621fbd5d876c78d86e72b41e2/torch/_dynamo/__init__.py#L222
global _ops_were_registered_in_torchdynamo
_ops_were_registered_in_torchdynamo = True
# module import automatically registers ops in torchdynamo
allow_ops_in_compiled_graph()
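# Usage sketch (added; assumes pytorch >= 2.0 is installed): because the
# registration above runs on import, einops calls inside a compiled function
# do not cause graph breaks:
#   >>> import torch
#   >>> from einops import rearrange
#   >>> fn = torch.compile(lambda x: rearrange(x, 'b c h w -> b (c h w)'))
#   >>> fn(torch.zeros([2, 3, 4, 5])).shape
#   torch.Size([2, 60])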
| einops-master | einops/_torch_specific.py |
"""
Backends in `einops` are organized to meet the following requirements
- backends are not imported unless they are actually needed, because
- backends may not be installed
- importing all available backends would lead to a significant memory footprint
- backends may be present but installed with errors (yet never used);
importing them may lead to crashes
- a backend should be either symbolic or imperative (tensorflow is both, but that causes problems)
- this determines which methods (from_numpy/to_numpy or create_symbol/eval_symbol) should be defined
- if backend can't (temporarily) provide symbols for shape dimensions, UnknownSize objects are used
"""
import sys
__author__ = "Alex Rogozhnikov"
_loaded_backends: dict = {}
_type2backend: dict = {}
_debug_importing = False
def get_backend(tensor) -> "AbstractBackend":
"""
Returns the correct backend for a tensor (e.g. the numpy backend if tensor is a numpy.ndarray).
If needed, imports package and creates backend
"""
_type = type(tensor)
_result = _type2backend.get(_type, None)
if _result is not None:
return _result
for framework_name, backend in _loaded_backends.items():
if backend.is_appropriate_type(tensor):
_type2backend[_type] = backend
return backend
# Find backend subclasses recursively
backend_subclasses = []
backends = AbstractBackend.__subclasses__()
while backends:
backend = backends.pop()
backends += backend.__subclasses__()
backend_subclasses.append(backend)
for BackendSubclass in backend_subclasses:
if _debug_importing:
print("Testing for subclass of ", BackendSubclass)
if BackendSubclass.framework_name not in _loaded_backends:
# instantiate the backend only if its framework module was already imported by the user; we never import frameworks ourselves
if BackendSubclass.framework_name in sys.modules:
if _debug_importing:
print("Imported backend for ", BackendSubclass.framework_name)
backend = BackendSubclass()
_loaded_backends[backend.framework_name] = backend
if backend.is_appropriate_type(tensor):
_type2backend[_type] = backend
return backend
raise RuntimeError("Tensor type unknown to einops {}".format(type(tensor)))
class AbstractBackend:
"""Base backend class, major part of methods are only for debugging purposes."""
framework_name: str
def is_appropriate_type(self, tensor):
"""helper method should recognize tensors it can handle"""
raise NotImplementedError()
def from_numpy(self, x):
raise NotImplementedError("framework doesn't support imperative execution")
def to_numpy(self, x):
raise NotImplementedError("framework doesn't support imperative execution")
def create_symbol(self, shape):
raise NotImplementedError("framework doesn't support symbolic computations")
def eval_symbol(self, symbol, input_dict):
raise NotImplementedError("framework doesn't support symbolic computations")
def arange(self, start, stop):
# supplementary method used only in testing, so should implement CPU version
raise NotImplementedError("framework doesn't implement arange")
def shape(self, x):
"""shape should return a tuple with integers or "shape symbols" (which will evaluate to actual size)"""
return x.shape
def reshape(self, x, shape):
return x.reshape(shape)
def transpose(self, x, axes):
return x.transpose(axes)
def reduce(self, x, operation, axes):
return getattr(x, operation)(axis=axes)
def stack_on_zeroth_dimension(self, tensors: list):
raise NotImplementedError()
def add_axis(self, x, new_position):
raise NotImplementedError()
def add_axes(self, x, n_axes, pos2len):
repeats = [1] * n_axes
for axis_position, axis_length in pos2len.items():
x = self.add_axis(x, axis_position)
repeats[axis_position] = axis_length
return self.tile(x, tuple(repeats))
def tile(self, x, repeats):
"""repeats - same lengths as x.shape"""
raise NotImplementedError()
def concat(self, tensors, axis: int):
"""concatenates tensors along axis.
Assume identical across tensors: devices, dtypes and shapes except selected axis."""
raise NotImplementedError()
def is_float_type(self, x):
# some backends (torch) can't compute average for non-floating types.
# Decided to drop average for all backends if type is not floating
raise NotImplementedError()
def layers(self):
raise NotImplementedError("backend does not provide layers")
def __repr__(self):
return "<einops backend for {}>".format(self.framework_name)
def einsum(self, pattern, *x):
raise NotImplementedError("backend does not support einsum")
class UnknownSize:
"""pseudo-symbol for symbolic frameworks which do not provide symbols for shape elements"""
def __floordiv__(self, other):
return self
def __eq__(self, other):
return True # we don't know actual size
def __mul__(self, other):
return self
def __rmul__(self, other):
return self
def __hash__(self):
return hash(None)
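# Illustrative sketch: UnknownSize absorbs arithmetic and compares equal to
# anything, so shape bookkeeping passes even when true sizes appear only at runtime:
#   >>> s = UnknownSize()
#   >>> (s * 2 is s, s == 42)
#   (True, True)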
class NumpyBackend(AbstractBackend):
framework_name = "numpy"
def __init__(self):
import numpy
self.np = numpy
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.np.ndarray)
def from_numpy(self, x):
return x
def to_numpy(self, x):
return x
def arange(self, start, stop):
return self.np.arange(start, stop)
def stack_on_zeroth_dimension(self, tensors: list):
return self.np.stack(tensors)
def tile(self, x, repeats):
return self.np.tile(x, repeats)
def concat(self, tensors, axis: int):
return self.np.concatenate(tensors, axis=axis)
def is_float_type(self, x):
return x.dtype in ("float16", "float32", "float64", "float128", "bfloat16")
def add_axis(self, x, new_position):
return self.np.expand_dims(x, new_position)
def einsum(self, pattern, *x):
return self.np.einsum(pattern, *x)
class JaxBackend(NumpyBackend):
framework_name = "jax"
def __init__(self):
super(JaxBackend, self).__init__()
self.onp = self.np
import jax.numpy
self.np = jax.numpy
def from_numpy(self, x):
return self.np.asarray(x)
def to_numpy(self, x):
return self.onp.asarray(x)
class TorchBackend(AbstractBackend):
framework_name = "torch"
def __init__(self):
import torch
self.torch = torch
# importing would register operations in torch._dynamo for torch.compile
from . import _torch_specific # noqa
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.torch.Tensor)
def from_numpy(self, x):
variable = self.torch.from_numpy(x)
if self.is_float_type(variable):
# attach grad only to floating types
variable.requires_grad = True
return variable
def to_numpy(self, x):
return x.detach().cpu().numpy()
def arange(self, start, stop):
return self.torch.arange(start, stop, dtype=self.torch.int64)
def reduce(self, x, operation, reduced_axes):
if operation == "min":
return x.amin(dim=reduced_axes)
elif operation == "max":
return x.amax(dim=reduced_axes)
elif operation == "sum":
return x.sum(dim=reduced_axes)
elif operation == "mean":
return x.mean(dim=reduced_axes)
elif operation == "prod":
for i in list(sorted(reduced_axes))[::-1]:
x = x.prod(dim=i)
return x
else:
raise NotImplementedError("Unknown reduction ", operation)
def transpose(self, x, axes):
return x.permute(axes)
def stack_on_zeroth_dimension(self, tensors: list):
return self.torch.stack(tensors)
def add_axes(self, x, n_axes, pos2len):
repeats = [-1] * n_axes
for axis_position, axis_length in pos2len.items():
x = self.add_axis(x, axis_position)
repeats[axis_position] = axis_length
return x.expand(repeats)
def tile(self, x, repeats):
return x.repeat(repeats)
def concat(self, tensors, axis: int):
return self.torch.cat(tensors, dim=axis)
def add_axis(self, x, new_position):
return self.torch.unsqueeze(x, new_position)
def is_float_type(self, x):
return x.dtype in [self.torch.float16, self.torch.float32, self.torch.float64, self.torch.bfloat16]
def layers(self):
from .layers import torch
return torch
def einsum(self, pattern, *x):
return self.torch.einsum(pattern, *x)
class CupyBackend(AbstractBackend):
framework_name = "cupy"
def __init__(self):
import cupy
self.cupy = cupy
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.cupy.ndarray)
def from_numpy(self, x):
return self.cupy.asarray(x)
def to_numpy(self, x):
return self.cupy.asnumpy(x)
def arange(self, start, stop):
return self.cupy.arange(start, stop)
def stack_on_zeroth_dimension(self, tensors: list):
return self.cupy.stack(tensors)
def tile(self, x, repeats):
return self.cupy.tile(x, repeats)
def concat(self, tensors, axis: int):
return self.cupy.concatenate(tensors, axis=axis)
def add_axis(self, x, new_position):
return self.cupy.expand_dims(x, new_position)
def is_float_type(self, x):
return x.dtype in ("float16", "float32", "float64", "float128", "bfloat16")
def einsum(self, pattern, *x):
return self.cupy.einsum(pattern, *x)
class ChainerBackend(AbstractBackend):
framework_name = "chainer"
def __init__(self):
import chainer
import numpy
self.numpy = numpy
self.chainer = chainer
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.chainer.Variable)
def from_numpy(self, x):
return self.chainer.Variable(x.astype("float32"))
def to_numpy(self, x):
if isinstance(x, self.chainer.Variable):
x = x.data
return x
def arange(self, start, stop):
return self.numpy.arange(start, stop)
def reduce(self, x, operation, axes):
return getattr(self.chainer.functions, operation)(x, axis=axes)
def stack_on_zeroth_dimension(self, tensors: list):
return self.chainer.functions.stack(tensors)
def tile(self, x, repeats):
return self.chainer.functions.tile(x, repeats)
def concat(self, tensors, axis: int):
return self.chainer.functions.concat(tensors, axis=axis)
def add_axis(self, x, new_position):
return self.chainer.functions.expand_dims(x, new_position)
def is_float_type(self, x):
return x.dtype in ("float16", "float32", "float64", "float128", "bfloat16")
def layers(self):
from .layers import chainer
return chainer
def einsum(self, pattern, *x):
return self.chainer.functions.einsum(pattern, *x)
class HashableTuple:
"""Overcomes non-hashability of symbolic elements"""
def __init__(self, elements: tuple):
self.elements = elements
def __iter__(self):
for x in self.elements:
yield x
def __len__(self):
return len(self.elements)
def __getitem__(self, item):
return self.elements[item]
# default equality and hash is used (True only with itself, hash taken of id)
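# Illustrative sketch: HashableTuple keeps tuple-like iteration and indexing
# while falling back to identity-based hashing, which makes shapes containing
# symbolic entries usable as cache keys:
#   >>> t = HashableTuple((2, 3, 5))
#   >>> (len(t), t[1], list(t))
#   (3, 3, [2, 3, 5])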
class TensorflowBackend(AbstractBackend):
framework_name = "tensorflow"
def __init__(self):
import tensorflow
self.tf = tensorflow
def is_appropriate_type(self, tensor):
return isinstance(tensor, (self.tf.Tensor, self.tf.Variable))
def from_numpy(self, x):
assert self.tf.executing_eagerly()
return self.tf.convert_to_tensor(x)
def to_numpy(self, x):
assert self.tf.executing_eagerly()
return x.numpy()
def arange(self, start, stop):
return self.tf.range(start, stop)
def shape(self, x):
if self.tf.executing_eagerly():
return tuple(UnknownSize() if d is None else int(d) for d in x.shape)
else:
static_shape = x.shape.as_list()
tf_shape = self.tf.shape(x)
# use the static shape where known, otherwise use the TF shape components
shape = tuple([s or tf_shape[dim] for dim, s in enumerate(static_shape)])
try:
hash(shape)
return shape
except TypeError:
# unhashable symbols in shape. Wrap tuple to be hashable.
return HashableTuple(shape)
def reduce(self, x, operation, axes):
return getattr(self.tf, "reduce_" + operation)(x, axis=axes)
def reshape(self, x, shape):
return self.tf.reshape(x, shape)
def transpose(self, x, axes):
return self.tf.transpose(x, axes)
def stack_on_zeroth_dimension(self, tensors: list):
return self.tf.stack(tensors)
def tile(self, x, repeats):
return self.tf.tile(x, repeats)
def concat(self, tensors, axis: int):
return self.tf.concat(tensors, axis=axis)
def add_axis(self, x, new_position):
return self.tf.expand_dims(x, new_position)
def is_float_type(self, x):
return x.dtype in ("float16", "float32", "float64", "float128", "bfloat16")
def layers(self):
from .layers import tensorflow
return tensorflow
def einsum(self, pattern, *x):
return self.tf.einsum(pattern, *x)
class KerasBackend(AbstractBackend):
framework_name = "tensorflow.keras"
def __init__(self):
import tensorflow as tf
self.tf = tf
self.keras = tf.keras
self.K = tf.keras.backend
def is_appropriate_type(self, tensor):
return self.tf.is_tensor(tensor) and self.K.is_keras_tensor(tensor)
def create_symbol(self, shape):
return self.keras.Input(batch_shape=shape)
def eval_symbol(self, symbol, input_dict):
model = self.keras.models.Model([var for (var, _) in input_dict], symbol)
return model.predict_on_batch([val for (_, val) in input_dict])
def arange(self, start, stop):
return self.K.arange(start, stop)
def shape(self, x):
shape = self.K.shape(x) # tf tensor
return HashableTuple(tuple(shape))
def reduce(self, x, operation, axes):
return getattr(self.K, operation)(x, axis=axes)
def reshape(self, x, shape):
return self.K.reshape(x, shape)
def transpose(self, x, axes):
return self.K.permute_dimensions(x, axes)
def stack_on_zeroth_dimension(self, tensors: list):
return self.K.stack(tensors)
def tile(self, x, repeats):
return self.K.tile(x, repeats)
def concat(self, tensors, axis: int):
return self.K.concatenate(tensors, axis=axis)
def add_axis(self, x, new_position):
return self.K.expand_dims(x, new_position)
def is_float_type(self, x):
return "float" in self.K.dtype(x)
def layers(self):
from .layers import keras
return keras
class OneFlowBackend(AbstractBackend):
framework_name = "oneflow"
def __init__(self):
import oneflow as flow
self.flow = flow
def is_appropriate_type(self, tensor):
return isinstance(tensor, self.flow.Tensor)
def from_numpy(self, x):
variable = self.flow.from_numpy(x)
if self.is_float_type(variable):
# attach grad only to floating types
variable.requires_grad = True
return variable
def to_numpy(self, x):
return x.detach().cpu().numpy()
def arange(self, start, stop):
return self.flow.arange(start, stop, dtype=self.flow.int64)
def reduce(self, x, operation, reduced_axes):
for axis in sorted(reduced_axes, reverse=True):
if operation == "min":
x, _ = x.min(dim=axis)
elif operation == "max":
x, _ = x.max(dim=axis)
elif operation in ["sum", "mean", "prod"]:
x = getattr(x, operation)(dim=axis)
else:
raise NotImplementedError("Unknown reduction ", operation)
return x
def transpose(self, x, axes):
return x.permute(axes)
def stack_on_zeroth_dimension(self, tensors: list):
return self.flow.stack(tensors)
def add_axes(self, x, n_axes, pos2len):
repeats = [-1] * n_axes
for axis_position, axis_length in pos2len.items():
x = self.add_axis(x, axis_position)
repeats[axis_position] = axis_length
return x.expand(*repeats)
def tile(self, x, repeats):
return x.repeat(repeats)
def concat(self, tensors, axis: int):
return self.flow.concat(tensors, dim=axis)
def add_axis(self, x, new_position):
return self.flow.unsqueeze(x, new_position)
def is_float_type(self, x):
return x.dtype in [self.flow.float16, self.flow.float32, self.flow.float64]
def layers(self):
from .layers import oneflow
return oneflow
def einsum(self, pattern, *x):
return self.flow.einsum(pattern, *x)
class PaddleBackend(AbstractBackend):
framework_name = "paddle"
def __init__(self):
import paddle
self.paddle = paddle
def is_appropriate_type(self, tensor):
return isinstance(tensor, (self.paddle.Tensor, self.paddle.static.Variable))
def from_numpy(self, x):
tensor = self.paddle.to_tensor(x)
tensor.stop_gradient = False
return tensor
def to_numpy(self, x):
return x.detach().numpy()
def arange(self, start, stop):
return self.paddle.arange(start, stop, dtype=self.paddle.int64)
def reduce(self, x, operation, axes):
# TODO: Support the reduce operation to output a 0D Tensor
if len(axes) == x.ndim:
return super().reduce(x, operation, axes).squeeze(0)
else:
return super().reduce(x, operation, axes)
def transpose(self, x, axes):
return x.transpose(axes)
def add_axes(self, x, n_axes, pos2len):
repeats = [-1] * n_axes
for axis_position, axis_length in pos2len.items():
x = self.add_axis(x, axis_position)
repeats[axis_position] = axis_length
return x.expand(repeats)
def stack_on_zeroth_dimension(self, tensors: list):
return self.paddle.stack(tensors)
def reshape(self, x, shape):
return x.reshape(shape)
def tile(self, x, repeats):
return x.tile(repeats)
def concat(self, tensors, axis: int):
return self.paddle.concat(tensors, axis=axis)
def add_axis(self, x, new_position):
return x.unsqueeze(new_position)
def is_float_type(self, x):
return x.dtype in [self.paddle.float16, self.paddle.float32, self.paddle.float64]
def layers(self):
from .layers import paddle
return paddle
def einsum(self, pattern, *x):
return self.paddle.einsum(pattern, *x)
def shape(self, x):
return tuple(x.shape)
| einops-master | einops/_backends.py |
| einops-master | einops/experimental/__init__.py |
from typing import List, TypeVar, Tuple, Sequence
from einops import EinopsError
T = TypeVar('T')
Shape = Tuple[int, ...]
def pack(pattern: str, tensors: Sequence[T]) -> Tuple[T, List[Shape]]:
axes = pattern.split()
if len(axes) != len(set(axes)):
raise EinopsError(f'Duplicates in axes names in pack("{pattern}", ...)')
if '*' not in axes:
raise EinopsError(f'No *-axis in pack("{pattern}", ...)')
# need some validation of identifiers
n_axes_before = axes.index('*')
n_axes_after = len(axes) - n_axes_before - 1
min_axes = n_axes_before + n_axes_after
xp = tensors[0].__array_namespace__()
reshaped_tensors: List[T] = []
packed_shapes: List[Shape] = []
for i, tensor in enumerate(tensors):
shape = tensor.shape
if len(shape) < min_axes:
raise EinopsError(f'packed tensor #{i} (enumeration starts with 0) has shape {shape}, '
f'while pattern {pattern} assumes at least {min_axes} axes')
axis_after_packed_axes = len(shape) - n_axes_after
packed_shapes.append(shape[n_axes_before:axis_after_packed_axes])
reshaped_tensors.append(
xp.reshape(tensor, (*shape[:n_axes_before], -1, *shape[axis_after_packed_axes:]))
)
return xp.concat(reshaped_tensors, axis=n_axes_before), packed_shapes
def prod(x: Shape) -> int:
result = 1
for i in x:
result *= i
return result
def unpack(pattern: str, tensor: T, packed_shapes: List[Shape]) -> List[T]:
axes = pattern.split()
if len(axes) != len(set(axes)):
raise EinopsError(f'Duplicates in axes names in unpack("{pattern}", ...)')
if '*' not in axes:
raise EinopsError(f'No *-axis in unpack("{pattern}", ...)')
# need some validation of identifiers
input_shape = tensor.shape
if len(input_shape) != len(axes):
raise EinopsError(f'unpack({pattern}, ...) received input of wrong dim with shape {input_shape}')
unpacked_axis = axes.index('*')
lengths_of_composed_axes: List[int] = [
-1 if -1 in p_shape else prod(p_shape)
for p_shape in packed_shapes
]
n_unknown_composed_axes = sum(x == -1 for x in lengths_of_composed_axes)
if n_unknown_composed_axes > 1:
raise EinopsError(
f"unpack({pattern}, ...) received more than one -1 in {packed_shapes} and can't infer dimensions"
)
# the following manipulations allow skipping some shape verifications
# and leaving them to backends
# [[], [2, 3], [4], [-1, 5], [6]] < examples of packed_axis
# split positions when computed should be
# [0, 1, 7, 11, N-6 , N ], where N = length of axis
split_positions = [0] * len(packed_shapes) + [input_shape[unpacked_axis]]
if n_unknown_composed_axes == 0:
for i, x in enumerate(lengths_of_composed_axes[:-1]):
split_positions[i + 1] = split_positions[i] + x
else:
unknown_composed_axis: int = lengths_of_composed_axes.index(-1)
for i in range(unknown_composed_axis):
split_positions[i + 1] = split_positions[i] + lengths_of_composed_axes[i]
for j in range(unknown_composed_axis + 1, len(lengths_of_composed_axes))[::-1]:
split_positions[j] = split_positions[j + 1] - lengths_of_composed_axes[j]
xp = tensor.__array_namespace__()
shape_start = input_shape[:unpacked_axis]
shape_end = input_shape[unpacked_axis + 1:]
slice_filler = (slice(None, None),) * unpacked_axis
return [
xp.reshape(
# shortest way slice arbitrary axis
tensor[(*slice_filler, slice(split_positions[i], split_positions[i + 1]))],
(*shape_start, *element_shape, *shape_end)
)
for i, element_shape in enumerate(packed_shapes)
]
if __name__ == '__main__':
import numpy.array_api as np
H = 100
W = 101
C = 3
r = np.zeros((H, W))
g = np.zeros((H, W))
b = np.zeros((H, W))
embeddings = np.zeros((H, W, 32))
im = np.stack([r, g, b], axis=-1)
print(im.shape)
image, shapes = pack('h w *', [r, g, b])
print(image.shape, shapes)
print(type(image))
print(type(im))
assert np.all(np.equal(image, im))
images_and_embedding, shapes = pack('h w *', [r, g, b, embeddings])
print(images_and_embedding.shape, shapes)
r2, g2, b2, embeddings2 = unpack('h w *', images_and_embedding, shapes)
assert np.all(np.equal(r, r2))
assert np.all(np.equal(g, g2))
assert np.all(np.equal(b, b2))
assert np.all(np.equal(embeddings, embeddings2))
print([x.shape for x in unpack('h w *', images_and_embedding, shapes[1:])])
print('all is fine')
| einops-master | einops/experimental/data_api_packing.py |
"""
Indexing one array with one or more other arrays.
Concept for discussion.
The notation targets hard cases, not simple ones like indexing a 1d array with another 1d array
(the notation supports that, but you can't simplify arr[ind], and there is no reason to)
Examples
1. query, for every token in a sequence, a token in the image. Images and sequences are paired
einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, [h_indices_bt, w_indices_bt])
this is equivalent, so you can pass indexers independently or together
einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, np.asarray([h_indices_bt, w_indices_bt]))
after some thinking I decided that having the first axis for the indexing variable is not too restrictive,
and should simplify mapping of such cases.
For this reason the [...] part should always go first in the indexer.
This is the largest difference from einindex https://github.com/malmaud/einindex,
which has almost identical grammar, but puts the special dimension last, while we put it first.
This trick allows naturally decomposing a multiindex into individual dimensions or vice versa.
2. query, for every token in the video, the most suitable word in a (matching) sentence
einindex('b t h w <- seq b, [seq] t b h w', arr_tbc, [t_indices_bhw])
note that only one indexer is used, but it still has to be enclosed in a list.
That's the price of being generic. Alternatively, a leading singleton dimension can be added.
3. (not supported now, future planning)
for every timeframe in a video, find the token with the highest norm (across h and w), and compose a new stack of them
indices_2bt = argmax(x_bthwc.norm(dim=-1), 'b t h w -> [h, w] b t')
selected_embeddings_btc = einindex('b t c <- b t h w c, [h, w] b t', x_bthwc, indices_2bt)
while the current question is 'how do we index',
it is important to pre-align it with the question 'what are natural ways to get indices'.
Most common are min/max; less common options: topk (works here), random sampling.
Some important properties of this notation:
- support for multiple indexers, including using a single tensor to keep multiple indexers
- 'batch' indexing, when some axes of indexer and array should be matched
- universal (one-indexing-to-rule-them-all)
- extensible for (named) ellipses, including variadic number of indexers
- extensible for einops-style compositions and decompositions
- extensible for outer indexing when indexers are not aligned
Current implementation based on python array api and uses loops,
because no appropriate indexing available in the standard.
"""
from typing import List, Union, TypeVar, Tuple
from einops import EinopsError
T = TypeVar('T')
class CompositionDecomposition:
def __init__(
self,
decomposed_shape: List[str],
composed_shape: List[List[str]],
):
flat_shape = []
for x in composed_shape:
flat_shape.extend(x)
self.compose_transposition: Tuple[int, ...] = tuple([decomposed_shape.index(x) for x in flat_shape])
self.decompose_transposition: Tuple[int, ...] = tuple([flat_shape.index(x) for x in decomposed_shape])
self.composed_shape = composed_shape
self.decomposed_shape = decomposed_shape
def decompose(self, x, known_axes_lengths: dict[str, int]):
xp = x.__array_namespace__()
shape = x.shape
flat_shape = []
for i, axis_group in enumerate(self.composed_shape):
unknown_axis_name = None
known_sizes_prod = 1
for axis_name in axis_group:
if axis_name in known_axes_lengths:
known_sizes_prod *= known_axes_lengths[axis_name]
else:
if unknown_axis_name is None:
unknown_axis_name = axis_name
else:
raise EinopsError("Can't infer the size")
if unknown_axis_name is None:
assert shape[i] == known_sizes_prod
else:
known_axes_lengths[unknown_axis_name] = shape[i] // known_sizes_prod
for axis in axis_group:
flat_shape.append(known_axes_lengths[axis])
x = xp.reshape(x, flat_shape)
return xp.permute_dims(x, self.decompose_transposition)
def compose(self, x, known_axes_lengths: dict[str, int]):
xp = x.__array_namespace__()
for axis_len, axis_name in zip(x.shape, self.decomposed_shape):
if axis_name in known_axes_lengths:
assert known_axes_lengths[axis_name] == axis_len
else:
known_axes_lengths[axis_name] = axis_len
x = xp.permute_dims(x, self.compose_transposition)
new_shape = []
for axis_group in self.composed_shape:
composed_axis_size = 1
for axis_name in axis_group:
composed_axis_size *= known_axes_lengths[axis_name]
new_shape.append(composed_axis_size)
return xp.reshape(x, tuple(new_shape))
def arange_at_position(xp, n_axes, axis, axis_len, device=None):
x = xp.arange(axis_len, dtype=xp.int64, device=device)
shape = [1] * n_axes
shape[axis] = axis_len
x = xp.reshape(x, shape)
return x
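# Illustrative sketch (assuming an array-api namespace like numpy.array_api):
# produces an arange placed along one chosen axis so it broadcasts against
# the remaining axes:
#   >>> import numpy.array_api as xp
#   >>> arange_at_position(xp, n_axes=3, axis=1, axis_len=4).shape
#   (1, 4, 1)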
class IndexingFormula:
def __init__(self, pattern: str):
"""
:param pattern: example 'b t c <- b hsel wsel c, [hsel, wsel] b t'
"""
self.pattern = pattern
left, right = pattern.split('<-')
arg_split = right.index(',')
arr_pattern, ind_pattern = right[:arg_split], right[arg_split + 1:]
ind_pattern = ind_pattern.strip()
# print(
# arr_pattern, '\n',
# ind_pattern,
# )
assert ind_pattern.startswith('['), 'composition axis should go first in indexer (second argument) [h w] i j k'
composition_start = ind_pattern.index('[')
composition_end = ind_pattern.index(']')
composition = ind_pattern[composition_start + 1: composition_end]
ind_other_axes = ind_pattern[composition_end + 1:]
self.result_axes_names = left.split()
self.array_axes_names = arr_pattern.split()
self.indexing_axes_names = [x.strip() for x in composition.split(',')]
self.indexer_other_axes_names = ind_other_axes.split()
for group_name, group in [
('result', self.result_axes_names),
('array', self.array_axes_names),
('indexer', self.indexing_axes_names + self.indexer_other_axes_names),
]:
if len(set(group)) != len(group):
# need more verbosity, which axis, raise
raise EinopsError(f'{group_name} pattern ({group}) contains a duplicated axis')
axis_groups = [
self.result_axes_names,
self.array_axes_names,
self.indexing_axes_names,
self.indexer_other_axes_names,
]
all_axes = set()
for group in axis_groups:
all_axes.update(group)
self.indexer_axes = []
self.batch_axes = []
self.result_and_index_axes = []
self.result_and_array_axes = []
for axis in all_axes:
presence = tuple(axis in g for g in axis_groups)
# want match-case here. sweet dreams
if presence == (False, True, True, False):
self.indexer_axes.append(axis)
elif presence[2]:
raise EinopsError(f'Wrong usage of indexer variable {axis}')
elif presence == (True, True, False, True):
self.batch_axes.append(axis)
elif presence == (True, False, False, True):
self.result_and_index_axes.append(axis)
elif presence == (True, True, False, False):
self.result_and_array_axes.append(axis)
else:
# TODO better categorization of wrong usage patterns
raise EinopsError(f'{axis} is used incorrectly in {pattern}')
assert set(self.indexer_axes) == set(self.indexing_axes_names)
# order of these variables matters, since we can't lose mapping here
self.indexer_axes = self.indexing_axes_names
self.array_composition = CompositionDecomposition(
decomposed_shape=self.array_axes_names,
composed_shape=[self.batch_axes + self.indexer_axes, self.result_and_array_axes],
)
self.index_composition = CompositionDecomposition(
decomposed_shape=self.indexer_other_axes_names,
# single axis after composition
composed_shape=[self.batch_axes + self.result_and_index_axes],
)
self.result_composition = CompositionDecomposition(
decomposed_shape=self.result_axes_names,
composed_shape=[self.batch_axes + self.result_and_index_axes, self.result_and_array_axes],
)
def apply_to_array_api(self, arr: T, ind: Union[T, List[T]]):
known_axes_sizes: dict[str, int] = {}
xp = arr.__array_namespace__()
if not isinstance(ind, list):
ind = [ind[i, ...] for i in range(ind.shape[0])]
for indexer in ind:
assert len(indexer.shape) == len(self.indexer_other_axes_names)
# step 1. transpose, reshapes of arr; learn its dimensions
arr_2d = self.array_composition.compose(arr, known_axes_sizes)
# step 2. compute shifts and create an actual indexing array
shift = 1
full_index = xp.zeros([1] * len(ind[0].shape), dtype=xp.int64, device=arr.device)
# original order: [*batch-like axes, *indexing_axes,]
# now we need to traverse them in the opposite direction
for axis_name, indexer in list(zip(self.indexing_axes_names, ind))[::-1]:
full_index = full_index + shift * (indexer % known_axes_sizes[axis_name])
shift *= known_axes_sizes[axis_name]
for axis_name in self.batch_axes[::-1]:
axis_id = self.indexer_other_axes_names.index(axis_name)
full_index = full_index + arange_at_position(
xp, len(self.indexer_other_axes_names), axis=axis_id, axis_len=known_axes_sizes[axis_name],
device=arr.device,
) * shift
shift *= known_axes_sizes[axis_name]
assert shift == arr_2d.shape[0]
# step 3. Flatten index
full_index = self.index_composition.compose(full_index, known_axes_sizes)
# step 4. indexing
# python array api lacks any integer indexing, so... I use loops.
# did you know that there is conceptual programming ... just like art?
# result_2d = arr_2d[full_index]
result_2d = xp.stack([arr_2d[full_index[i], :] for i in range(full_index.shape[0])])
# step 5. decompose the 2d result back to the requested shape
result = self.result_composition.decompose(result_2d, known_axes_sizes)
return result
def einindex(pattern: str, arr: T, /, ind: Union[T, List[T]]):
"""
Demonstrates how einindex should work.
Supports data-api compliant arrays.
"""
formula = IndexingFormula(pattern)
return formula.apply_to_array_api(arr, ind)
def test_composition_and_decomposition():
import numpy.array_api as np
x = np.arange(2 * 3 * 5 * 7)
x = np.reshape(x, (2, 3, 5, 7))
comp = CompositionDecomposition(
decomposed_shape=['a', 'b', 'c', 'd'],
composed_shape=[['a', 'b'], ['c', 'd']],
)
assert comp.compose(x, known_axes_lengths={}).shape == (2 * 3, 5 * 7)
y = CompositionDecomposition(
decomposed_shape=['a', 'b', 'c', 'd'],
composed_shape=[['a', 'b'], [], ['c', 'd']],
).compose(x, {})
assert y.shape == (2 * 3, 1, 5 * 7)
assert np.all(np.reshape(x, (-1,)) == np.reshape(y, (-1,)))
comp = CompositionDecomposition(
decomposed_shape=['a', 'b', 'e', 'c', 'd'],
composed_shape=[['e', 'c'], ['b'], ['a', 'd']],
)
x = np.arange(2 * 3 * 5 * 7 * 3)
x = np.reshape(x, (2, 3, 5, 7, 3))
axes = {}
y = comp.compose(x, axes)
x2 = comp.decompose(y, axes)
assert np.all(x == x2)
def test_simple_indexing():
import numpy.array_api as np
# simple 2d test
arr = np.reshape(np.arange(5 * 7), (5, 7))
ind = np.arange(7) % 5
x = einindex('j <- i j, [i] j', arr, [ind])
for j, i in enumerate(ind):
assert arr[i, j] == x[j]
y = einindex('j <- j i, [i] j', np.permute_dims(arr, (1, 0)), [ind])
for j, i in enumerate(ind):
assert arr[i, j] == y[j]
def test_multidimensional_indexing():
import numpy.array_api as np
embedding_bhwc = (
+ arange_at_position(np, 4, 0, 2) * 1000
+ arange_at_position(np, 4, 1, 3) * 100
+ arange_at_position(np, 4, 2, 5) * 10
+ arange_at_position(np, 4, 3, 7) * 1
)
hindices_bt = np.reshape(np.arange(6), (2, 3)) % 3
windices_bt = np.reshape(np.arange(6), (2, 3)) % 5
# imagine that you have pairs of image <> sentence
# your goal is to get most suitable token from image for every token in sentence
# thus for every token in sentence you compute best k and v
result = einindex('c t b <- b h w c, [h, w] b t', embedding_bhwc, [hindices_bt, windices_bt])
# example of using a single array for indexing multiple axes
hw_indices_bt = np.stack([hindices_bt, windices_bt])
result2 = einindex('c t b <- b h w c, [h, w] b t', embedding_bhwc, hw_indices_bt)
assert np.all(result == result2)
# check vs manual element computation
result_manual = result * 0
for b in range(2):
for t in range(3):
for c in range(7):
h = hindices_bt[b, t]
w = windices_bt[b, t]
result_manual[c, t, b] = embedding_bhwc[b, h, w, c]
assert np.all(result == result_manual)
def test_reverse_indexing():
import numpy.array_api as np
C, T, B = 2, 3, 5
# G = GPU, batch-like variable
G = 4
H = 7
W = 9
arr_gtbc = (
+ arange_at_position(np, 4, 0, G) * 1000
+ arange_at_position(np, 4, 1, T) * 100
+ arange_at_position(np, 4, 2, B) * 10
+ arange_at_position(np, 4, 3, C) * 1
)
t_indices_gbhw = np.reshape(np.arange(G * B * H * W), (G, B, H, W)) % T
result = einindex('g b c h w <- g t b c, [t] g b h w', arr_gtbc, [t_indices_gbhw])
result_manual = result * 0
for g in range(G):
for b in range(B):
for c in range(C):
for h in range(H):
for w in range(W):
t = t_indices_gbhw[g, b, h, w]
result_manual[g, b, c, h, w] = arr_gtbc[g, t, b, c]
assert np.all(result == result_manual)
| einops-master | einops/experimental/indexing.py |
__author__ = 'Alex Rogozhnikov'
from ..layers.tensorflow import Rearrange, Reduce, EinMix
keras_custom_objects = {
Rearrange.__name__: Rearrange,
Reduce.__name__: Reduce,
EinMix.__name__: EinMix,
}
| einops-master | einops/layers/keras.py |
from typing import Optional, Dict, cast
import paddle
from . import RearrangeMixin, ReduceMixin
from ._einmix import _EinmixMixin
__author__ = 'PaddlePaddle'
class Rearrange(RearrangeMixin, paddle.nn.Layer):
def forward(self, input):
return self._apply_recipe(input)
class Reduce(ReduceMixin, paddle.nn.Layer):
def forward(self, input):
return self._apply_recipe(input)
class EinMix(_EinmixMixin, paddle.nn.Layer):
def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
self.weight = self.create_parameter(
weight_shape,
default_initializer=paddle.nn.initializer.Uniform(-weight_bound, weight_bound)
)
if bias_shape is not None:
self.bias = self.create_parameter(
bias_shape,
default_initializer=paddle.nn.initializer.Uniform(-bias_bound, bias_bound)
)
else:
self.bias = None
def _create_rearrange_layers(self,
pre_reshape_pattern: Optional[str],
pre_reshape_lengths: Optional[Dict],
post_reshape_pattern: Optional[str],
post_reshape_lengths: Optional[Dict],
):
self.pre_rearrange = None
if pre_reshape_pattern is not None:
self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
self.post_rearrange = None
if post_reshape_pattern is not None:
self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
def forward(self, input):
if self.pre_rearrange is not None:
input = self.pre_rearrange(input)
result = paddle.einsum(self.einsum_pattern, input, self.weight)
if self.bias is not None:
result += self.bias
if self.post_rearrange is not None:
result = self.post_rearrange(result)
return result
 | einops-master | einops/layers/paddle.py |
__author__ = 'Alex Rogozhnikov'
from typing import Any, Dict
from ..einops import TransformRecipe, _apply_recipe, _prepare_recipes_for_all_dims, get_backend
from .. import EinopsError
class RearrangeMixin:
"""
Rearrange layer behaves identically to einops.rearrange operation.
:param pattern: str, rearrangement pattern
:param axes_lengths: any additional specification of dimensions
See einops.rearrange for examples.
"""
def __init__(self, pattern: str, **axes_lengths: Any) -> None:
super().__init__()
self.pattern = pattern
self.axes_lengths = axes_lengths
# self._recipe = self.recipe() # checking parameters
self._multirecipe = self.multirecipe()
self._axes_lengths = tuple(self.axes_lengths.items())
def __repr__(self) -> str:
params = repr(self.pattern)
for axis, length in self.axes_lengths.items():
params += ', {}={}'.format(axis, length)
return '{}({})'.format(self.__class__.__name__, params)
def multirecipe(self) -> Dict[int, TransformRecipe]:
try:
return _prepare_recipes_for_all_dims(
self.pattern, operation='rearrange', axes_names=tuple(self.axes_lengths)
)
except EinopsError as e:
raise EinopsError(' Error while preparing {!r}\n {}'.format(self, e))
def _apply_recipe(self, x):
backend = get_backend(x)
return _apply_recipe(
backend=backend,
recipe=self._multirecipe[len(x.shape)],
tensor=x,
reduction_type='rearrange',
axes_lengths=self._axes_lengths,
)
def __getstate__(self):
return {'pattern': self.pattern, 'axes_lengths': self.axes_lengths}
def __setstate__(self, state):
self.__init__(pattern=state['pattern'], **state['axes_lengths'])
class ReduceMixin:
"""
Reduce layer behaves identically to einops.reduce operation.
:param pattern: str, rearrangement pattern
:param reduction: one of available reductions ('min', 'max', 'sum', 'mean', 'prod'), case-sensitive
:param axes_lengths: any additional specification of dimensions
See einops.reduce for examples.
"""
def __init__(self, pattern: str, reduction: str, **axes_lengths: Any):
super().__init__()
self.pattern = pattern
self.reduction = reduction
self.axes_lengths = axes_lengths
self._multirecipe = self.multirecipe()
self._axes_lengths = tuple(self.axes_lengths.items())
def __repr__(self):
params = '{!r}, {!r}'.format(self.pattern, self.reduction)
for axis, length in self.axes_lengths.items():
params += ', {}={}'.format(axis, length)
return '{}({})'.format(self.__class__.__name__, params)
def multirecipe(self) -> Dict[int, TransformRecipe]:
try:
return _prepare_recipes_for_all_dims(
self.pattern, operation=self.reduction, axes_names=tuple(self.axes_lengths)
)
except EinopsError as e:
raise EinopsError(' Error while preparing {!r}\n {}'.format(self, e))
def _apply_recipe(self, x):
backend = get_backend(x)
return _apply_recipe(
backend=backend,
recipe=self._multirecipe[len(x.shape)],
tensor=x,
reduction_type=self.reduction,
axes_lengths=self._axes_lengths,
)
def __getstate__(self):
return {'pattern': self.pattern, 'reduction': self.reduction, 'axes_lengths': self.axes_lengths}
def __setstate__(self, state):
self.__init__(pattern=state['pattern'], reduction=state['reduction'], **state['axes_lengths'])
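# How these mixins are consumed (a sketch mirroring the framework modules in
# this package, e.g. einops.layers.oneflow): each framework combines a mixin
# with its own Module base class and routes the forward pass through
# _apply_recipe:
#
#     class Rearrange(RearrangeMixin, flow.nn.Module):
#         def forward(self, input):
#             return self._apply_recipe(input)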
| einops-master | einops/layers/__init__.py |
from typing import List, Optional, Dict, cast
import tensorflow as tf
from tensorflow.keras.layers import Layer
from .._backends import UnknownSize
from . import RearrangeMixin, ReduceMixin
from ._einmix import _EinmixMixin
from ..einops import TransformRecipe, _reconstruct_from_shape_uncached
__author__ = 'Alex Rogozhnikov'
def _compute_output_shape(recipe: TransformRecipe, input_shape) -> List[Optional[int]]:
input_shape = [UnknownSize() if d is None else int(d) for d in input_shape]
init_shapes, reduced_axes, axes_reordering, added_axes, final_shape = \
_reconstruct_from_shape_uncached(recipe, input_shape)
output_shape: List[Optional[int]] = [None if isinstance(d, UnknownSize) else int(d) for d in final_shape]
return output_shape
class Rearrange(RearrangeMixin, Layer):
def compute_output_shape(self, input_shape):
return _compute_output_shape(self._multirecipe[len(input_shape)], input_shape)
def call(self, inputs):
return self._apply_recipe(inputs)
def get_config(self):
return {'pattern': self.pattern, **self.axes_lengths}
class Reduce(ReduceMixin, Layer):
def compute_output_shape(self, input_shape):
return _compute_output_shape(self._multirecipe[len(input_shape)], input_shape)
def call(self, inputs):
return self._apply_recipe(inputs)
def get_config(self):
return {'pattern': self.pattern, 'reduction': self.reduction, **self.axes_lengths}
class EinMix(_EinmixMixin, Layer):
def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
self.weight = tf.Variable(tf.random_uniform_initializer(-weight_bound, weight_bound)(shape=weight_shape),
trainable=True)
if bias_shape is not None:
self.bias = tf.Variable(tf.random_uniform_initializer(-bias_bound, bias_bound)(shape=bias_shape),
trainable=True)
else:
self.bias = None
def _create_rearrange_layers(self,
pre_reshape_pattern: Optional[str],
pre_reshape_lengths: Optional[Dict],
post_reshape_pattern: Optional[str],
post_reshape_lengths: Optional[Dict],
):
self.pre_rearrange = None
if pre_reshape_pattern is not None:
self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
self.post_rearrange = None
if post_reshape_pattern is not None:
self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
def build(self, input_shape):
pass
def call(self, inputs):
if self.pre_rearrange is not None:
inputs = self.pre_rearrange(inputs)
result = tf.einsum(self.einsum_pattern, inputs, self.weight)
if self.bias is not None:
result = result + self.bias
if self.post_rearrange is not None:
result = self.post_rearrange(result)
return result
def get_config(self):
return {'pattern': self.pattern,
'weight_shape': self.weight_shape,
'bias_shape': self.bias_shape,
**self.axes_lengths}
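# Usage sketch (an illustration): these behave as ordinary Keras layers, so
# they can be dropped into a Sequential model, with output shapes inferred via
# compute_output_shape:
#
#     model = tf.keras.Sequential([
#         Rearrange('b h w c -> b (h w c)'),
#         tf.keras.layers.Dense(10),
#     ])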
| einops-master | einops/layers/tensorflow.py |
from typing import Any, List, Optional, Dict
from einops import EinopsError
from einops.parsing import ParsedExpression
import warnings
import string
from ..einops import _product
def _report_axes(axes: set, report_message: str):
if len(axes) > 0:
raise EinopsError(report_message.format(axes))
class _EinmixMixin:
def __init__(self, pattern: str, weight_shape: str, bias_shape: Optional[str]=None, **axes_lengths: Any):
"""
EinMix - Einstein summation with automated tensor management and axis packing/unpacking.
EinMix is an advanced tool, helpful tutorial:
https://github.com/arogozhnikov/einops/blob/master/docs/3-einmix-layer.ipynb
Imagine taking einsum with two arguments: one is the input, the other is a tensor with weights
>>> einsum('time batch channel_in, channel_in channel_out -> time batch channel_out', input, weight)
This layer manages the weights for you; the syntax highlights the separate role of the weight matrix
>>> EinMix('time batch channel_in -> time batch channel_out', weight_shape='channel_in channel_out')
But otherwise it is the same einsum under the hood.
Simple linear layer with bias term (you have one like that in your framework)
>>> EinMix('t b cin -> t b cout', weight_shape='cin cout', bias_shape='cout', cin=10, cout=20)
Mixing is not restricted to the last axis. Let's mix along height
>>> EinMix('h w c-> hout w c', weight_shape='h hout', bias_shape='hout', h=32, hout=32)
Channel-wise multiplication (like one used in normalizations)
>>> EinMix('t b c -> t b c', weight_shape='c', c=128)
Multi-head linear layer (each head is its own linear layer):
>>> EinMix('t b (head cin) -> t b (head cout)', weight_shape='head cin cout', ...)
... ah yes, you need to specify all dimensions of weight shape/bias shape in parameters.
Use cases:
- when channel dimension is not last, use EinMix, not transposition
- patch/segment embeddings
- when only within-group connections are needed (reduces the number of weights and computations)
- perfect as a part of sequential models
- next-gen MLPs (follow tutorial to learn more)
Uniform He initialization is applied to the weight tensor and accounts for the number of elements mixed.
Parameters
:param pattern: transformation pattern, left side - dimensions of input, right side - dimensions of output
:param weight_shape: axes of weight. A tensor of this shape is created, stored, and optimized in a layer
:param bias_shape: axes of bias added to output. Weights of this shape are created and stored. If `None` (the default), no bias is added.
:param axes_lengths: dimensions of weight tensor
"""
super().__init__()
self.pattern = pattern
self.weight_shape = weight_shape
self.bias_shape = bias_shape
self.axes_lengths = axes_lengths
self.initialize_einmix(pattern=pattern, weight_shape=weight_shape, bias_shape=bias_shape, axes_lengths=axes_lengths)
def initialize_einmix(self, pattern: str, weight_shape: str, bias_shape: Optional[str], axes_lengths: dict):
left_pattern, right_pattern = pattern.split('->')
left = ParsedExpression(left_pattern)
right = ParsedExpression(right_pattern)
weight = ParsedExpression(weight_shape)
_report_axes(
set.difference(right.identifiers, {*left.identifiers, *weight.identifiers}),
'Unrecognized identifiers on the right side of EinMix {}'
)
if left.has_ellipsis or right.has_ellipsis or weight.has_ellipsis:
raise EinopsError('Ellipsis is not supported in EinMix (right now)')
if any(x.has_non_unitary_anonymous_axes for x in [left, right, weight]):
raise EinopsError('Anonymous axes (numbers) are not allowed in EinMix')
if '(' in weight_shape or ')' in weight_shape:
raise EinopsError(f'Parentheses are not allowed in weight shape: {weight_shape}')
pre_reshape_pattern = None
pre_reshape_lengths = None
post_reshape_pattern = None
if any(len(group) != 1 for group in left.composition):
names: List[str] = []
for group in left.composition:
names += group
composition = ' '.join(names)
pre_reshape_pattern = f'{left_pattern}->{composition}'
pre_reshape_lengths = {name: length for name, length in axes_lengths.items() if name in names}
if any(len(group) != 1 for group in right.composition):
names = []
for group in right.composition:
names += group
composition = ' '.join(names)
post_reshape_pattern = f'{composition}->{right_pattern}'
self._create_rearrange_layers(pre_reshape_pattern, pre_reshape_lengths, post_reshape_pattern, {})
for axis in weight.identifiers:
if axis not in axes_lengths:
raise EinopsError('Dimension {} of weight should be specified'.format(axis))
_report_axes(
set.difference(set(axes_lengths), {*left.identifiers, *weight.identifiers}),
'Axes {} are not used in pattern',
)
_report_axes(
set.difference(weight.identifiers, {*left.identifiers, *right.identifiers}),
'Weight axes {} are redundant'
)
if len(weight.identifiers) == 0:
warnings.warn('EinMix: weight has no dimensions (means multiplication by a number)')
_weight_shape = [axes_lengths[axis] for axis, in weight.composition]
# single output element is a combination of fan_in input elements
_fan_in = _product([axes_lengths[axis] for axis, in weight.composition if axis not in right.identifiers])
if bias_shape is not None:
if not isinstance(bias_shape, str):
raise EinopsError('bias shape should be a string specifying which axes bias depends on')
bias = ParsedExpression(bias_shape)
_report_axes(
set.difference(bias.identifiers, right.identifiers),
'Bias axes {} not present in output'
)
_report_axes(
set.difference(bias.identifiers, set(axes_lengths)),
'Sizes not provided for bias axes {}',
)
_bias_shape = []
for axes in right.composition:
for axis in axes:
if axis in bias.identifiers:
_bias_shape.append(axes_lengths[axis])
else:
_bias_shape.append(1)
else:
_bias_shape = None
weight_bound = (3 / _fan_in) ** 0.5
bias_bound = (1 / _fan_in) ** 0.5
self._create_parameters(_weight_shape, weight_bound, _bias_shape, bias_bound)
# rewrite einsum expression with single-letter latin identifiers so that
# expression will be understood by any framework
mapped_identifiers = {*left.identifiers, *right.identifiers, *weight.identifiers}
mapping2letters = {k: letter for letter, k in zip(string.ascii_lowercase, mapped_identifiers)}
def write_flat(axes: list):
return ''.join(mapping2letters[axis] for axis in axes)
self.einsum_pattern: str = '{},{}->{}'.format(
write_flat(left.flat_axes_order()),
write_flat(weight.flat_axes_order()),
write_flat(right.flat_axes_order()),
)
def _create_rearrange_layers(self,
pre_reshape_pattern: Optional[str],
pre_reshape_lengths: Optional[Dict],
post_reshape_pattern: Optional[str],
post_reshape_lengths: Optional[Dict]):
raise NotImplementedError('Should be defined in framework implementations')
def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
""" Shape and implementations """
raise NotImplementedError('Should be defined in framework implementations')
def __repr__(self):
params = repr(self.pattern)
params += f", '{self.weight_shape}'"
if self.bias_shape is not None:
params += f", '{self.bias_shape}'"
for axis, length in self.axes_lengths.items():
params += ', {}={}'.format(axis, length)
return '{}({})'.format(self.__class__.__name__, params)
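# Worked example (illustrative; the actual letters depend on set iteration
# order): for EinMix('t b cin -> t b cout', weight_shape='cin cout', cin=10,
# cout=20), the identifiers {t, b, cin, cout} are mapped to single latin
# letters, so einsum_pattern becomes e.g. 'abc,cd->abd'; the weight has shape
# (10, 20), _fan_in is 10 (only cin is mixed away), and weight_bound is
# (3 / 10) ** 0.5.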
| einops-master | einops/layers/_einmix.py |
from typing import Optional, Dict, cast
import torch
from . import RearrangeMixin, ReduceMixin
from ._einmix import _EinmixMixin
from .._torch_specific import apply_for_scriptable_torch
__author__ = 'Alex Rogozhnikov'
class Rearrange(RearrangeMixin, torch.nn.Module):
def forward(self, input):
recipe = self._multirecipe[input.ndim]
return apply_for_scriptable_torch(
recipe, input, reduction_type='rearrange', axes_dims=self._axes_lengths
)
def _apply_recipe(self, x):
# overriding parent method to prevent its scripting
pass
class Reduce(ReduceMixin, torch.nn.Module):
def forward(self, input):
recipe = self._multirecipe[input.ndim]
return apply_for_scriptable_torch(
recipe, input, reduction_type=self.reduction, axes_dims=self._axes_lengths
)
def _apply_recipe(self, x):
# overriding parent method to prevent its scripting
pass
class EinMix(_EinmixMixin, torch.nn.Module):
def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
self.weight = torch.nn.Parameter(torch.zeros(weight_shape).uniform_(-weight_bound, weight_bound),
requires_grad=True)
if bias_shape is not None:
self.bias = torch.nn.Parameter(torch.zeros(bias_shape).uniform_(-bias_bound, bias_bound),
requires_grad=True)
else:
self.bias = None
def _create_rearrange_layers(self,
pre_reshape_pattern: Optional[str],
pre_reshape_lengths: Optional[Dict],
post_reshape_pattern: Optional[str],
post_reshape_lengths: Optional[Dict],
):
self.pre_rearrange = None
if pre_reshape_pattern is not None:
self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
self.post_rearrange = None
if post_reshape_pattern is not None:
self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
def forward(self, input):
if self.pre_rearrange is not None:
input = self.pre_rearrange(input)
result = torch.einsum(self.einsum_pattern, input, self.weight)
if self.bias is not None:
result += self.bias
if self.post_rearrange is not None:
result = self.post_rearrange(result)
return result
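# Usage sketch (an assumption based on the apply_for_scriptable_torch routing
# above): these layers are written so that they can be compiled with
# TorchScript, e.g.
#
#     layer = Rearrange('b c h w -> b (c h w)')
#     scripted = torch.jit.script(layer)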
| einops-master | einops/layers/torch.py |
from typing import Optional, Dict, cast
import oneflow as flow
from . import RearrangeMixin, ReduceMixin
from ._einmix import _EinmixMixin
__author__ = 'Tianhe Ren & Depeng Liang'
class Rearrange(RearrangeMixin, flow.nn.Module):
def forward(self, input):
return self._apply_recipe(input)
class Reduce(ReduceMixin, flow.nn.Module):
def forward(self, input):
return self._apply_recipe(input)
class EinMix(_EinmixMixin, flow.nn.Module):
def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
self.weight = flow.nn.Parameter(flow.zeros(weight_shape).uniform_(-weight_bound, weight_bound),
requires_grad=True)
if bias_shape is not None:
self.bias = flow.nn.Parameter(flow.zeros(bias_shape).uniform_(-bias_bound, bias_bound),
requires_grad=True)
else:
self.bias = None
def _create_rearrange_layers(self,
pre_reshape_pattern: Optional[str],
pre_reshape_lengths: Optional[Dict],
post_reshape_pattern: Optional[str],
post_reshape_lengths: Optional[Dict],
):
self.pre_rearrange = None
if pre_reshape_pattern is not None:
self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
self.post_rearrange = None
if post_reshape_pattern is not None:
self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
def forward(self, input):
if self.pre_rearrange is not None:
input = self.pre_rearrange(input)
result = flow.einsum(self.einsum_pattern, input, self.weight)
if self.bias is not None:
result += self.bias
if self.post_rearrange is not None:
result = self.post_rearrange(result)
return result
| einops-master | einops/layers/oneflow.py |
from dataclasses import field
from typing import Optional, Dict, cast
import flax.linen as nn
import jax
import jax.numpy as jnp
from . import RearrangeMixin, ReduceMixin
from ._einmix import _EinmixMixin
__author__ = 'Alex Rogozhnikov'
class Reduce(nn.Module):
pattern: str
reduction: str
sizes: dict = field(default_factory=lambda: {})
def setup(self):
self.reducer = ReduceMixin(self.pattern, self.reduction, **self.sizes)
def __call__(self, input):
return self.reducer._apply_recipe(input)
class Rearrange(nn.Module):
pattern: str
sizes: dict = field(default_factory=lambda: {})
def setup(self):
self.rearranger = RearrangeMixin(self.pattern, **self.sizes)
def __call__(self, input):
return self.rearranger._apply_recipe(input)
class EinMix(nn.Module, _EinmixMixin):
pattern: str
weight_shape: str
bias_shape: Optional[str] = None
sizes: dict = field(default_factory=lambda: {})
def setup(self):
self.initialize_einmix(
pattern=self.pattern,
weight_shape=self.weight_shape,
bias_shape=self.bias_shape,
axes_lengths=self.sizes,
)
def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
self.weight = self.param("weight", jax.nn.initializers.uniform(weight_bound), weight_shape)
if bias_shape is not None:
self.bias = self.param("bias", jax.nn.initializers.uniform(bias_bound), bias_shape)
else:
self.bias = None
def _create_rearrange_layers(self,
pre_reshape_pattern: Optional[str],
pre_reshape_lengths: Optional[Dict],
post_reshape_pattern: Optional[str],
post_reshape_lengths: Optional[Dict]):
self.pre_rearrange = None
if pre_reshape_pattern is not None:
self.pre_rearrange = Rearrange(pre_reshape_pattern, sizes=cast(dict, pre_reshape_lengths))
self.post_rearrange = None
if post_reshape_pattern is not None:
self.post_rearrange = Rearrange(post_reshape_pattern, sizes=cast(dict, post_reshape_lengths))
def __call__(self, input):
if self.pre_rearrange is not None:
input = self.pre_rearrange(input)
result = jnp.einsum(self.einsum_pattern, input, self.weight)
if self.bias is not None:
result += self.bias
if self.post_rearrange is not None:
result = self.post_rearrange(result)
return result
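# Usage sketch (an illustration): like any flax.linen module, these are
# configured via dataclass fields and used through the init/apply lifecycle:
#
#     layer = Rearrange(pattern='b h w c -> b (h w c)')
#     variables = layer.init(jax.random.PRNGKey(0), x)
#     y = layer.apply(variables, x)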
| einops-master | einops/layers/flax.py |
from typing import Optional, Dict, cast
import chainer
from . import RearrangeMixin, ReduceMixin
from ._einmix import _EinmixMixin
__author__ = 'Alex Rogozhnikov'
class Rearrange(RearrangeMixin, chainer.Link):
def __call__(self, x):
return self._apply_recipe(x)
class Reduce(ReduceMixin, chainer.Link):
def __call__(self, x):
return self._apply_recipe(x)
class EinMix(_EinmixMixin, chainer.Link):
def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
uniform = chainer.variable.initializers.Uniform
with self.init_scope():
self.weight = chainer.variable.Parameter(uniform(weight_bound), weight_shape)
if bias_shape is not None:
self.bias = chainer.variable.Parameter(uniform(bias_bound), bias_shape)
else:
self.bias = None
def _create_rearrange_layers(self,
pre_reshape_pattern: Optional[str],
pre_reshape_lengths: Optional[Dict],
post_reshape_pattern: Optional[str],
post_reshape_lengths: Optional[Dict],
):
self.pre_rearrange = None
if pre_reshape_pattern is not None:
self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
self.post_rearrange = None
if post_reshape_pattern is not None:
self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
def __call__(self, input):
if self.pre_rearrange is not None:
input = self.pre_rearrange(input)
result = chainer.functions.einsum(self.einsum_pattern, input, self.weight)
if self.bias is not None:
result = result + self.bias
if self.post_rearrange is not None:
result = self.post_rearrange(result)
return result
| einops-master | einops/layers/chainer.py |
from setuptools import setup, find_packages
setup(
name="local-attention-flax",
packages=find_packages(),
version="0.0.2",
license="MIT",
description="Local Attention - Flax Module in Jax",
author="Phil Wang",
author_email="",
url="https://github.com/lucidrains/local-attention-flax",
keywords=[
"artificial intelligence",
"deep learning",
"attention mechanism",
"jax"
],
install_requires=[
"einops>=0.3",
"flax",
"jax",
"jaxlib"
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
)
| local-attention-flax-main | setup.py |
from local_attention_flax.local_attention_flax import LocalAttention
| local-attention-flax-main | local_attention_flax/__init__.py |
import flax.linen as nn
from jax import numpy as np
from einops import rearrange
ATTN_MASK_VALUE = -1e10
class LocalAttention(nn.Module):
dim: int
window_size: int
heads: int = 8
dim_head: int = 64
@nn.compact
def __call__(self, x):
n, h, dim_head, wsz = x.shape[0], self.heads, self.dim_head, self.window_size
assert (n % wsz) == 0, 'sequence length must be divisible by the window size'
scale = dim_head ** -0.5
window = n // wsz
qkv = nn.Dense(features = 3 * h * dim_head, use_bias = False)(x)
q, k, v = np.split(qkv, 3, axis = -1)
q, k, v = map(lambda t: rearrange(t, '(w n) (h d) -> h w n d', w = window, h = h), (q, k, v))
k, v = map(lambda t: np.pad(t, ((0, 0), (1, 0), (0, 0), (0, 0)), constant_values = 0.), (k, v))
k, v = map(lambda t: np.concatenate((t[:, :-1], t[:, 1:]), axis = 2), (k, v))
sim = np.einsum('h w i d, h w j d -> h w i j', q, k) * scale
mask = np.tril(np.ones((wsz, wsz * 2)), wsz)
sim = np.where(mask, sim, ATTN_MASK_VALUE)
attn = nn.softmax(sim, axis = -1)
out = np.einsum('h w i j, h w j d -> h w i d', attn, v)
out = rearrange(out, 'h w n d -> (w n) (h d)')
out = nn.Dense(features = self.dim)(out)
return out
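# Usage sketch (an illustration): apply the module to a (seq_len, dim) array
# whose length is a multiple of window_size:
#
#     import jax
#     attn = LocalAttention(dim = 512, window_size = 128)
#     x = np.zeros((1024, 512))
#     params = attn.init(jax.random.PRNGKey(0), x)
#     out = attn.apply(params, x)  # shape (1024, 512)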
| local-attention-flax-main | local_attention_flax/local_attention_flax.py |
from setuptools import setup, find_packages
from io import open
import versioneer
DESCRIPTION = (
"ANANSE: Prediction of key transcription factors in cell fate "
"determination using enhancer networks"
)
with open("README.md", encoding="utf-8") as f:
long_description = f.read().strip("\n")
setup(
name="ananse",
version=versioneer.get_version(),
long_description=long_description,
long_description_content_type="text/markdown",
description=DESCRIPTION,
author="Quan Xu",
author_email="[email protected]",
url="https://github.com/vanheeringen-lab/ananse/",
download_url="https://github.com/vanheeringen-lab/ananse/"
+ versioneer.get_version(),
license="MIT",
packages=find_packages(),
scripts=["scripts/ananse"],
include_package_data=True,
zip_safe=False, # This is necessary, otherwise files won't be installed
classifiers=[
"Development Status :: 4 Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
install_requires=[
"setuptools >= 0.7",
"adjusttext",
"dask",
"gimmemotifs >=0.15.1",
"loguru",
"networkx",
"numpy",
"openpyxl",
"pandas",
"scipy",
"scikit-learn",
"tables",
"genomepy >= 0.9.3",
"pyranges",
],
)
| ANANSE-master | setup.py |
# Version: 0.19
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/python-versioneer/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
* [![Latest Version][pypi-image]][pypi-url]
* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
* run `versioneer install` in your source tree, commit the results
* Verify version information with `python setup.py version`
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
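For example, a dirty development checkout two commits past the 0.11 tag might
report (illustrative values): {'version': '0.11+2.g1076c97.dirty',
'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac', 'dirty': True,
'error': None, 'date': '2020-01-01T00:00:00+0000'}.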
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/python-versioneer/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## Similar projects
* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
dependency
* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
versioneer
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
[pypi-url]: https://pypi.python.org/pypi/versioneer/
[travis-image]:
https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
"""
import configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
with open(setup_cfg, "r") as f:
parser.read_file(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
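# For example (illustrative): run_command(["git"], ["describe", "--tags"])
# returns a (stdout, returncode) pair such as ("v1.2-3-gabc1234", 0); it
# returns (None, None) when none of the listed commands could be executed, and
# (None, returncode) when the command ran but failed.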
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
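# For example (illustrative): pieces with closest-tag "1.0", distance 2,
# short "abc1234" and dirty True render as "1.0+2.gabc1234.dirty".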
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory.
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.19) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
mo = re.search(
r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
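# Note: versions_from_file() is the inverse of write_to_version_file() below;
# it extracts the JSON blob that SHORT_VERSION_PY embeds between the
# triple-quote markers in a generated _version.py.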
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
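# Worked example (hypothetical values): a dirty tree three commits past tag
# "1.2" at short hash "abc1234" renders as follows.
#   render_pep440({"closest-tag": "1.2", "distance": 3,
#                  "short": "abc1234", "dirty": True})
#   # -> "1.2+3.gabc1234.dirty"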
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
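# Usage sketch (illustrative values): render() consumes the dict produced by a
# pieces_from_vcs handler.
#   pieces = git_pieces_from_vcs("v", root, verbose=False)
#   render(pieces, "pep440")["version"]  # e.g. "1.2+3.gabc1234.dirty"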
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools/distutils subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
should be provided as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "build_py" in cmds:
_build_py = cmds["build_py"]
elif "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "setuptools" in sys.modules:
from setuptools.command.build_ext import build_ext as _build_ext
else:
from distutils.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
from py2exe.distutils_buildexe import py2exe as _py2exe
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "sdist" in cmds:
_sdist = cmds["sdist"]
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds
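# Usage sketch, mirroring the CONFIG_ERROR snippet below: in a project's
# setup.py,
#   import versioneer
#   setup(version=versioneer.get_version(),
#         cmdclass=versioneer.get_cmdclass())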
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (
EnvironmentError,
configparser.NoSectionError,
configparser.NoOptionError,
) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
| ANANSE-master | versioneer.py |
import urllib
import pandas as pd
import numpy as np
import re
import sys
import os
from loguru import logger
import ananse
logger.remove()
logger.add(
sys.stderr, format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | {level} | {message}"
)
TFP_URL = "https://maayanlab.cloud/Enrichr/geneSetLibrary?mode=text&libraryName=TF_Perturbations_Followed_by_Expression"
TRRUST_URL = "https://www.grnpedia.org/trrust/data/trrust_rawdata.human.tsv"
MSIGDB_URL = "https://data.broadinstitute.org/gsea-msigdb/msigdb/release/7.4/c3.all.v7.4.symbols.gmt"
def download_trrust_reference(outfile):
edges = []
with urllib.request.urlopen(
TRRUST_URL,
) as f:
for line in f.readlines():
tf, target, regtype, pmid = line.decode().strip().split("\t")
# Just skip repression for now
if regtype in ["Activation", "Unknown"]:
edges.append([tf, target, 1])
edges = pd.DataFrame(edges, columns=["tf", "target", "interaction"])
edges.to_csv(outfile, sep="\t", index=False)
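# Usage sketch (hypothetical output path): writes a tab-separated file with
# tf/target/interaction columns, keeping only Activation/Unknown TRRUST edges.
#   download_trrust_reference("trrust_reference.tsv")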
def download_msigdb_reference(outfile):
with urllib.request.urlopen(MSIGDB_URL) as gmt, open(outfile, "w") as fl1:
for line in gmt:
a = line.decode("utf-8").split()
tf = a[0].split("_")[0]
targets = a[2:]
for target in targets:
fl1.write(f"{tf}\t{target}\n")
def fix_columns(df):
"""Make sure network has a tf and a target column."""
df.columns = df.columns.str.lower()
df = df.rename(
columns={
"source": "tf",
"source_target": "tf_target",
"target_gene": "target",
}
)
if "tf_target" in df.columns:
df[["tf", "target"]] = df["tf_target"].str.split("_", expand=True).iloc[:, :2]
df = df.drop(columns=["tf_target"])
if "tf" not in df.columns:
raise ValueError("Expect a column named 'source' or 'tf'")
if "target" not in df.columns:
raise ValueError("Expect a column named 'target' or 'target_gene'")
return df
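# Example (hypothetical data): a combined "source_target" column is split into
# separate "tf" and "target" columns.
#   df = pd.DataFrame({"source_target": ["TP53_MDM2"], "prob": [0.9]})
#   fix_columns(df).columns.tolist()  # -> ["prob", "tf", "target"]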
def prepare_reference_network(network, filter_tfs=True):
"""Generate reference network.
This network contains all possible edges, based on the TFs
and the target genes in the input. TFs are optionally filtered
to contain only validated TFs.
Returns
-------
DataFrame with column `"interaction"` having 1 for a validated
edge and 0 otherwise.
"""
if isinstance(network, pd.DataFrame):
df = network.reset_index()
elif isinstance(network, str):
if network.endswith("feather"):
df = pd.read_feather(network)
else:
df = pd.read_table(network)
else:
raise ValueError("Unknown network type, need DataFrame or filename.")
df = fix_columns(df)
interaction_column = None
for col in df.columns:
if col in ["tf", "target"]:
continue
vals = df[col].unique()
if len(vals) in [1, 2] and 1 in vals:
interaction_column = col
break
tfs = set(df["tf"].unique())
if filter_tfs:
valid_tfs = set(get_tfs())
tfs = list(tfs.intersection(valid_tfs))
targets = df["target"].unique()
# logger.info(
# f"{os.path.split(network)[-1]} reference - {len(tfs)} TFs, {len(targets)} targets, {df.shape[0]} edges."
# )
total = []
for tf in tfs:
for target in targets:
total.append([tf, target])
total = pd.DataFrame(total, columns=["tf", "target"]).set_index(["tf", "target"])
if interaction_column is not None:
logger.info(f"Using '{interaction_column}' as interaction column.")
df = df.set_index(["tf", "target"])[[interaction_column]].rename(
columns={interaction_column: "interaction"}
)
else:
logger.info("No column with 1 found, assuming all lines are positive edges.")
df = df.set_index(["tf", "target"])
df["interaction"] = 1
return total.join(df[["interaction"]]).fillna(0)
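# Usage sketch (hypothetical file): the result is indexed on (tf, target) over
# the full TF x target grid, with interaction 1 for reference edges and 0
# otherwise.
#   ref = prepare_reference_network("trrust_reference.tsv")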
def _read_dorothea_reference(fname):
dorothea = pd.read_table(fname)
cols = [
"is_evidence_chip_seq",
"is_evidence_curated",
"is_evidence_inferred",
"is_evidence_tfbs",
]
dorothea = dorothea.set_index(["tf", "target"])[cols]
for col in cols:
dorothea[col] = dorothea[col].astype(int)
dorothea["dorothea"] = np.any(dorothea[cols] == 1, 1).astype(int)
dorothea = dorothea.reset_index()
tfs = set(dorothea["tf"].unique())
valid_tfs = set(get_tfs())
tfs = list(tfs.intersection(valid_tfs))
targets = dorothea["target"].unique()
logger.info(
f"Dorothea reference - {len(tfs)} TFs, {len(targets)} targets, {dorothea.shape[0]} edges."
)
total = []
for tf in tfs:
for target in targets:
total.append([tf, target])
total = pd.DataFrame(total, columns=["tf", "target"]).set_index(["tf", "target"])
dorothea = dorothea.set_index(["tf", "target"])
dorothea = total.join(dorothea)
dorothea = dorothea.fillna(0)
return dorothea
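# Usage sketch (hypothetical file): expects a table with tf/target columns and
# the four is_evidence_* columns used above.
#   dorothea = _read_dorothea_reference("dorothea.tsv")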
def _read_enrichr_perturbation_reference(fname=None):
"""Uses the TF perturbations from Enrichr[1,2] to create reference edges.
Targets are defined by up- or down-regulated genes from the following sets:
Up: INDUCTION, ACTIVATION, OE.
Down: KD, KO, INACTIVATION, DEPLETION, SIRNA, SHRNA, KNOCKOUT, DELETION, INHIBITION.
The TFs and targets in the DataFrame consist of the Cartesian product of
all TFs and target genes that occur in the set.
Returns
-------
DataFrame with tf-target edges.
References
----------
.. [1] Chen EY, Tan CM, Kou Y, Duan Q, Wang Z, Meirelles GV, Clark NR, Ma'ayan A.
"Enrichr: interactive and collaborative HTML5 gene list enrichment analysis
tool." BMC Bioinformatics. 2013;14:128.
.. [2] Kuleshov MV, Jones MR, Rouillard AD, Fernandez NF, Duan Q, Wang Z,
Koplev S, Jenkins SL, Jagodnik KM, Lachmann A, McDermott MG, Monteiro CD,
Gundersen GW, Ma'ayan A. "Enrichr: a comprehensive gene set enrichment
analysis web server 2016 update." Nucleic Acids Research. 2016; gkw377.
"""
use_online = False
if fname:
fopen = open(fname)
else:
logger.info(
"No filename provided for TF perturbations, downloading from Enrichr"
)
fopen = urllib.request.urlopen(TFP_URL)
use_online = True
p = re.compile(r"(\w+)\s+(\w+)\s+(.+)\s+(\w+)")
all_info = []
edges = []
with fopen as f:
for line in f:
if use_online:
line = line.decode("utf-8")
vals = line.strip().split("\t")
m = re.search(p, vals[0])
all_info.append(m.groups(0))
if (
m.group(2) in ["INDUCTION", "ACTIVATION", "OE"] and m.group(4) == "UP"
) or (
m.group(2)
in [
"KD",
"KO",
"INACTIVATION",
"DEPLETION",
"SIRNA",
"SHRNA",
"KNOCKOUT",
"DELETION",
"INHIBITION",
]
and m.group(4) == "DOWN"
):
tf = m.group(1)
for target in vals[2:]:
edges.append([tf, target])
all_info = pd.DataFrame(all_info, columns=["tf", "exp", "info", "up_down"])
perturb_df = pd.DataFrame(edges, columns=["tf", "target"])
tfs = set(perturb_df["tf"].unique())
targets = perturb_df["target"].unique()
logger.info(
f"TF perturbation reference - {len(tfs)} TFs, {len(targets)} targets, {perturb_df.shape[0]} edges."
)
perturb_df["experiments"] = 1
perturb_df = perturb_df.groupby(["tf", "target"]).count()
perturb_df["interaction"] = 1
perturb_df.columns = ["perturb_experiments", "perturb_interaction"]
valid_tfs = set(get_tfs())
tfs = list(tfs.intersection(valid_tfs))
total = []
for tf in tfs:
for target in targets:
total.append([tf, target])
total = pd.DataFrame(total, columns=["tf", "target"]).set_index(["tf", "target"])
perturb_df = total.join(perturb_df).fillna(0)
return perturb_df
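# Usage sketch: with no filename, the perturbation gene sets are downloaded
# directly from Enrichr (TFP_URL).
#   perturb_df = _read_enrichr_perturbation_reference()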
def get_tfs():
valid_factors = pd.read_excel(
"https://www.biorxiv.org/content/biorxiv/early/2020/12/07/2020.10.28.359232/DC1/embed/media-1.xlsx",
engine="openpyxl",
sheet_name=1,
)
valid_factors = valid_factors.loc[
valid_factors["Pseudogene"].isnull(), "HGNC approved gene symbol"
].values
valid_factors = [f for f in valid_factors if f != "EP300"]
return valid_factors
def read_network(fname, name=None):
network = fname
if fname.endswith("feather"):
df = pd.read_feather(network)
else:
df = pd.read_table(network)
df = fix_columns(df)
df = df.set_index(["tf", "target"])
# Assuming last column is the edge weight
df = df.iloc[:, [-1]]
if name is not None:
df.columns = [name]
return df
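# Usage sketch (hypothetical file): only the last column is kept, as the edge
# weight, and optionally renamed.
#   net = read_network("network.txt", name="ananse")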
def _read_correlation_reference(network, corCutoff=0.6):
tfs_name = f"{os.path.dirname(ananse.__file__)}/db/tfs.txt"
tfs = pd.read_csv(tfs_name, header=None)[0].tolist()
edb = pd.read_csv(network, sep="\t")
edb["iscorrelation"] = [1 if i > corCutoff else 0 for i in edb["correlationRank"]]
edb[["tf", "target"]] = edb["source_target"].str.split("_", expand=True).iloc[:, :2]
edb = edb.drop(
columns=["source_target", "ocorrelation", "correlation", "correlationRank"]
)
edb = edb[edb.tf.isin(tfs)]
edb = edb.set_index(["tf", "target"])
return edb
def _read_goterm_reference(network, goCutoff=0):
tfs_name = f"{os.path.dirname(ananse.__file__)}/db/tfs.txt"
tfs = pd.read_csv(tfs_name, header=None)[0].tolist()
gdb = pd.read_csv(network, sep="\t", header=None)
gdb["isgo"] = [1 if i > goCutoff else 0 for i in gdb[2]]
gdb = gdb.rename(columns={3: "tf", 1: "target"})
gdb = gdb[gdb.tf.isin(tfs)]
gdb = gdb.drop(columns=[0, 2])
gdb = gdb.set_index(["tf", "target"])
return gdb
def _read_msigdb_reference(network):
msidb = pd.read_csv(network, sep="\t", header=None)
msidb = msidb.rename(columns={0: "tf", 1: "target"})
msidb = msidb.set_index(["tf", "target"])
msidb["interaction"] = 1
return msidb
def _read_regnet_reference(network):
regnet = pd.read_csv(network)
regnet = regnet.rename(
columns={"regulator_symbol": "tf", "target_symbol": "target"}
)
regnet = regnet.set_index(["tf", "target"])
regnet["interaction"] = 1
return regnet[["interaction"]]
def read_reference(name, fname=None):
"""
Valid reference networks (name):
- dorothea
- perturbation
- correlation
- goterm
- msigdb
- regnet
- trrust
"""
if name.lower() == "dorothea":
return _read_dorothea_reference(fname)
if name.lower() == "perturbation":
return prepare_reference_network(_read_enrichr_perturbation_reference(fname))
if name.lower() == "correlation":
return prepare_reference_network(_read_correlation_reference(fname, 0.6))
if name.lower() == "goterm":
return prepare_reference_network(_read_goterm_reference(fname, 0))
if name.lower() == "msigdb":
return prepare_reference_network(_read_msigdb_reference(fname))
if name.lower() == "regnet":
return prepare_reference_network(_read_regnet_reference(fname))
if name.lower() == "trrust":
return prepare_reference_network(fname)
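# Usage sketch: e.g. with the file produced by download_trrust_reference().
#   ref = read_reference("trrust", "trrust_reference.tsv")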
def validate_files(fnames, ignore_missing=False):
file_error = False
for fname in fnames:
if not os.path.exists(fname):
logger.error(f"file {fname} does not exist")
file_error = True
if not ignore_missing and file_error:
raise ValueError("One or more files not found!")
def read_networks(network_dict, ignore_missing=False):
"""Read predicted networks.
Input is a dictionary with name as key and filename as value.
"""
# Validate files first
validate_files(network_dict.values(), ignore_missing=ignore_missing)
df = pd.DataFrame({"tf": [], "target": []}).set_index(["tf", "target"])
for name, fname in network_dict.items():
if os.path.exists(fname):
logger.info(f"Reading {name}")
tmp = read_network(fname, name=name)
logger.info(f"Merging {name}")
df = df.join(tmp, how="outer")
return df
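# Usage sketch (hypothetical filenames): networks are outer-joined on
# (tf, target), so edges missing from a network become NaN in its column.
#   df = read_networks({"ananse": "ananse.txt", "other": "other.feather"})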
| ANANSE-master | ananse/benchmark.py |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = " (HEAD -> master)"
git_full = "18995f01657db5e92d4558eff4c1e81d30ff088e"
git_date = "2021-09-28 10:06:03 +0200"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "ananse-"
cfg.versionfile_source = "ananse/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
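# Illustration: after the decorated definitions below,
# HANDLERS["git"]["get_keywords"] is git_get_keywords and
# HANDLERS["git"]["pieces_from_vcs"] is git_pieces_from_vcs.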
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
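# Usage sketch: returns (stdout, returncode); (None, None) if no command could
# be started, and (None, rc) on a non-zero exit.
#   out, rc = run_command(["git"], ["rev-parse", "HEAD"], cwd=root)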
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory.
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
| ANANSE-master | ananse/_version.py |
from glob import glob
import inspect
import os
import re
import sys
from tempfile import NamedTemporaryFile
from fluff.fluffio import load_heatmap_data
from genomepy import Genome
from gimmemotifs.motif import read_motifs
from gimmemotifs.scanner import scan_regionfile_to_table
from gimmemotifs.moap import moap
import joblib
from loguru import logger
import networkx as nx
import numpy as np
import pandas as pd
from pandas import HDFStore
from sklearn.preprocessing import scale, minmax_scale
from scipy.stats import rankdata
import qnorm
import ananse
from ananse.enhancer_binding import CombineBedFiles
from ananse.utils import get_motif_factors, check_input_factors
# This motif file is not created by default
# * f"{self.data_dir}/reference.factor.feather"
class PeakPredictor:
def __init__(
self,
reference=None,
atac_bams=None,
histone_bams=None,
regions=None,
genome="hg38",
pfmfile=None,
factors=None,
pfmscorefile=None,
ncpus=4,
):
self.data_dir = reference
if atac_bams is None and histone_bams is None:
raise ValueError("Need either ATAC-seq or H3K27ac BAM file(s).")
if genome is None:
logger.warning("Assuming genome is hg38")
genome = "hg38"
self.genome = genome
self.set_species(genome)
if pfmfile is None and self.species not in ["human", "mouse"]:
logger.warning(
f"The genome '{genome}' is not recognized as human or mouse."
)
logger.warning(
"If you do have another species, the motif file likely needs to be adapted."
)
logger.warning(
"Currently mouse and human gene names are used to link motif to TFs."
)
logger.warning(
"If your gene symbols are different, then you will need to create a new mapping"
)
logger.warning(
"and use the `-p` argument. For a possible method to do this, see here:"
)
logger.warning(
"https://gimmemotifs.readthedocs.io/en/stable/reference.html#command-gimme-motif2factors"
)
# Set basic information
self.ncpus = ncpus
self._atac_data = None
self._histone_data = None
self.factor_models = {}
self.pfmfile = pfmfile
self._load_motifs(factors=factors)
# if the reference regions are used, we can use existing data such
# as motif scores.
if regions is None:
self.region_type = "reference"
self._load_reference_data()
# If we have custom regions we have to scan for motifs.
else:
self.region_type = "custom"
self.regions = regions
if pfmscorefile is None:
self._scan_motifs(regions)
else:
self._load_prescanned_motifs(pfmscorefile)
# Load ATAC data
if atac_bams is not None:
self.load_atac(atac_bams, update_models=False)
# Load histone ChIP-seq data
if histone_bams is not None:
self.load_histone(histone_bams, update_models=False)
self._set_model_type()
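# Usage sketch (hypothetical paths; the reference directory must contain the
# files loaded in _load_reference_data below):
#   p = PeakPredictor(reference="db/default_reference",
#                     atac_bams=["atac.bam"], genome="hg38")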
def _scan_motifs(self, regions):
"""[summary]
Parameters
----------
regions : [type]
[description]
"""
logger.info("Scanning regions for motifs.")
with NamedTemporaryFile(mode="w") as f:
print("region", file=f)
for region in regions:
print(region, file=f)
f.flush()
# TODO: we're still scanning for *all* motifs, even if we only have
# a few factors
motif_df = scan_regionfile_to_table(
f.name, self.genome, "score", ncpus=self.ncpus
)
self._motifs = pd.DataFrame(index=motif_df.index)
for factor in self.f2m:
# if factor not in valid_factors:
# continue
self._motifs[factor] = motif_df[self.f2m[factor]].mean(1)
def _load_prescanned_motifs(self, pfmscorefile):
"""
Use pre-scanned gimmemotifs motif scores.
Parameters
----------
pfmscorefile : str/file
pre-scanned gimmemotifs scores file
"""
logger.info("loading pre-scanned motif scores.")
motif_df = pd.read_table(pfmscorefile, comment="#", index_col=0)
self._motifs = pd.DataFrame(index=motif_df.index)
for factor in self.f2m:
# if factor not in valid_factors:
# continue
self._motifs[factor] = motif_df[self.f2m[factor]].mean(1)
def _load_reference_data(self):
"""Load data for reference regions.
Will load three types of data:
* Motif scores.
* The average peak coverage (self._avg)
* The distance from the peak to nearest TSS. (self._dist)
All of these data are only used with the reference set of regions.
"""
# Read motifs
logger.info("loading motifs for reference")
self._motifs = pd.read_feather(f"{self.data_dir}/reference.factor.feather")
self._motifs.set_index(self._motifs.columns[0], inplace=True)
# Read average coverage
logger.info("loading average peak coverage for reference")
self._avg = pd.read_table(
f"{self.data_dir}/reference.coverage.txt",
sep="\t",
comment="#",
index_col=0,
)
self._avg.columns = ["average"]
self._avg["average"] = self._avg["average"] / self._avg["average"].max()
# Read distance to TSS
logger.info("loading distance for reference")
self._dist = pd.read_table(
f"{self.data_dir}/reference.dist_to_tss.txt",
sep="\t",
comment="#",
index_col=0,
)
# Set regions
self.regions = self._avg.index
def _load_human_factors(self):
package_dir = os.path.dirname(ananse.__file__)
tf_xlsx = os.path.join(package_dir, "db", "lovering.tfs.xlsx")
valid_factors = pd.read_excel(
tf_xlsx,
engine="openpyxl",
sheet_name=1,
)
valid_factors = valid_factors.loc[
valid_factors["Pseudogene"].isnull(), "HGNC approved gene symbol"
].values
valid_factors = list(set(valid_factors) - set(["EP300"]))
return valid_factors
def set_species(self, genome):
try:
# Try to get taxonomy id for genomepy managed genome.
# If there is a taxonomy id, we can be really sure about the species.
# If genome doesn't have a tax_id, then it will be 'na' and
# fail to convert to int.
genome = Genome(genome)
tax_id = int(genome.tax_id)
if tax_id == 9606:
self.species = "human"
elif tax_id == 10090:
self.species = "mouse"
else:
                # tax_id converts to int so it is valid, but not human or mouse
self.species = None
return
except Exception:
pass
mapping = {
"hg38": "human",
"hg19": "human",
"GRCh3": "human",
"mm10": "mouse",
"mm9": "mouse",
"GRCm3": "mouse",
}
base_genome = os.path.basename(self.genome.strip("/"))
for name, species in mapping.items():
if name in base_genome:
self.species = species
return
self.species = None
def factors(self):
if self.species == "human":
valid_factors = self._load_human_factors()
return [f for f in self.f2m if f in valid_factors]
if self.species == "mouse":
# Mouse mappings are included in the default motif db.
# Using the fact here that mouse names are not all upper-case.
# TODO: replace with a curated set of factors.
return [f for f in self.f2m if f[1:].islower()]
return list(self.f2m.keys())
def _load_factor2motifs(self, pfmfile=None, indirect=True, factors=None):
motifs = read_motifs(pfmfile, as_dict=True)
f2m = {}
if self.species == "human":
valid_factors = self._load_human_factors()
for name, motif in motifs.items():
for factor in get_motif_factors(motif, indirect=indirect):
if factors is not None and factor not in factors:
continue
                # TODO: this is temporary, while the motif database we use is
                # not very clean...
if self.species == "human":
factor = factor.upper()
if self.species == "human" and factor not in valid_factors:
continue
f2m.setdefault(factor, []).append(name)
return f2m
def _load_motifs(self, indirect=True, factors=None):
"""Load motif-associated data.
For now, only default motifs are supported.
        Will read factors associated with motifs, and generate a graph of
related factors based on different factors binding to the same motif.
This information is used to select the most appropriate TF model.
Parameters
----------
indirect : bool, optional
Include TF-motif associations that are not curated, for instance
based on ChIP-seq motif prediction, or binding inference. This will
greatly increase TF coverage. By default True.
"""
if self.pfmfile is None:
logger.info("using default motif file")
else:
logger.debug(f"Motifs: {self.pfmfile}")
self.motifs = read_motifs(self.pfmfile, as_dict=True)
self.f2m = self._load_factor2motifs(
pfmfile=self.pfmfile, indirect=indirect, factors=factors
)
if len(self.f2m) == 1:
logger.info("using motifs for 1 factor")
else:
logger.info(f"using motifs for {len(self.f2m)} factors")
        # Create a graph of TFs where edges are determined by the Jaccard index
        # of the motifs that they bind to. For instance, when TF 1 binds motifs
        # A and B and TF 2 binds motifs B and C, the Jaccard index is 0.33 and
        # the stored edge weight is 1 - 0.33 = 0.67 (lower weight means more
        # shared motifs, so shortest paths lead to the most similar TFs).
tmp_f2m = {}
if self.pfmfile is not None:
logger.debug("reading default file")
tmp_f2m = self._load_factor2motifs(indirect=True)
for k, v in self.f2m.items():
if k in tmp_f2m:
tmp_f2m[k] += v
else:
tmp_f2m[k] = v
self.motif_graph = nx.Graph()
d = []
for f1 in tmp_f2m:
for f2 in tmp_f2m:
jaccard = len(set(tmp_f2m[f1]).intersection(set(tmp_f2m[f2]))) / len(
set(tmp_f2m[f1]).union(set(tmp_f2m[f2]))
)
d.append([f1, f2, jaccard])
if jaccard > 0:
self.motif_graph.add_edge(f1, f2, weight=1 - jaccard)
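    # Worked example for the Jaccard weighting above (illustrative values):
    # with tmp_f2m = {"TF1": ["A", "B"], "TF2": ["B", "C"]}, the intersection
    # {"B"} has size 1 and the union {"A", "B", "C"} has size 3, so the
    # Jaccard index is 1/3 and the TF1-TF2 edge gets weight 1 - 1/3 ~= 0.67.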
def _load_bams(self, bams, title, window=200):
tmp = pd.DataFrame(index=self.regions)
with NamedTemporaryFile(mode="w") as f_out:
for region in self.regions:
print("{}\t{}\t{}".format(*re.split("[:-]", region)), file=f_out)
f_out.flush()
for bam in bams:
result = load_heatmap_data(
f_out.name,
bam,
bins=1,
up=window // 2,
down=window // 2,
rmdup=True,
rmrepeats=True,
)
tmp[result[0]] = result[2].T[0]
fname = f"{self.data_dir}/{title}.qnorm.ref.txt.gz"
if os.path.exists(fname):
logger.debug(f"quantile normalization for {title}")
qnorm_ref = pd.read_table(fname, index_col=0)["qnorm_ref"].values
if len(self.regions) != len(qnorm_ref):
qnorm_ref = np.random.choice(
qnorm_ref, size=len(self.regions), replace=True
)
tmp = qnorm.quantile_normalize(tmp, target=qnorm_ref)
else:
tmp = np.log1p(tmp)
# Limit memory usage by using float16
tmp = tmp.mean(1).astype("float16").to_frame(title)
fname = f"{self.data_dir}/{title}.mean.ref.txt.gz"
if self.region_type == "reference" and os.path.exists(fname):
mean_ref = pd.read_table(fname, index_col=0)
if mean_ref.shape[0] == tmp.shape[0]:
mean_ref.index = tmp.index
tmp[f"{title}.relative"] = (
tmp[title] - mean_ref.loc[tmp.index]["mean_ref"].values
)
tmp[f"{title}.relative"] = scale(tmp[f"{title}.relative"])
else:
logger.debug(f"Regions of {fname} are not the same as input regions.")
logger.debug("Skipping calculation of relative values.")
tmp[title] = tmp[title] / tmp[title].max()
return tmp
def load_atac(self, bams, update_models=True):
"""Load ATAC-seq counts from BAM files.
Parameters
----------
bams : list
List of file names.
update_models : bool, optional
Update the model used if data is loaded, by default True.
"""
logger.info("loading ATAC data")
self._atac_data = self._load_bams(bams, title="ATAC", window=200)
if update_models:
self._set_model_type()
def load_histone(self, bams, update_models=True):
"""Load H3K27ac ChIP-seq counts from BAM files.
Parameters
----------
bams : list
List of file names.
update_models : bool, optional
Update the model used if data is loaded, by default True.
"""
logger.info("loading H3K27ac data")
self._histone_data = self._load_bams(bams, title="H3K27ac", window=2000)
if update_models:
self._set_model_type()
def _set_model_type(self):
"""Select the mode to use for binding prediction.
Basically, this will select the columns that are available,
based on the different types of data that are loaded.
Reference regions will have the most information.
"""
cols = ["motif"]
if self._atac_data is not None:
cols += ["ATAC"]
if self.region_type == "reference":
cols += ["ATAC.relative"]
if self._histone_data is not None:
cols += ["H3K27ac"]
if self.region_type == "reference":
cols += ["average", "dist"]
cols = sorted(cols)
self._X_columns = cols
self._model_type = "_".join(cols)
# Load models
logger.info("Loading models")
# print(os.path.join(self.data_dir, self._model_type))
for fname in glob(os.path.join(self.data_dir, self._model_type, "*.pkl")):
factor = fname.split("/")[-1].replace(".pkl", "")
self.factor_models[factor] = joblib.load(fname)
logger.info(f"{len(self.factor_models)} models found")
def predict_proba(self, factor=None, motifs=None):
"""Predict binding probability.
Predict binding probability for either a TF (factor) or a set of
        motifs. Prediction will be based on the data that has been loaded,
either ATAC-seq or H3K27ac data or both.
Parameters
----------
factor : str, optional
Transcription factor name.
        motifs : list, optional
            Motifs. Currently not implemented.
Returns
-------
pandas.DataFrame
DataFrame with binding probabilities
"""
if factor is None and motifs is None:
raise ValueError("Need either a TF name or one or more motifs.")
if motifs is not None:
raise NotImplementedError("Custom motifs not yet implemented!")
if factor not in self.f2m:
raise ValueError(f"Motif not known for {factor}")
model, factor = self._load_model(factor)
X = self._load_data(factor)
proba = model.predict_proba(X)[:, 1]
return pd.DataFrame(proba, index=self.regions)
def _load_data(self, factor):
# if self.region_type == "reference":
# logger.debug("Reading motif data")
tmp = pd.DataFrame(
{factor: self._motifs[factor]}, index=self.regions
) # pd.read_table(os.path.join(self.data_dir, f"{factor}.motif.txt.gz"), index_col=0)
# else:
tmp.columns = ["motif"]
if self._atac_data is not None:
tmp = tmp.join(self._atac_data)
if self._histone_data is not None:
tmp = tmp.join(self._histone_data)
if self.region_type == "reference":
tmp = tmp.join(self._avg)
tmp = tmp.join(self._dist)
tmp = tmp.dropna()
# logger.debug(str(self._X_columns))
return tmp[self._X_columns]
def _load_model(self, factor):
model = None
if factor in self.factor_models:
logger.info(f"Using {factor} model")
model = self.factor_models[factor]
elif factor in self.motif_graph:
paths = {
p: v
for p, v in nx.single_source_dijkstra_path_length(
self.motif_graph, factor
).items()
if p in self.factor_models
}
try:
sub_factor = list(paths.keys())[0]
logger.info(f"Using {factor} motif with {sub_factor} model weights")
model = self.factor_models[sub_factor]
# factor = sub_factor
except Exception:
logger.info(f"No match for {factor} based on motifs")
if model is None:
logger.info(f"No related TF found for {factor}, using general model")
model = self.factor_models["general"]
return model, factor
def predict_factor_activity(self, nregions=20_000):
"""Predict TF activity.
Predicted based on motif activity using ridge regression.
Parameters
----------
"""
# Run ridge regression using motif score to predict (relative) ATAC/H3K27ac signal
try:
nregions = int(nregions)
except ValueError:
logger.warning("nregions is not an integer, using default number of 20_000")
nregions = 20_000
activity = pd.DataFrame()
for df in (self._atac_data, self._histone_data):
if df is None:
continue
for col in df.columns:
with NamedTemporaryFile() as f:
# float16 will give NaN's
signal = df[col].astype("float32")
signal = pd.DataFrame({col: scale(signal)}, index=df.index)
if df.shape[0] < nregions:
signal.to_csv(f.name, sep="\t")
else:
signal.sample(nregions).to_csv(f.name, sep="\t")
try:
activity = activity.join(
moap(
f.name,
genome=self.genome,
method="bayesianridge",
pfmfile=self.pfmfile,
),
how="outer",
)
except Exception as e:
print(e)
# Rank aggregation
for col in activity:
activity[col] = rankdata(activity[col])
activity = activity.mean(1)
activity[:] = minmax_scale(activity)
# Take the maximum activity from the motifs of each factor
factor_activity = []
for factor, motifs in self.f2m.items():
act = activity.loc[motifs].max()
factor_activity.append([factor, act])
factor_activity = pd.DataFrame(factor_activity, columns=["factor", "activity"])
return factor_activity
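# Minimal usage sketch (illustrative; the paths and the factor name are
# hypothetical, and PeakPredictor is normally constructed via predict_peaks()
# below):
# p = PeakPredictor(reference="path/to/reference", atac_bams=["atac.bam"],
#                   genome="hg38")
# proba = p.predict_proba("TP53")  # per-region binding probabilities
# activity = p.predict_factor_activity()  # per-factor motif activity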
def _check_input_regions(regionfiles, genome, outdir=".", verbose=True, force=False):
# Load regions from BED or region text file
if regionfiles is None:
# Keep regions to None, use reference regions.
return
infile = regionfiles[0]
if len(regionfiles) > 1:
# merge files, assumed to be all BED
peak_width = 200
cbed = CombineBedFiles(genome=genome, peakfiles=regionfiles, verbose=verbose)
combined_bed = os.path.join(outdir, "regions_combined.bed")
cbed.run(outfile=combined_bed, width=peak_width, force=force)
infile = combined_bed
df = pd.read_table(infile, header=None, sep="\t", comment="#", dtype=str)
assert df.shape[0] > 2, "regions file must have more that 2 regions."
test = str(df.at[1, 0])
if bool(re.match(r"^.*:\d+-\d+$", test)):
# it's a regions list
# or it's a Seq2science counts table
regions = df.iloc[:, 0].tolist()
elif df.shape[1] >= 3:
# it's a BED file
regions = (
# For Ensembl genome names, make sure it's a string
df.iloc[:, 0].astype(str)
+ ":"
+ df.iloc[:, 1].astype(str)
+ "-"
+ df.iloc[:, 2].astype(str)
).tolist()
else:
raise TypeError("Cannot identify regions file(s) type.")
# remove the header, if any.
header = str(regions[0])
if not bool(re.match(r"^.*:\d+-\d+$", header)):
regions = regions[1:]
return regions
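# Region inputs accepted by _check_input_regions (illustrative examples):
# * a region list or Seq2science counts table with a first column such as
#   "chr1:100-300"
# * one or more BED files with at least chrom, start and end columns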
def _check_input_files(*args):
files = []
for arg in args:
if arg is None:
continue
if isinstance(arg, list):
files.extend(arg)
else:
files.append(arg)
all_files_found = True
for fname in files:
if not os.path.exists(fname):
logger.exception(f"Could not find {fname}!")
all_files_found = False
if not all_files_found:
exit(1)
def predict_peaks(
outdir,
atac_bams=None,
histone_bams=None,
regionfiles=None,
reference=None,
factors=None,
genome=None,
pfmfile=None,
pfmscorefile=None,
ncpus=4,
):
"""Predict binding in a set of genomic regions.
Binding is predicted based on ATAC-seq and/or H3K27ac ChIP-seq data in
combination with motif scores. The model that is used is flexible, based
on the input data. The most accurate model will be the one that uses the
    reference regions in combination with both ATAC-seq and H3K27ac ChIP-seq.
    The result will be saved to an output file called `binding.h5` in the
    output directory, specified by the `outdir` argument. This file contains,
    for each factor, the predicted binding probability per enhancer region.
To predict binding, `predict_peaks()` needs a set of input regions. For
human, you have two options. You can either use the reference set of
putative enhancer regions, as described in the ANANSE manuscript [1]. This
is specified by the `reference` argument.
Alternatively, you can specify one or more region files with the
`regionfiles` argument. These are files in BED or narrowPeak format, that
describe potential enhancers. For instance, a reference enhancer set, peaks
from your ATAC-seq experiments or any other collection of regions. For
accurate motif analysis, these should be as precise as possible. BroadPeaks
from histone ChIP-seq are not really suitable. NarrowPeaks from ATAC-seq,
DNase-seq or TF ChIP-seq will be fine.
Parameters
----------
outdir : str
Name of output directory.
atac_bams : list, optional
List of BAM files, by default None
histone_bams : list, optional
List of H3K27ac ChIP-seq BAM files, by default None
regionfiles : list, optional
BED file or text file with regions, or a list of BED, narrowPeak or
        broadPeak files. If None, then the reference regions are used.
reference : str, optional
Directory name to a reference.
factors : list, optional
List of TF names or file with TFs, one per line. If None (default),
then all TFs are used.
genome : str, optional
Genome name. The default is hg38.
pfmfile : str, optional
Motifs in PFM format, with associated motif2factors.txt file.
pfmscorefile : str, optional
Path to file with pre-scanned motif scores.
ncpus : int, optional
Number of threads to use. Default is 4.
"""
if reference is None and regionfiles is None:
logger.error("Need either input regions or location of a reference set!")
logger.error(
"For human, you can download the REMAP reference here: https://doi.org/10.5281/zenodo.4768075 "
"(please see the docs on how to install this)."
)
logger.error(
"Otherwise you need to specify one or more BED or narrowPeak files"
)
logger.error(
"with potential enhancer regions, for instance, all ATAC-seq peaks"
)
logger.error("from your combined experiments.")
sys.exit(1)
if reference is not None and regionfiles is not None:
logger.error("Need either a reference location *or* or a set of input regions")
sys.exit(1)
# Check if all specified BAM files exist
_check_input_files(atac_bams, histone_bams)
# Read the factors, from a file if needed
factors = check_input_factors(factors)
# Check genome, will fail if it is not a correct genome name or file
Genome(genome)
if not os.path.exists(outdir):
os.makedirs(outdir, exist_ok=True)
# If regions are specified, read them in, combining multiple files if
# necessary.
regions = _check_input_regions(regionfiles, genome, outdir=outdir)
if reference is None:
install_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe()))
)
reference = os.path.join(install_dir, "db", "default_reference")
if reference is not None:
if not os.path.exists(reference):
logger.error(f"Reference directory {reference} does not exist!")
sys.exit(1)
p = PeakPredictor(
reference=reference,
atac_bams=atac_bams,
histone_bams=histone_bams,
regions=regions,
genome=genome,
pfmfile=pfmfile,
factors=factors,
pfmscorefile=pfmscorefile,
ncpus=ncpus,
)
outfile = os.path.join(outdir, "binding.h5")
# Make sure we create a new file
with open(outfile, "w"):
pass
with HDFStore(outfile, complib="lzo", complevel=9) as hdf:
if p._atac_data is not None:
hdf.put(key="_atac", value=p._atac_data, format="table")
if p._histone_data is not None:
hdf.put(key="_h3k27ac", value=p._histone_data, format="table")
logger.info("Predicting TF activity")
factor_activity = p.predict_factor_activity()
hdf.put(key="_factor_activity", value=factor_activity, format="table")
for factor in p.factors():
try:
proba = p.predict_proba(factor)
hdf.put(
key=f"{factor}",
value=proba.iloc[:, -1].reset_index(drop=True).astype(np.float16),
format="table",
)
except ValueError as e:
logger.debug(str(e))
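        # This relies on `proba` from the last successful factor above; all
        # factors share the same region index (self.regions), so any will do.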
hdf.put(key="_index", value=proba.index.to_series(), format="table")
| ANANSE-master | ananse/peakpredictor.py |
from ._version import get_versions
import os
import sys
from loguru import logger
# Remove default logger
logger.remove()
# Add logger
logger.add(sys.stderr, format="{time} | {level} | {message}", level="INFO")
# This is here to prevent very high memory usage on numpy import.
# On a machine with many cores, just importing numpy can result in up to
# 8GB of (virtual) memory. This wreaks havoc on management of the dask
# workers.
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
__version__ = get_versions()["version"]
del get_versions
| ANANSE-master | ananse/__init__.py |
#!/usr/bin/env python
# Copyright (c) 2009-2019 Quan Xu <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
"""Predict TF influence score"""
# Python imports
from __future__ import print_function
import sys
import warnings
from collections import namedtuple
from loguru import logger
from tqdm import tqdm
import numpy as np
import pandas as pd
import networkx as nx
import multiprocessing as mp
from sklearn.preprocessing import minmax_scale
from scipy.stats import rankdata, mannwhitneyu
from adjustText import adjust_text
import matplotlib.pyplot as plt
import seaborn as sns
warnings.filterwarnings("ignore")
# Here because of multiprocessing and pickling
Expression = namedtuple("Expression", ["score", "absfc", "realfc"])
def read_network(fname, edges=100000):
"""Read network file and return networkx DiGraph."""
G = nx.DiGraph()
rnet = pd.read_csv(fname, sep="\t")
nrnet = rnet.sort_values("prob", ascending=False)
if len(nrnet) < edges:
usenet = nrnet
else:
usenet = nrnet[:edges]
for vals in usenet.iterrows():
source, target = vals[1][0].split("_", 1)
try:
if len(vals[1]) > 1:
# weight = 1 - float(vals[1])
weight = float(vals[1][1])
# if weight < 0 or weight > 1:
# sys.stderr.write("expect weight between 0 and 1")
# sys.exit(1)
else:
weight = 0
G.add_edge(source, target, weight=weight, n=1)
except Exception:
sys.stderr.write("could not parse edge weight\n")
raise
return G
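# Expected network file layout for read_network (values are illustrative):
#   tf_target<TAB>prob
#   TP53_MDM2<TAB>0.85
# The first column encodes "TF_target" joined by an underscore; rows are
# sorted by "prob" and only the top `edges` rows are kept.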
def difference(S, R):
"""Calculate the network different between two cell types."""
DIF = nx.create_empty_copy(R)
for (u, v, d) in S.edges(data=True):
if (u, v) not in R.edges:
DIF.add_edge(u, v, weight=d["weight"], n=1)
else:
diff_weight = S.edges[u, v]["weight"] - R.edges[u, v]["weight"]
if diff_weight > 0:
DIF.add_edge(
u, v, weight=diff_weight, n=1, neglogweight=-np.log(diff_weight)
)
return DIF
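# Example (illustrative): if edge TF1->G1 has weight 0.9 in the target
# network S and 0.6 in the reference network R, the differential network
# keeps it with weight 0.3; edges weaker in S than in R are dropped.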
def read_expression(fname):
"""Read differential gene expression analysis output, return dictionary with namedtuples of scores, absolute fold
change and "real" (directional) fold change.
input:
a tab-separated file containing 3 columns (HGNC gene symbols, (adjusted) p-values and log2foldchange)
header is omitted if starting with "resid"
"""
expression_change = dict()
df = pd.read_table(
fname,
index_col=0,
header=0,
dtype={"resid": str, "log2FoldChange": float, "padj": float},
)
# absolute fold change
df["fc"] = df["log2FoldChange"].abs()
    # get the gscore (absolute fold change if significantly differential)
df["score"] = df["fc"] * (df["padj"] < 0.05)
for k, row in df.iterrows():
expression_change[row.name] = Expression(
score=row.score, absfc=row.fc, realfc=row.log2FoldChange
)
return expression_change
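# Example input for read_expression (values are illustrative):
#   resid<TAB>log2FoldChange<TAB>padj
#   TP53<TAB>2.5<TAB>0.001
# Genes with padj < 0.05 get score = |log2FoldChange|; all others score 0.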
def targetScore(node, G, expression_change, max_degree=3):
"""Calculate the influence score."""
# debug only.
# todo
# if expression_change is None:
# expression_change = {"score": {}, "fc": {}}
total_score = 0
# Get the targets that are within a certain number of steps from TF
lengths, paths = nx.single_source_dijkstra(G, node, cutoff=max_degree - 1)
targets = [t for t in lengths if 0 < lengths[t] <= max_degree]
for target in paths:
all_paths = {}
        # Calculate all paths from TF to target and select the path with the
        # highest cumulative weight (corrected for path length)
for path in nx.all_simple_paths(G, node, target, cutoff=max_degree - 1):
if len(path) <= max_degree:
weight = np.cumprod(
[G[s][t]["weight"] for s, t in zip(path, path[1:])]
)[-1]
# Add weight, corrected for the length of the path
all_paths[tuple(path)] = weight / (len(path) - 1)
if len(all_paths) > 0:
path, weight = sorted(all_paths.items(), key=lambda p: p[1])[-1]
# print(target, path, weight)
# outdegree of parent node of the target
# d = np.log(G.out_degree(path[-2]) + 1)
# d = G.out_degree(path[-2])
# the level (or the number of steps) that gene is away from transcription factor
pathlen = len(path)
# expression score of the target
g = expression_change[target].score if target in expression_change else 0
# weight is cumulative product of probabilities
# weight = [G[s][t]["weight"] for s, t in zip(path[:-1], path[1:])]
# cumulative sum of weight
# weight = np.cumprod(weight)[-1]
# score = g / len(path) / d * weight
score = g / pathlen * weight
total_score += score
# Get Mann-Whitney U p-value of direct targets vs. non-direct targets
direct_targets = [n for n in G[node] if n in expression_change]
non_direct_targets = [
n for n in list(G.nodes) if n in expression_change and n not in direct_targets
]
target_fc = [expression_change[t].absfc for t in direct_targets]
non_target_fc = [expression_change[t].absfc for t in non_direct_targets]
pval = mannwhitneyu(target_fc, non_target_fc)[1]
target_fc_diff = np.mean(target_fc) - np.mean(non_target_fc)
# factor, targetScore, directTargets, totalTargets, Gscore, pval, target_fc
return (
node,
total_score,
G.out_degree(node),
len(targets),
expression_change[node].absfc if node in expression_change else 0,
pval,
target_fc_diff,
)
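# Worked example for targetScore (illustrative values): for a path TF->X->G
# with edge weights 0.8 and 0.5, the cumulative weight is 0.4, corrected to
# 0.4 / 2 = 0.2 for path length; if G has expression score 1.5, this path
# contributes 1.5 / 3 * 0.2 = 0.1 to the total score.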
def filter_TF(scores_df, network=None, tpmfile=None, tpm=20, overlap=0.98):
"""Filter TFs:
1) it have high expression in origin cell type;
2) 98% of its target genes are also regulated by previous TFs.
"""
tpmscore = {}
with open(tpmfile) as tpf:
next(tpf)
for line in tpf:
tpmscore[line.split()[0]] = float(line.split()[1])
tftarget = {}
for tf in scores_df.index:
tftarget[tf] = set(network[tf]) if tf in network else set()
ltf = list(scores_df.index)
keeptf = []
for i in ltf:
passtf = []
if len(tftarget[i]) > 0:
for j in ltf[: ltf.index(i)]:
if len(tftarget[i] & tftarget[j]) / len(tftarget[i]) > overlap:
break
else:
passtf.append(j)
if passtf == ltf[: ltf.index(i)] and i in tpmscore and tpmscore[i] < tpm:
keeptf.append(i)
scores_df = scores_df.loc[keeptf]
scores_df.sort_values("sumScaled", inplace=True, ascending=False)
return scores_df
def plot_influscore(infile, outfile):
"""Plot TF influence score to expression."""
mogrify = pd.read_table(infile, index_col="factor")
mogrify = mogrify.dropna()
factors = list(mogrify.sort_values("sumScaled").tail(20).index)
# factors = list(mogrify.sort_values("sumScaled").tail(20).index)
xcol = "factor_fc"
plt.figure(figsize=(8, 6))
sns.regplot(
data=mogrify,
x=xcol,
y="sumScaled",
fit_reg=False,
scatter_kws={"s": mogrify["directTargets"] / 10, "alpha": 0.5},
)
x = mogrify.loc[factors, xcol]
y = mogrify.loc[factors, "sumScaled"]
texts = []
for s, xt, yt in zip(factors, x, y):
texts.append(plt.text(xt, yt, s))
adjust_text(texts, arrowprops=dict(arrowstyle="-", color="black"))
plt.xlabel("Log2 fold change of TF")
plt.ylabel("Influence score")
plt.savefig(outfile, dpi=300)
class Influence(object):
def __init__(
self, outfile, degenes, Gbf=None, Gaf=None, filter=False, edges=100000, ncore=1
):
self.ncore = ncore
logger.info(f"Reading network(s), using top {edges} edges.")
# Load GRNs
if Gbf is None and Gaf is not None:
self.G = read_network(Gaf, edges=edges)
logger.warning("You only provide the target network!")
elif Gaf is None and Gbf is not None:
self.G = read_network(Gbf, edges=edges)
logger.warning("You only provided the source network!")
        elif Gaf is None and Gbf is None:
            logger.error("You should provide at least one ANANSE network file!")
            sys.exit(1)
else:
G1 = read_network(Gbf, edges=edges)
G2 = read_network(Gaf, edges=edges)
self.G = difference(G2, G1)
logger.info(f"Differential network has {len(self.G.edges)} edges.")
# Load expression file
self.expression_change = read_expression(degenes)
self.outfile = outfile
# Filter TFs
self.filter = filter
def save_reg_network(self, filename):
"""Save the network difference between two cell types to a file."""
with open(filename, "w") as nw:
for (u, v, d) in self.G.edges(data=True):
nw.write(u + "\t" + v + "\t" + str(d["weight"]) + "\n")
def run_target_score(self, max_degree=3):
"""Run target score for all TFs."""
pool = mp.Pool(self.ncore)
jobs = []
tfs = [node for node in self.G.nodes() if self.G.out_degree(node) > 0]
logger.info(f"Differential network contains {len(tfs)} transcription factors.")
# differentially expressed TFs
detfs = [tf for tf in tfs if tf in self.expression_change]
if len(detfs) == 0:
sys.stderr.write(
"no overlapping transcription factors found between the network file(s) "
"(-s/--source, -t/--target) and the differential expression data (-d/--degenes)\n"
)
sys.exit(1)
detfs = [tf for tf in detfs if self.expression_change[tf].realfc > 0]
if len(detfs) == 0:
sys.stderr.write(
"no differentially expressed TFs found with a log2 fold change above 0\n"
)
sys.exit(1)
for tf in detfs:
jobs.append(
pool.apply_async(
targetScore, (tf, self.G, self.expression_change, max_degree)
)
)
# Get results and write to file
influence_file = open(self.outfile, "w")
influence_file.write(
"factor\tdirectTargets\ttotalTargets\ttargetsore\tGscore\tfactor_fc\tpval\ttarget_fc\n"
)
with tqdm(total=len(jobs)) as pbar:
for j in jobs:
(
factor,
score,
direct_targets,
total_targets,
factor_fc,
pval,
target_fc,
) = j.get()
print(
factor,
direct_targets,
total_targets,
score,
self.expression_change[factor].score,
factor_fc,
pval,
target_fc,
file=influence_file,
sep="\t",
)
pbar.update(1)
print("\n", file=influence_file)
pool.close()
influence_file.close()
scores_df = pd.read_table(self.outfile, index_col=0)
scores_df["targetScaled"] = minmax_scale(
rankdata(scores_df["targetsore"], method="dense")
)
scores_df.sort_values("targetScaled", inplace=True, ascending=False)
return self.outfile
def run_influence_score(self, influence_file, fin_expression=None):
"""Calculate influence score from target score and gscore"""
scores_df = pd.read_table(influence_file, index_col=0)
scores_df["targetScaled"] = minmax_scale(
rankdata(scores_df["targetsore"], method="dense")
)
scores_df["GscoreScaled"] = minmax_scale(
rankdata(scores_df["Gscore"], method="dense")
)
scores_df["sumScaled"] = minmax_scale(
rankdata(scores_df.targetScaled + scores_df.GscoreScaled, method="dense")
)
scores_df.sort_values("sumScaled", inplace=True, ascending=False)
scores_df = scores_df[
[
"targetScaled",
"GscoreScaled",
"sumScaled",
"directTargets",
"targetsore",
"factor_fc",
]
]
scores_df.to_csv(self.outfile, sep="\t")
if self.filter:
scores_df2 = filter_TF(
network=self.G, scores_df=scores_df, tpmfile=fin_expression
)
scores_df2.to_csv(
".".join(self.outfile.split(".")[:-1]) + "_filtered.txt", sep="\t"
)
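    # Worked example of the scaling above (illustrative values): with
    # targetScaled = [1.0, 0.5] and GscoreScaled = [0.0, 1.0] the row sums
    # are [1.0, 1.5]; dense ranks are [1, 2], so sumScaled becomes [0.0, 1.0].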
def run_influence(self, plot=True, fin_expression=None):
logger.info("Save differential network")
self.save_reg_network(
".".join(self.outfile.split(".")[:-1]) + "_diffnetwork.txt"
)
logger.info("Run target score")
influence_file = self.run_target_score()
logger.info("Run influence score")
self.run_influence_score(influence_file, fin_expression=fin_expression)
if plot is True:
logger.info("Plot results")
plot_influscore(
self.outfile, ".".join(self.outfile.split(".")[:-1]) + ".pdf"
)
| ANANSE-master | ananse/influence.py |
import os.path
import numpy as np
import pandas as pd
from scipy import stats
from ananse.utils import cleanpath
class Distributions:
def __init__(self):
# dist_functions = [f for f in dir(ananse.distributions) if f.endswith("_dist")]
dist_functions = [
scale_dist,
log_scale_dist,
scipy_dist,
peak_rank_dist,
peak_rank_file_dist,
]
self.functions = {func.__name__: func for func in dist_functions}
def get(self):
"""list distribution methods"""
return list(self.functions.keys())
def set(self, dist_func):
"""return a distribution method by name"""
dist_functions = self.get()
if dist_func not in dist_functions:
raise ValueError(
f"Distribution function '{dist_func}' not recognised. Options: {', '.join(dist_functions)}"
)
return self.functions[dist_func]
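# Example usage (illustrative): look up a distribution method by name.
# dist_func = Distributions().set("scale_dist")
# dist = dist_func(scores)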
def scale_dist(scores, **kwargs): # noqa
"""
Scale the scores between 0 and 1
"""
return (scores - np.min(scores)) / (np.max(scores) - np.min(scores))
def log_scale_dist(scores, **kwargs): # noqa
"""
Scale the log of the scores between 0 and 1
"""
scores = np.log(scores + 1)
return (scores - np.min(scores)) / (np.max(scores) - np.min(scores))
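# Example (illustrative): scale_dist(np.array([1.0, 2.0, 5.0])) returns
# array([0.0, 0.25, 1.0]); log_scale_dist applies np.log(scores + 1) first.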
def replace_infs(dist):
"""
    Replace negative and positive infinity with the smallest and largest finite values in the array, respectively
"""
# https://stackoverflow.com/questions/12937824/lognormal-random-numbers-centered-around-a-high-value
if not isinstance(dist, np.ndarray):
dist = np.array(dist)
min_real_val = np.nanmin(dist[dist != -np.inf])
dist[dist == -np.inf] = min_real_val
max_real_val = np.nanmax(dist[dist != np.inf])
dist[dist == np.inf] = max_real_val
return dist
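# Example (illustrative): replace_infs([-np.inf, 1.0, 2.0, np.inf]) returns
# array([1.0, 1.0, 2.0, 2.0]).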
def scipy_dist(scores, **kwargs):
"""
    Fit scores to a scipy.stats distribution.
    Specify the distribution name via kwargs["dist"] (default: "lognorm").
"""
if not isinstance(scores, np.ndarray):
scores = np.array(scores)
scores = scores + 1 # add pseudocount
x = range(len(scores))
dist_name = kwargs.get("dist", "lognorm")
if dist_name not in dir(stats):
raise ValueError(f"'{dist_name}' is not a recognized scipy.stats model.")
distribution = getattr(stats, dist_name) # eval(f"stats.{dist_name}")
# fit dist to data
params = distribution.fit(scores)
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF
dist = distribution.pdf(x, loc=loc, scale=scale, *arg)
dist = replace_infs(dist)
return dist
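# Example usage (illustrative): fit a log-normal PDF to a score vector.
# dist = scipy_dist(scores, dist="lognorm")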
# def lognorm_dist(scores, **kwargs):
# """
# fit scores to a log normal distribution
# """
# scores = scores + 1 # add pseudocount
# x = range(len(scores))
#
# # mu = np.log(scores).mean()
# # sigma = np.log(scores).std()
# # dist = stats.lognorm([sigma], loc=mu).pdf(x)
#
# s, loc, scale = stats.lognorm.fit(scores) # floc=0
# dist = stats.lognorm.pdf(x=x, s=s, loc=loc, scale=scale)
# return dist
def peak_rank_dist(scores, **kwargs): # noqa
"""
Fit scores to a distribution similar to what the p300 model was trained on
"""
# use a lognormal distribution:
# https://github.com/jsh58/Genrich#p-value-calculation
# # peak_rank_file = "ananse/db/peak_rank.txt"
# # scores = pd.read_csv(peak_rank_file, header=None)[0]
# # mu = np.log(scores+1).mean()
# # sigma = np.log(scores+1).std()
# mu = 1.0500836750482117
# sigma = 0.8000981267240566
#
# x = len(scores)
# rng = np.random.default_rng(seed=None)
# dist = rng.lognormal(mean=mu, sigma=sigma, size=x)
#
# print("proximity to the initial distribtion")
# print("delta mu:", np.abs(mu - np.log(dist).mean()))
# print("delta std:", np.abs(sigma - np.log(dist).std()))
# best fitting distribution turns out to be this loglaplace
x = range(len(scores))
c = 0.92
loc = 1.00
scale = 1.14
dist = stats.loglaplace.pdf(x=x, c=c, loc=loc, scale=scale)
dist = replace_infs(dist)
return dist
def peak_rank_file_dist(scores, **kwargs):
"""
fit scores to the distribution in kwargs['file'].
builtin files: "peak_rank.txt" and "peak_rank_hg38_h3k27ac.txt"
"""
if not isinstance(scores, np.ndarray):
scores = np.array(scores)
dist_filename = kwargs.get("file", "peak_rank.txt")
# internal data or user data
if dist_filename in ["peak_rank.txt", "peak_rank_hg38_h3k27ac.txt"]:
package_dir = os.path.dirname(__file__)
dist_filepath = os.path.join(package_dir, "db", dist_filename)
else:
dist_filepath = cleanpath(dist_filename)
if not os.path.exists(dist_filepath):
raise FileNotFoundError(f"Could not find file {dist_filepath}")
dist = pd.read_csv(dist_filepath, header=None)
n = scores.shape[0]
max_n = dist.shape[0]
if max_n < n:
raise ValueError(
f"Too many regions ({n}) to fit to '{dist_filename}' ({max_n})"
)
dist = dist.sample(n=n, random_state=1)[0].tolist()
return dist
| ANANSE-master | ananse/distributions.py |