import torch
import torch.nn.functional as F
from torch.optim import Adam
from einops import rearrange, repeat
import sidechainnet as scn
from equiformer_pytorch import Equiformer
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY = 16
MAX_SEQ_LEN = 512
DEFAULT_TYPE = torch.float64
torch.set_default_dtype(DEFAULT_TYPE)
def cycle(loader, len_thres = MAX_SEQ_LEN):
while True:
for data in loader:
if data.seqs.shape[1] > len_thres:
continue
yield data
transformer = Equiformer(
num_tokens = 24,
dim = (16, 8, 8, 8),
dim_head = (16, 8, 8, 8),
heads = (4, 2, 2, 2),
depth = 10,
reversible = True,
attend_self = True,
reduce_dim_out = True,
num_neighbors = 6,
num_degrees = 4,
linear_out = True
).cuda()
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = BATCH_SIZE,
dynamic_batching = False
)
# Add gaussian noise to the coords
# Testing the refinement algorithm
dl = cycle(data['train'])
optim = Adam(transformer.parameters(), lr = 1e-4)
for _ in range(10000):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
batch = next(dl)
seqs, coords, masks = batch.seqs, batch.crds, batch.msks
seqs = seqs.cuda().argmax(dim = -1)
coords = coords.cuda().type(torch.get_default_dtype())
masks = masks.cuda().bool()
l = seqs.shape[1]
coords = rearrange(coords, 'b (l s) c -> b l s c', s = 14)
# Keeping only the backbone coordinates
coords = coords[:, :, 0:3, :]
coords = rearrange(coords, 'b l s c -> b (l s) c')
seq = repeat(seqs, 'b n -> b (n c)', c = 3)
masks = repeat(masks, 'b n -> b (n c)', c = 3)
noised_coords = coords + torch.randn_like(coords).cuda()
_, type1_out = transformer(
seq,
noised_coords,
mask = masks
)
denoised_coords = noised_coords + type1_out
loss = F.mse_loss(denoised_coords[masks], coords[masks])
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print('loss:', loss.item())
optim.step()
optim.zero_grad()
| equiformer-pytorch-main | denoise.py |
import pytest
import torch
from equiformer_pytorch.equiformer_pytorch import Equiformer
from equiformer_pytorch.irr_repr import rot
from equiformer_pytorch.utils import (
torch_default_dtype,
cast_tuple,
to_order,
exists
)
# test output shape
@pytest.mark.parametrize('dim', [32])
def test_transformer(dim):
model = Equiformer(
dim = dim,
depth = 2,
num_degrees = 3,
init_out_zero = False
)
feats = torch.randn(1, 32, dim)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
type0, _ = model(feats, coors, mask)
assert type0.shape == (1, 32, dim), 'output must be of the right shape'
# test equivariance
@pytest.mark.parametrize('dim', [32, (4, 8, 16)])
@pytest.mark.parametrize('dim_in', [32, (32, 32)])
@pytest.mark.parametrize('l2_dist_attention', [True, False])
@pytest.mark.parametrize('reversible', [True, False])
def test_equivariance(
dim,
dim_in,
l2_dist_attention,
reversible
):
dim_in = cast_tuple(dim_in)
model = Equiformer(
dim = dim,
dim_in = dim_in,
input_degrees = len(dim_in),
depth = 2,
l2_dist_attention = l2_dist_attention,
reversible = reversible,
num_degrees = 3,
reduce_dim_out = True,
init_out_zero = False
)
feats = {deg: torch.randn(1, 32, dim, to_order(deg)) for deg, dim in enumerate(dim_in)}
type0, type1 = feats[0], feats.get(1, None)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
R = rot(*torch.randn(3))
maybe_rotated_feats = {0: type0}
if exists(type1):
maybe_rotated_feats[1] = type1 @ R
_, out1 = model(maybe_rotated_feats, coors @ R, mask)
out2 = model(feats, coors, mask)[1] @ R
assert torch.allclose(out1, out2, atol = 1e-4), 'is not equivariant'
| equiformer-pytorch-main | tests/test_equivariance.py |
import pytest
import torch
from equiformer_pytorch.equiformer_pytorch import Equiformer
from equiformer_pytorch.irr_repr import rot
from equiformer_pytorch.utils import torch_default_dtype
# test equivariance with edges
@pytest.mark.parametrize('l2_dist_attention', [True, False])
@pytest.mark.parametrize('reversible', [True, False])
def test_edges_equivariance(
l2_dist_attention,
reversible
):
model = Equiformer(
num_tokens = 28,
dim = 64,
num_edge_tokens = 4,
edge_dim = 16,
depth = 2,
input_degrees = 1,
num_degrees = 3,
l2_dist_attention = l2_dist_attention,
reversible = reversible,
init_out_zero = False,
reduce_dim_out = True
)
atoms = torch.randint(0, 28, (2, 32))
bonds = torch.randint(0, 4, (2, 32, 32))
coors = torch.randn(2, 32, 3)
mask = torch.ones(2, 32).bool()
R = rot(*torch.randn(3))
_, out1 = model(atoms, coors @ R, mask, edges = bonds)
out2 = model(atoms, coors, mask, edges = bonds)[1] @ R
assert torch.allclose(out1, out2, atol = 1e-4), 'is not equivariant'
# test equivariance with adjacency matrix
@pytest.mark.parametrize('l2_dist_attention', [True, False])
@pytest.mark.parametrize('reversible', [True, False])
def test_adj_mat_equivariance(
l2_dist_attention,
reversible
):
model = Equiformer(
dim = 32,
heads = 8,
depth = 1,
dim_head = 64,
num_degrees = 2,
valid_radius = 10,
l2_dist_attention = l2_dist_attention,
reversible = reversible,
attend_sparse_neighbors = True,
num_neighbors = 0,
num_adj_degrees_embed = 2,
max_sparse_neighbors = 8,
init_out_zero = False,
reduce_dim_out = True
)
feats = torch.randn(1, 128, 32)
coors = torch.randn(1, 128, 3)
mask = torch.ones(1, 128).bool()
i = torch.arange(128)
adj_mat = (i[:, None] <= (i[None, :] + 1)) & (i[:, None] >= (i[None, :] - 1))
R = rot(*torch.randn(3))
_, out1 = model(feats, coors @ R, mask, adj_mat = adj_mat)
out2 = model(feats, coors, mask, adj_mat = adj_mat)[1] @ R
assert torch.allclose(out1, out2, atol = 1e-4), 'is not equivariant'
| equiformer-pytorch-main | tests/test_edges.py |
import os
from itertools import product
from collections import namedtuple
import torch
from einops import rearrange, repeat, reduce, einsum
from equiformer_pytorch.irr_repr import (
irr_repr,
rot_to_euler_angles
)
from equiformer_pytorch.utils import (
torch_default_dtype,
cache_dir,
exists,
default,
to_order,
identity,
l2norm,
slice_for_centering_y_to_x
)
# constants
CACHE_PATH = default(os.getenv('CACHE_PATH'), os.path.expanduser('~/.cache.equivariant_attention'))
CACHE_PATH = CACHE_PATH if not exists(os.environ.get('CLEAR_CACHE')) else None
# todo (figure out why this was hard coded in official repo)
RANDOM_ANGLES = torch.tensor([
[4.41301023, 5.56684102, 4.59384642],
[4.93325116, 6.12697327, 4.14574096],
[0.53878964, 4.09050444, 5.36539036],
[2.16017393, 3.48835314, 5.55174441],
[2.52385107, 0.2908958, 3.90040975]
], dtype = torch.float64)
# functions
def get_matrix_kernel(A, eps = 1e-10):
'''
Compute an orthonormal basis of the kernel (x_1, x_2, ...)
A x_i = 0
scalar_product(x_i, x_j) = delta_ij
:param A: matrix
:return: matrix where each row is a basis vector of the kernel of A
'''
A = rearrange(A, '... d -> (...) d')
_u, s, v = torch.svd(A)
kernel = v.t()[s < eps]
return kernel
def sylvester_submatrix(order_out, order_in, J, a, b, c):
''' generate Kronecker product matrix for solving the Sylvester equation in subspace J '''
angles = torch.stack((a, b, c), dim = -1)
R_tensor = get_R_tensor(order_out, order_in, a, b, c) # [m_out * m_in, m_out * m_in]
R_irrep_J = irr_repr(J, angles) # [m, m]
R_irrep_J_T = rearrange(R_irrep_J, '... m n -> ... n m')
R_tensor_identity = torch.eye(R_tensor.shape[-1])
R_irrep_J_identity = torch.eye(R_irrep_J.shape[-1])
return kron(R_tensor, R_irrep_J_identity) - kron(R_tensor_identity, R_irrep_J_T) # [(m_out * m_in) * m, (m_out * m_in) * m]
def kron(a, b):
"""
A part of the pylabyk library: numpytorch.py at https://github.com/yulkang/pylabyk
Kronecker product of matrices a and b with leading batch dimensions.
    Batch dimensions are broadcast across the two inputs.
:type a: torch.Tensor
:type b: torch.Tensor
:rtype: torch.Tensor
"""
res = einsum(a, b, '... i j, ... k l -> ... i k j l')
return rearrange(res, '... i j k l -> ... (i j) (k l)')
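# illustrative sanity check (not part of the library): on unbatched inputs this
# matches torch.kron, e.g.
#   a, b = torch.randn(2, 3), torch.randn(4, 5)
#   assert torch.allclose(kron(a, b), torch.kron(a, b), atol = 1e-6)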
def get_R_tensor(order_out, order_in, a, b, c):
angles = torch.stack((a, b, c), dim = -1)
return kron(irr_repr(order_out, angles), irr_repr(order_in, angles))
@cache_dir(CACHE_PATH)
@torch_default_dtype(torch.float64)
@torch.no_grad()
def basis_transformation_Q_J(J, order_in, order_out, random_angles = RANDOM_ANGLES):
"""
:param J: order of the spherical harmonics
:param order_in: order of the input representation
:param order_out: order of the output representation
:return: one part of the Q^-1 matrix of the article
"""
sylvester_submatrices = sylvester_submatrix(order_out, order_in, J, *random_angles.unbind(dim = -1))
null_space = get_matrix_kernel(sylvester_submatrices)
assert null_space.size(0) == 1, null_space.size() # unique subspace solution
Q_J = null_space[0] # [(m_out * m_in) * m]
Q_J = rearrange(
Q_J,
'(oi m) -> oi m',
m = to_order(J)
)
return Q_J.float() # [m_out * m_in, m]
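# the returned Q_J is an intertwiner - for any angles (a, b, c) it satisfies
#   get_R_tensor(order_out, order_in, a, b, c) @ Q_J == Q_J @ irr_repr(J, angles)
# which is precisely the constraint the Sylvester null space above solves for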
@cache_dir(CACHE_PATH)
@torch_default_dtype(torch.float64)
@torch.no_grad()
def get_basis(max_degree):
"""
Return equivariant weight basis (basis)
assuming edges are aligned to z-axis
"""
basis = dict()
    # Equivariant basis (dict['(<d_in>,<d_out>)'])
for d_in, d_out in product(range(max_degree+1), range(max_degree+1)):
K_Js = []
d_min = min(d_in, d_out)
m_in, m_out, m_min = map(to_order, (d_in, d_out, d_min))
slice_in, slice_out = map(lambda t: slice_for_centering_y_to_x(t, m_min), (m_in, m_out))
if d_min == 0:
continue
for J in range(abs(d_in - d_out), d_in + d_out + 1):
# Get spherical harmonic projection matrices
Q_J = basis_transformation_Q_J(J, d_in, d_out)
# aligning edges (r_ij) with z-axis leads to sparse spherical harmonics (ex. degree 1 [0., 1., 0.]) - thus plucking out only the mo index
# https://arxiv.org/abs/2206.14331
# equiformer v2 then normalizes the Y, to remove it altogether
mo_index = J
K_J = Q_J[..., mo_index]
K_J = rearrange(K_J, '... (o i) -> ... o i', o = m_out)
K_J = K_J[..., slice_out, slice_in]
K_J = reduce(K_J, 'o i -> i', 'sum') # the matrix is a sparse diagonal, but flipped depending on whether J is even or odd
K_Js.append(K_J)
K_Js = torch.stack(K_Js, dim = -1)
basis[f'({d_in},{d_out})'] = K_Js # (mi, mf)
return basis
# functions for rotating r_ij to z-axis
def rot_x_to_y_direction(x, y, eps = 1e-6):
'''
Rotates a vector x to the same direction as vector y
Taken from https://math.stackexchange.com/a/2672702
This formulation, although not the shortest path, has the benefit of rotation matrix being symmetric; rotating back to x upon two rotations
'''
n, dtype, device = x.shape[-1], x.dtype, x.device
I = torch.eye(n, device = device, dtype = dtype)
if torch.allclose(x, y, atol = 1e-6):
return I
x, y = x.double(), y.double()
x, y = map(l2norm, (x, y))
xy = rearrange(x + y, '... n -> ... n 1')
xy_t = rearrange(xy, '... n 1 -> ... 1 n')
R = 2 * (xy @ xy_t) / (xy_t @ xy).clamp(min = eps) - I
return R.type(dtype)
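# note the returned R is symmetric and involutive (R @ R == I), mapping the unit
# vector along x to the unit vector along y and vice versa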
@torch.no_grad()
def get_D_to_from_z_axis(r_ij, max_degree):
device, dtype = r_ij.device, r_ij.dtype
D = dict()
# precompute D
# 1. compute rotation to [0., 1., 0.]
# 2. calculate the ZYZ euler angles from that rotation
# 3. calculate the D irreducible representation from 0 ... max_degree (technically 0 not needed)
z_axis = r_ij.new_tensor([0., 1., 0.])
R = rot_x_to_y_direction(r_ij, z_axis)
angles = rot_to_euler_angles(R)
for d in range(max_degree + 1):
if d == 0:
continue
D[d] = irr_repr(d, angles)
return D
| equiformer-pytorch-main | equiformer_pytorch/basis.py |
__version__ = '0.3.10'
| equiformer-pytorch-main | equiformer_pytorch/version.py |
import torch
from torch.nn import Module
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from beartype import beartype
from beartype.typing import List, Tuple
from einops import rearrange, reduce
from equiformer_pytorch.utils import to_order
# helpers
def map_values(fn, x):
out = {}
for (k, v) in x.items():
out[k] = fn(v)
return out
def dict_chunk(x, chunks, dim):
out1 = {}
out2 = {}
for (k, v) in x.items():
c1, c2 = v.chunk(chunks, dim = dim)
out1[k] = c1
out2[k] = c2
return out1, out2
def dict_sum(x, y):
out = {}
for k in x.keys():
out[k] = x[k] + y[k]
return out
def dict_subtract(x, y):
out = {}
for k in x.keys():
out[k] = x[k] - y[k]
return out
def dict_cat(x, y, dim):
out = {}
for k, v1 in x.items():
v2 = y[k]
out[k] = torch.cat((v1, v2), dim = dim)
return out
def dict_set_(x, key, value):
for k, v in x.items():
setattr(v, key, value)
def dict_backwards_(outputs, grad_tensors):
for k, v in outputs.items():
torch.autograd.backward(v, grad_tensors[k], retain_graph = True)
def dict_del_(x):
for k, v in x.items():
del v
del x
def values(d):
return [v for _, v in d.items()]
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
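# per-degree reversible coupling (RevNet-style):
#   forward: y1 = x1 + f(x2), y2 = x2 + g(y1)
#   inverse: x2 = y2 - g(y1), x1 = y1 - f(x2)
# so block activations can be recomputed on the backward pass instead of stored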
class ReversibleBlock(Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, **kwargs):
training = self.training
x1, x2 = dict_chunk(x, 2, dim = -1)
y1, y2 = None, None
with torch.no_grad():
y1 = dict_sum(x1, self.f(x2, record_rng = training, **kwargs))
y2 = dict_sum(x2, self.g(y1, record_rng = training))
return dict_cat(y1, y2, dim = -1)
def backward_pass(self, y, dy, **kwargs):
y1, y2 = dict_chunk(y, 2, dim = -1)
dict_del_(y)
dy1, dy2 = dict_chunk(dy, 2, dim = -1)
dict_del_(dy)
with torch.enable_grad():
dict_set_(y1, 'requires_grad', True)
gy1 = self.g(y1, set_rng = True)
dict_backwards_(gy1, dy2)
with torch.no_grad():
x2 = dict_subtract(y2, gy1)
dict_del_(y2)
dict_del_(gy1)
dx1 = dict_sum(dy1, map_values(lambda t: t.grad, y1))
dict_del_(dy1)
dict_set_(y1, 'grad', None)
with torch.enable_grad():
dict_set_(x2, 'requires_grad', True)
fx2 = self.f(x2, set_rng = True, **kwargs)
dict_backwards_(fx2, dx1)
with torch.no_grad():
x1 = dict_subtract(y1, fx2)
dict_del_(y1)
dict_del_(fx2)
dx2 = dict_sum(dy2, map_values(lambda t: t.grad, x2))
dict_del_(dy2)
dict_set_(x2, 'grad', None)
x2 = map_values(lambda t: t.detach(), x2)
x = dict_cat(x1, x2, dim = -1)
dx = dict_cat(dx1, dx2, dim = -1)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
input_keys = kwargs.pop('input_keys')
split_dims = kwargs.pop('split_dims')
input_values = x.split(split_dims, dim = -1)
x = dict(zip(input_keys, input_values))
ctx.kwargs = kwargs
ctx.split_dims = split_dims
ctx.input_keys = input_keys
x = {k: rearrange(v, '... (d m) -> ... d m', m = to_order(k) * 2) for k, v in x.items()}
for block in blocks:
x = block(x, **kwargs)
ctx.y = map_values(lambda t: t.detach(), x)
ctx.blocks = blocks
x = map_values(lambda t: rearrange(t, '... d m -> ... (d m)'), x)
x = torch.cat(values(x), dim = -1)
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
kwargs = ctx.kwargs
input_keys = ctx.input_keys
split_dims = ctx.split_dims
dy = dy.split(split_dims, dim = -1)
dy = dict(zip(input_keys, dy))
dy = {k: rearrange(v, '... (d m) -> ... d m', m = to_order(k) * 2) for k, v in dy.items()}
for block in ctx.blocks[::-1]:
y, dy = block.backward_pass(y, dy, **kwargs)
dy = map_values(lambda t: rearrange(t, '... d m -> ... (d m)'), dy)
dy = torch.cat(values(dy), dim = -1)
return dy, None, None
# sequential
def residual_fn(x, residual):
out = {}
for degree, tensor in x.items():
out[degree] = tensor
if degree not in residual:
continue
if not any(t.requires_grad for t in (out[degree], residual[degree])):
out[degree] += residual[degree]
else:
out[degree] = out[degree] + residual[degree]
return out
class SequentialSequence(Module):
@beartype
def __init__(
self,
blocks: List[Tuple[Module, Module]]
):
super().__init__()
self.blocks = nn.ModuleList([nn.ModuleList([f, g]) for f, g in blocks])
def forward(self, x, **kwargs):
for attn, ff in self.blocks:
x = residual_fn(attn(x, **kwargs), x)
x = residual_fn(ff(x), x)
return x
# reversible
class ReversibleSequence(Module):
@beartype
def __init__(
self,
blocks: List[Tuple[Module, Module]]
):
super().__init__()
self.blocks = nn.ModuleList([ReversibleBlock(f, g) for (f, g) in blocks])
def forward(self, x, **kwargs):
blocks = self.blocks
# merge into single tensor
x = map_values(lambda t: torch.cat((t, t), dim = -1), x)
x = map_values(lambda t: rearrange(t, '... d m -> ... (d m)'), x)
input_keys = x.keys()
split_dims = tuple(map(lambda t: t.shape[-1], x.values()))
block_kwargs = {'input_keys': input_keys, 'split_dims': split_dims, **kwargs}
x = torch.cat(values(x), dim = -1)
# reversible function, tailored for equivariant network
x = _ReversibleFunction.apply(x, blocks, block_kwargs)
# reconstitute
x = dict(zip(input_keys, x.split(split_dims, dim = -1)))
x = {k: reduce(v, '... (d r m) -> ... d m', 'mean', r = 2, m = to_order(k)) for k, v in x.items()}
return x
| equiformer-pytorch-main | equiformer_pytorch/reversible.py |
from equiformer_pytorch.equiformer_pytorch import Equiformer
| equiformer-pytorch-main | equiformer_pytorch/__init__.py |
from math import sqrt
from functools import partial
from itertools import product
from collections import namedtuple
from beartype.typing import Optional, Union, Tuple, Dict
from beartype import beartype
import torch
from torch import nn, is_tensor, Tensor
import torch.nn.functional as F
from opt_einsum import contract as opt_einsum
from equiformer_pytorch.basis import (
get_basis,
get_D_to_from_z_axis
)
from equiformer_pytorch.reversible import (
SequentialSequence,
ReversibleSequence
)
from equiformer_pytorch.utils import (
exists,
default,
batched_index_select,
masked_mean,
to_order,
cast_tuple,
safe_cat,
fast_split,
slice_for_centering_y_to_x,
pad_for_centering_y_to_x
)
from einops import rearrange, repeat, reduce, einsum, pack, unpack
from einops.layers.torch import Rearrange
# constants
Return = namedtuple('Return', ['type0', 'type1'])
EdgeInfo = namedtuple('EdgeInfo', ['neighbor_indices', 'neighbor_mask', 'edges'])
# helpers
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
# fiber functions
@beartype
def fiber_product(
fiber_in: Tuple[int, ...],
fiber_out: Tuple[int, ...]
):
fiber_in, fiber_out = tuple(map(lambda t: [(degree, dim) for degree, dim in enumerate(t)], (fiber_in, fiber_out)))
return product(fiber_in, fiber_out)
@beartype
def fiber_and(
fiber_in: Tuple[int, ...],
fiber_out: Tuple[int, ...]
):
fiber_in = [(degree, dim) for degree, dim in enumerate(fiber_in)]
fiber_out_degrees = set(range(len(fiber_out)))
out = []
for degree, dim in fiber_in:
if degree not in fiber_out_degrees:
continue
dim_out = fiber_out[degree]
out.append((degree, dim, dim_out))
return out
# helper functions
def split_num_into_groups(num, groups):
num_per_group = (num + groups - 1) // groups
remainder = num % groups
if remainder == 0:
return (num_per_group,) * groups
return (*((num_per_group,) * remainder), *((((num_per_group - 1),) * (groups - remainder))))
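# e.g. split_num_into_groups(10, 3) -> (4, 3, 3) - group sizes differ by at most one and sum to num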
def get_tensor_device_and_dtype(features):
_, first_tensor = next(iter(features.items()))
return first_tensor.device, first_tensor.dtype
def residual_fn(x, residual):
out = {}
for degree, tensor in x.items():
out[degree] = tensor
if degree not in residual:
continue
if not any(t.requires_grad for t in (out[degree], residual[degree])):
out[degree] += residual[degree]
else:
out[degree] = out[degree] + residual[degree]
return out
def tuple_set_at_index(tup, index, value):
l = list(tup)
l[index] = value
return tuple(l)
def feature_shapes(feature):
return tuple(v.shape for v in feature.values())
def feature_fiber(feature):
return tuple(v.shape[-2] for v in feature.values())
def cdist(a, b, dim = -1, eps = 1e-5):
a = a.expand_as(b)
a, _ = pack_one(a, '* c')
b, ps = pack_one(b, '* c')
dist = F.pairwise_distance(a, b, p = 2)
dist = unpack_one(dist, ps, '*')
return dist
# classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
y = self.fn(x, **kwargs)
if not y.requires_grad and not x.requires_grad:
return x.add_(y)
return x + y
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
class Linear(nn.Module):
@beartype
def __init__(
self,
fiber_in: Tuple[int, ...],
fiber_out: Tuple[int, ...]
):
super().__init__()
self.weights = nn.ParameterList([])
self.degrees = []
for (degree, dim_in, dim_out) in fiber_and(fiber_in, fiber_out):
self.weights.append(nn.Parameter(torch.randn(dim_in, dim_out) / sqrt(dim_in)))
self.degrees.append(degree)
def init_zero_(self):
for weight in self.weights:
weight.data.zero_()
def forward(self, x):
out = {}
for degree, weight in zip(self.degrees, self.weights):
out[degree] = einsum(x[degree], weight, '... d m, d e -> ... e m')
return out
class Norm(nn.Module):
@beartype
def __init__(
self,
fiber: Tuple[int, ...],
eps = 1e-12,
):
"""
        deviates from the paper slightly, will use rmsnorm throughout (no mean centering or bias, even for type0 features)
this has been proven at scale for a number of models, including T5 and alphacode
"""
super().__init__()
self.eps = eps
self.transforms = nn.ParameterList([])
for degree, dim in enumerate(fiber):
self.transforms.append(nn.Parameter(torch.ones(dim, 1)))
def forward(self, features):
output = {}
for scale, (degree, t) in zip(self.transforms, features.items()):
dim = t.shape[-2]
l2normed = t.norm(dim = -1, keepdim = True)
rms = l2normed.norm(dim = -2, keepdim = True) * (dim ** -0.5)
output[degree] = t / rms.clamp(min = self.eps) * scale
return output
class Gate(nn.Module):
@beartype
def __init__(
self,
fiber: Tuple[int, ...]
):
super().__init__()
type0_dim = fiber[0]
dim_gate = sum(fiber[1:])
assert type0_dim > dim_gate, 'sum of channels from rest of the degrees must be less than the channels in type 0, as they would be used up for gating and subtracted out'
self.fiber = fiber
self.num_degrees = len(fiber)
self.type0_dim_split = [*fiber[1:], type0_dim - dim_gate]
def forward(self, x):
output = {}
type0_tensor = x[0]
*gates, type0_tensor = type0_tensor.split(self.type0_dim_split, dim = -2)
# silu for type 0
output = {0: F.silu(type0_tensor)}
# sigmoid gate the higher types
for degree, gate in zip(range(1, self.num_degrees), gates):
output[degree] = x[degree] * gate.sigmoid()
return output
class DTP(nn.Module):
""" 'Tensor Product' - in the equivariant sense """
@beartype
def __init__(
self,
fiber_in: Tuple[int, ...],
fiber_out: Tuple[int, ...],
self_interaction = True,
project_xi_xj = True, # whether to project xi and xj and then sum, as in paper
project_out = True, # whether to do a project out after the "tensor product"
pool = True,
edge_dim = 0,
radial_hidden_dim = 16
):
super().__init__()
self.fiber_in = fiber_in
self.fiber_out = fiber_out
self.edge_dim = edge_dim
self.self_interaction = self_interaction
self.pool = pool
self.project_xi_xj = project_xi_xj
if project_xi_xj:
self.to_xi = Linear(fiber_in, fiber_in)
self.to_xj = Linear(fiber_in, fiber_in)
self.kernel_unary = nn.ModuleDict()
# in the depthwise tensor product, each channel of the output only gets contribution from one degree of the input (please email me if i misconstrued this)
for degree_out, dim_out in enumerate(self.fiber_out):
num_degrees_in = len(self.fiber_in)
split_dim_out = split_num_into_groups(dim_out, num_degrees_in) # returns a tuple of ints representing how many channels come from each input degree
for degree_in, (dim_in, dim_out_from_degree_in) in enumerate(zip(self.fiber_in, split_dim_out)):
degree_min = min(degree_out, degree_in)
self.kernel_unary[f'({degree_in},{degree_out})'] = Radial(degree_in, dim_in, degree_out, dim_out_from_degree_in, radial_hidden_dim = radial_hidden_dim, edge_dim = edge_dim)
# whether a single token is self-interacting
if self_interaction:
self.self_interact = Linear(fiber_in, fiber_out)
self.project_out = project_out
if project_out:
self.to_out = Linear(fiber_out, fiber_out)
@beartype
def forward(
self,
inp,
basis,
D,
edge_info: EdgeInfo,
rel_dist = None,
):
neighbor_indices, neighbor_masks, edges = edge_info
kernels = {}
outputs = {}
# neighbors
if self.project_xi_xj:
source, target = self.to_xi(inp), self.to_xj(inp)
else:
source, target = inp, inp
# go through every permutation of input degree type to output degree type
for degree_out, _ in enumerate(self.fiber_out):
output = None
m_out = to_order(degree_out)
for degree_in, _ in enumerate(self.fiber_in):
etype = f'({degree_in},{degree_out})'
m_in = to_order(degree_in)
m_min = min(m_in, m_out)
degree_min = min(degree_in, degree_out)
# get source and target (neighbor) representations
xi, xj = source[degree_in], target[degree_in]
x = batched_index_select(xj, neighbor_indices, dim = 1)
if self.project_xi_xj:
xi = rearrange(xi, 'b i d m -> b i 1 d m')
x = x + xi
# multiply by D(R) - rotate to z-axis
if degree_in > 0:
Di = D[degree_in]
x = einsum(Di, x, '... mi1 mi2, ... li mi1 -> ... li mi2')
# remove some 0s if degree_in != degree_out
maybe_input_slice = slice_for_centering_y_to_x(m_in, m_min)
maybe_output_pad = pad_for_centering_y_to_x(m_out, m_min)
x = x[..., maybe_input_slice]
# process input, edges, and basis in chunks along the sequence dimension
kernel_fn = self.kernel_unary[etype]
edge_features = safe_cat(edges, rel_dist, dim = -1)
B = basis.get(etype, None)
R = kernel_fn(edge_features)
# mo depends only on mi (or other way around), removing yet another dimension
if not exists(B): # degree_in or degree_out is 0
output_chunk = einsum(R, x, '... lo li, ... li mi -> ... lo mi')
else:
y = x.clone()
x = repeat(x, '... mi -> ... mi mf r', mf = (B.shape[-1] + 1) // 2, r = 2) # mf + 1, so that mf can be divided in 2
x, x_to_flip = x.unbind(dim = -1)
x_flipped = torch.flip(x_to_flip, dims = (-2,)) # flip on the mi axis, as the basis alternates between diagonal and flipped diagonal across mf
x = torch.stack((x, x_flipped), dim = -1)
x = rearrange(x, '... mf r -> ... (mf r)', r = 2)
x = x[..., :-1]
output_chunk = opt_einsum('... o i, m f, ... i m f -> ... o m', R, B, x)
# in the case that degree_out < degree_in
output_chunk = F.pad(output_chunk, (maybe_output_pad, maybe_output_pad), value = 0.)
output = safe_cat(output, output_chunk, dim = -2)
# multiply by D(R^-1) - rotate back from z-axis
if degree_out > 0:
Do = D[degree_out]
output = einsum(output, Do, '... lo mo1, ... mo2 mo1 -> ... lo mo2')
# pool or not along j (neighbors) dimension
if self.pool:
output = masked_mean(output, neighbor_masks, dim = 2)
outputs[degree_out] = output
if not self.self_interaction and not self.project_out:
return outputs
if self.project_out:
outputs = self.to_out(outputs)
self_interact_out = self.self_interact(inp)
if self.pool:
return residual_fn(outputs, self_interact_out)
self_interact_out = {k: rearrange(v, '... d m -> ... 1 d m') for k, v in self_interact_out.items()}
outputs = {degree: torch.cat(tensors, dim = -3) for degree, tensors in enumerate(zip(self_interact_out.values(), outputs.values()))}
return outputs
class Radial(nn.Module):
def __init__(
self,
degree_in,
nc_in,
degree_out,
nc_out,
edge_dim = 0,
radial_hidden_dim = 64
):
super().__init__()
self.degree_in = degree_in
self.degree_out = degree_out
self.nc_in = nc_in
self.nc_out = nc_out
self.d_out = to_order(degree_out)
self.edge_dim = edge_dim
mid_dim = radial_hidden_dim
edge_dim = default(edge_dim, 0)
self.rp = nn.Sequential(
nn.Linear(edge_dim + 1, mid_dim),
nn.SiLU(),
LayerNorm(mid_dim),
nn.Linear(mid_dim, mid_dim),
nn.SiLU(),
LayerNorm(mid_dim),
nn.Linear(mid_dim, nc_in * nc_out),
Rearrange('... (lo li) -> ... lo li', li = nc_in, lo = nc_out)
)
def forward(self, feat):
return self.rp(feat)
# feed forwards
class FeedForward(nn.Module):
@beartype
def __init__(
self,
fiber: Tuple[int, ...],
fiber_out: Optional[Tuple[int, ...]] = None,
mult = 4,
include_htype_norms = True,
init_out_zero = True
):
super().__init__()
self.fiber = fiber
fiber_hidden = tuple(dim * mult for dim in fiber)
project_in_fiber = fiber
project_in_fiber_hidden = tuple_set_at_index(fiber_hidden, 0, sum(fiber_hidden))
self.include_htype_norms = include_htype_norms
if include_htype_norms:
project_in_fiber = tuple_set_at_index(project_in_fiber, 0, sum(fiber))
fiber_out = default(fiber_out, fiber)
self.prenorm = Norm(fiber)
self.project_in = Linear(project_in_fiber, project_in_fiber_hidden)
self.gate = Gate(project_in_fiber_hidden)
self.project_out = Linear(fiber_hidden, fiber_out)
if init_out_zero:
self.project_out.init_zero_()
def forward(self, features):
outputs = self.prenorm(features)
if self.include_htype_norms:
type0, *htypes = [*outputs.values()]
htypes = map(lambda t: t.norm(dim = -1, keepdim = True), htypes)
type0 = torch.cat((type0, *htypes), dim = -2)
outputs[0] = type0
outputs = self.project_in(outputs)
outputs = self.gate(outputs)
outputs = self.project_out(outputs)
return outputs
# global linear attention
class LinearAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8
):
super().__init__()
self.heads = heads
dim_inner = dim_head * heads
self.to_qkv = nn.Linear(dim, dim_inner * 3)
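    # kernelized linear attention - keys are softmaxed over the sequence and queries
    # over the feature dimension, so context is aggregated in O(n) without ever
    # materializing the full n x n attention matrix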
def forward(self, x, mask = None):
has_degree_m_dim = x.ndim == 4
if has_degree_m_dim:
x = rearrange(x, '... 1 -> ...')
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
if exists(mask):
mask = rearrange(mask, 'b n -> b 1 n 1')
k = k.masked_fill(~mask, -torch.finfo(q.dtype).max)
v = v.masked_fill(~mask, 0.)
k = k.softmax(dim = -2)
q = q.softmax(dim = -1)
kv = einsum(k, v, 'b h n d, b h n e -> b h d e')
out = einsum(kv, q, 'b h d e, b h n d -> b h n e')
out = rearrange(out, 'b h n d -> b n (h d)')
if has_degree_m_dim:
out = rearrange(out, '... -> ... 1')
return out
# attention
class L2DistAttention(nn.Module):
@beartype
def __init__(
self,
fiber: Tuple[int, ...],
dim_head: Union[int, Tuple[int, ...]] = 64,
heads: Union[int, Tuple[int, ...]] = 8,
attend_self = False,
edge_dim = None,
single_headed_kv = False,
radial_hidden_dim = 64,
splits = 4,
num_linear_attn_heads = 0,
init_out_zero = True
):
super().__init__()
num_degrees = len(fiber)
dim_head = cast_tuple(dim_head, num_degrees)
assert len(dim_head) == num_degrees
heads = cast_tuple(heads, num_degrees)
assert len(heads) == num_degrees
hidden_fiber = tuple(dim * head for dim, head in zip(dim_head, heads))
self.single_headed_kv = single_headed_kv
self.attend_self = attend_self
kv_hidden_fiber = hidden_fiber if not single_headed_kv else dim_head
kv_hidden_fiber = tuple(dim * 2 for dim in kv_hidden_fiber)
self.scale = tuple(dim ** -0.5 for dim in dim_head)
self.heads = heads
self.prenorm = Norm(fiber)
self.to_q = Linear(fiber, hidden_fiber)
self.to_kv = DTP(fiber, kv_hidden_fiber, radial_hidden_dim = radial_hidden_dim, edge_dim = edge_dim, pool = False, self_interaction = attend_self)
# linear attention heads
self.has_linear_attn = num_linear_attn_heads > 0
if self.has_linear_attn:
degree_zero_dim = fiber[0]
self.linear_attn = LinearAttention(degree_zero_dim, dim_head = dim_head[0], heads = num_linear_attn_heads)
hidden_fiber = tuple_set_at_index(hidden_fiber, 0, hidden_fiber[0] + dim_head[0] * num_linear_attn_heads)
self.to_out = Linear(hidden_fiber, fiber)
if init_out_zero:
self.to_out.init_zero_()
@beartype
def forward(
self,
features,
edge_info: EdgeInfo,
rel_dist,
basis,
D,
mask = None
):
one_head_kv = self.single_headed_kv
device, dtype = get_tensor_device_and_dtype(features)
neighbor_indices, neighbor_mask, edges = edge_info
if exists(neighbor_mask):
neighbor_mask = rearrange(neighbor_mask, 'b i j -> b 1 i j')
if self.attend_self:
neighbor_mask = F.pad(neighbor_mask, (1, 0), value = True)
features = self.prenorm(features)
queries = self.to_q(features)
keyvalues = self.to_kv(
features,
edge_info = edge_info,
rel_dist = rel_dist,
basis = basis,
D = D
)
kv_einsum_eq = 'b h i j d m' if not one_head_kv else 'b i j d m'
outputs = {}
for degree, h, scale in zip(features.keys(), self.heads, self.scale):
is_degree_zero = degree == 0
q, kv = map(lambda t: t[degree], (queries, keyvalues))
q = rearrange(q, 'b i (h d) m -> b h i d m', h = h)
if not one_head_kv:
kv = rearrange(kv, f'b i j (h d) m -> b h i j d m', h = h)
k, v = kv.chunk(2, dim = -2)
if one_head_kv:
k = repeat(k, 'b i j d m -> b h i j d m', h = h)
q = repeat(q, 'b h i d m -> b h i j d m', j = k.shape[-3])
if is_degree_zero:
q, k = map(lambda t: rearrange(t, '... 1 -> ...'), (q, k))
sim = -cdist(q, k) * scale
if not is_degree_zero:
sim = sim.sum(dim = -1)
sim = sim.masked_fill(~neighbor_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
out = einsum(attn, v, f'b h i j, {kv_einsum_eq} -> b h i d m')
outputs[degree] = rearrange(out, 'b h n d m -> b n (h d) m')
if self.has_linear_attn:
lin_attn_out = self.linear_attn(features[0], mask = mask)
outputs[0] = torch.cat((outputs[0], lin_attn_out), dim = -2)
return self.to_out(outputs)
class MLPAttention(nn.Module):
@beartype
def __init__(
self,
fiber: Tuple[int, ...],
dim_head: Union[int, Tuple[int, ...]] = 64,
heads: Union[int, Tuple[int, ...]] = 8,
attend_self = False,
edge_dim = None,
splits = 4,
single_headed_kv = False,
attn_leakyrelu_slope = 0.1,
attn_hidden_dim_mult = 4,
radial_hidden_dim = 16,
num_linear_attn_heads = 0,
init_out_zero = True,
**kwargs
):
super().__init__()
num_degrees = len(fiber)
dim_head = cast_tuple(dim_head, num_degrees)
assert len(dim_head) == num_degrees
heads = cast_tuple(heads, num_degrees)
assert len(heads) == num_degrees
hidden_fiber = tuple(dim * head for dim, head in zip(dim_head, heads))
self.single_headed_kv = single_headed_kv
value_hidden_fiber = hidden_fiber if not single_headed_kv else dim_head
self.attend_self = attend_self
self.scale = tuple(dim ** -0.5 for dim in dim_head)
self.heads = heads
self.prenorm = Norm(fiber)
# type 0 needs greater dimension, for
# (1) gating the htypes on the values branch
        # (2) attention logits, with dimension equal to the number of heads to start with
type0_dim = value_hidden_fiber[0]
htype_dims = sum(value_hidden_fiber[1:])
value_gate_fiber = tuple_set_at_index(value_hidden_fiber, 0, type0_dim + htype_dims)
attn_hidden_dims = tuple(head * attn_hidden_dim_mult for head in heads)
intermediate_fiber = tuple_set_at_index(value_hidden_fiber, 0, sum(attn_hidden_dims) + type0_dim + htype_dims)
self.intermediate_type0_split = [*attn_hidden_dims, type0_dim + htype_dims]
# main branch tensor product
self.to_attn_and_v = DTP(fiber, intermediate_fiber, radial_hidden_dim = radial_hidden_dim, edge_dim = edge_dim, pool = False, self_interaction = attend_self)
# non-linear projection of attention branch into the attention logits
self.to_attn_logits = nn.ModuleList([
nn.Sequential(
nn.LeakyReLU(attn_leakyrelu_slope),
nn.Linear(attn_hidden_dim, h, bias = False)
) for attn_hidden_dim, h in zip(attn_hidden_dims, self.heads)
])
# non-linear transform of the value branch
# todo - needs a DTP here?
self.to_values = nn.Sequential(
Gate(value_gate_fiber),
Linear(value_hidden_fiber, value_hidden_fiber)
)
# linear attention heads
self.has_linear_attn = num_linear_attn_heads > 0
if self.has_linear_attn:
degree_zero_dim = fiber[0]
self.linear_attn = LinearAttention(degree_zero_dim, dim_head = dim_head[0], heads = num_linear_attn_heads)
hidden_fiber = tuple_set_at_index(hidden_fiber, 0, hidden_fiber[0] + dim_head[0] * num_linear_attn_heads)
# combining heads and projection out
self.to_out = Linear(hidden_fiber, fiber)
if init_out_zero:
self.to_out.init_zero_()
@beartype
def forward(
self,
features,
edge_info: EdgeInfo,
rel_dist,
basis,
D,
mask = None
):
one_headed_kv = self.single_headed_kv
_, neighbor_mask, _ = edge_info
if exists(neighbor_mask):
if self.attend_self:
neighbor_mask = F.pad(neighbor_mask, (1, 0), value = True)
neighbor_mask = rearrange(neighbor_mask, '... -> ... 1')
features = self.prenorm(features)
intermediate = self.to_attn_and_v(
features,
edge_info = edge_info,
rel_dist = rel_dist,
basis = basis,
D = D
)
*attn_branch_type0, value_branch_type0 = intermediate[0].split(self.intermediate_type0_split, dim = -2)
intermediate[0] = value_branch_type0
# process the attention branch
attentions = []
for fn, attn_intermediate, scale in zip(self.to_attn_logits, attn_branch_type0, self.scale):
attn_intermediate = rearrange(attn_intermediate, '... 1 -> ...')
attn_logits = fn(attn_intermediate)
attn_logits = attn_logits * scale
if exists(neighbor_mask):
attn_logits = attn_logits.masked_fill(~neighbor_mask, -torch.finfo(attn_logits.dtype).max)
attn = attn_logits.softmax(dim = -2) # (batch, source, target, heads)
attentions.append(attn)
# process values branch
values = self.to_values(intermediate)
# aggregate values with attention matrix
outputs = {}
value_einsum_eq = 'b i j h d m' if not one_headed_kv else 'b i j d m'
for degree, (attn, value, h) in enumerate(zip(attentions, values.values(), self.heads)):
if not one_headed_kv:
value = rearrange(value, 'b i j (h d) m -> b i j h d m', h = h)
out = einsum(attn, value, f'b i j h, {value_einsum_eq} -> b i h d m')
out = rearrange(out, 'b i h d m -> b i (h d) m')
outputs[degree] = out
# linear attention
if self.has_linear_attn:
lin_attn_out = self.linear_attn(features[0], mask = mask)
outputs[0] = torch.cat((outputs[0], lin_attn_out), dim = -2)
# combine heads out
return self.to_out(outputs)
# main class
class Equiformer(nn.Module):
@beartype
def __init__(
self,
*,
dim: Union[int, Tuple[int, ...]],
dim_in: Optional[Union[int, Tuple[int, ...]]] = None,
num_degrees = 2,
input_degrees = 1,
heads: Union[int, Tuple[int, ...]] = 8,
dim_head: Union[int, Tuple[int, ...]] = 24,
depth = 2,
valid_radius = 1e5,
num_neighbors = float('inf'),
reduce_dim_out = False,
radial_hidden_dim = 64,
num_tokens = None,
num_positions = None,
num_edge_tokens = None,
edge_dim = None,
attend_self = True,
splits = 4,
linear_out = True,
embedding_grad_frac = 0.5,
single_headed_kv = False, # whether to do single headed key/values for dot product attention, to save on memory and compute
ff_include_htype_norms = False, # whether for type0 projection to also involve norms of all higher types, in feedforward first projection. this allows for all higher types to be gated by other type norms
l2_dist_attention = True, # turn to False to use MLP attention as proposed in paper, but dot product attention with -cdist similarity is still far better, and i haven't even rotated distances (rotary embeddings) into the type 0 features yet
reversible = False, # turns on reversible networks, to scale depth without incurring depth times memory cost
attend_sparse_neighbors = False, # ability to accept an adjacency matrix
num_adj_degrees_embed = None,
adj_dim = 0,
max_sparse_neighbors = float('inf'),
**kwargs
):
super().__init__()
self.embedding_grad_frac = embedding_grad_frac # trick for more stable training
# decide hidden dimensions for all types
self.dim = cast_tuple(dim, num_degrees)
assert len(self.dim) == num_degrees
self.num_degrees = len(self.dim)
# decide input dimensions for all types
dim_in = default(dim_in, (self.dim[0],))
self.dim_in = cast_tuple(dim_in, input_degrees)
assert len(self.dim_in) == input_degrees
self.input_degrees = len(self.dim_in)
# token embedding
type0_feat_dim = self.dim_in[0]
self.type0_feat_dim = type0_feat_dim
self.token_emb = nn.Embedding(num_tokens, type0_feat_dim) if exists(num_tokens) else None
# positional embedding
self.num_positions = num_positions
self.pos_emb = nn.Embedding(num_positions, type0_feat_dim) if exists(num_positions) else None
# init embeddings
if exists(self.token_emb):
nn.init.normal_(self.token_emb.weight, std = 1e-2)
if exists(self.pos_emb):
nn.init.normal_(self.pos_emb.weight, std = 1e-2)
# edges
assert not (exists(num_edge_tokens) and not exists(edge_dim)), 'edge dimension (edge_dim) must be supplied if equiformer is to have edge tokens'
self.edge_emb = nn.Embedding(num_edge_tokens, edge_dim) if exists(num_edge_tokens) else None
self.has_edges = exists(edge_dim) and edge_dim > 0
# sparse neighbors, derived from adjacency matrix or edges being passed in
self.attend_sparse_neighbors = attend_sparse_neighbors
self.max_sparse_neighbors = max_sparse_neighbors
# adjacent neighbor derivation and embed
assert not exists(num_adj_degrees_embed) or num_adj_degrees_embed >= 1, 'number of adjacent degrees to embed must be 1 or greater'
self.num_adj_degrees_embed = num_adj_degrees_embed
self.adj_emb = nn.Embedding(num_adj_degrees_embed + 1, adj_dim) if exists(num_adj_degrees_embed) and adj_dim > 0 else None
edge_dim = (edge_dim if self.has_edges else 0) + (adj_dim if exists(self.adj_emb) else 0)
# neighbors hyperparameters
self.valid_radius = valid_radius
self.num_neighbors = num_neighbors
# main network
self.tp_in = DTP(
self.dim_in,
self.dim,
edge_dim = edge_dim,
radial_hidden_dim = radial_hidden_dim
)
# trunk
self.layers = []
attention_klass = L2DistAttention if l2_dist_attention else MLPAttention
for ind in range(depth):
self.layers.append((
attention_klass(
self.dim,
heads = heads,
dim_head = dim_head,
attend_self = attend_self,
edge_dim = edge_dim,
single_headed_kv = single_headed_kv,
radial_hidden_dim = radial_hidden_dim,
**kwargs
),
FeedForward(self.dim, include_htype_norms = ff_include_htype_norms)
))
SequenceKlass = ReversibleSequence if reversible else SequentialSequence
self.layers = SequenceKlass(self.layers)
# out
self.norm = Norm(self.dim)
proj_out_klass = Linear if linear_out else FeedForward
self.ff_out = proj_out_klass(self.dim, (1,) * self.num_degrees) if reduce_dim_out else None
# basis is now constant
# pytorch does not have BufferDict yet, just improvise a solution with python property
self.basis = get_basis(self.num_degrees - 1)
@property
def basis(self):
out = dict()
for k in self.basis_keys:
out[k] = getattr(self, f'basis:{k}')
return out
@basis.setter
def basis(self, basis):
self.basis_keys = basis.keys()
for k, v in basis.items():
self.register_buffer(f'basis:{k}', v)
@property
def device(self):
return next(self.parameters()).device
@beartype
def forward(
self,
inputs: Union[Tensor, Dict[int, Tensor]],
coors: Tensor,
mask = None,
adj_mat = None,
edges = None,
return_pooled = False
):
_mask, device = mask, self.device
# apply token embedding and positional embedding to type-0 features
# (if type-0 feats are passed as a tensor they are expected to be of a flattened shape (batch, seq, n_feats)
# but if they are passed in a dict (fiber) they are expected to be of a unified shape (batch, seq, n_feats, 1=2*0+1))
if is_tensor(inputs):
inputs = {0: inputs}
feats = inputs[0]
if feats.ndim == 4:
feats = rearrange(feats, '... 1 -> ...')
if exists(self.token_emb):
assert feats.ndim == 2
feats = self.token_emb(feats)
if exists(self.pos_emb):
seq_len = feats.shape[1]
            assert seq_len <= self.num_positions, 'feature sequence length must not exceed the number of positions given at init'
feats = feats + self.pos_emb(torch.arange(seq_len, device = device))
feats = self.embedding_grad_frac * feats + (1 - self.embedding_grad_frac) * feats.detach()
assert not (self.has_edges and not exists(edges)), 'edge embedding (num_edge_tokens & edge_dim) must be supplied if one were to train on edge types'
b, n, d = feats.shape
feats = rearrange(feats, 'b n d -> b n d 1')
inputs[0] = feats
assert d == self.type0_feat_dim, f'feature dimension {d} must be equal to dimension given at init {self.type0_feat_dim}'
        assert set(map(int, inputs.keys())) == set(range(self.input_degrees)), f'input must have {self.input_degrees} degrees'
num_degrees, neighbors, max_sparse_neighbors, valid_radius = self.num_degrees, self.num_neighbors, self.max_sparse_neighbors, self.valid_radius
assert self.attend_sparse_neighbors or neighbors > 0, 'you must either attend to sparsely bonded neighbors, or set number of locally attended neighbors to be greater than 0'
# cannot have a node attend to itself
exclude_self_mask = rearrange(~torch.eye(n, dtype = torch.bool, device = device), 'i j -> 1 i j')
remove_self = lambda t: t.masked_select(exclude_self_mask).reshape(b, n, n - 1)
get_max_value = lambda t: torch.finfo(t.dtype).max
# create N-degrees adjacent matrix from 1st degree connections
if exists(adj_mat) and adj_mat.ndim == 2:
adj_mat = repeat(adj_mat, 'i j -> b i j', b = b)
if exists(self.num_adj_degrees_embed):
adj_indices = adj_mat.long()
for ind in range(self.num_adj_degrees_embed - 1):
degree = ind + 2
next_degree_adj_mat = (adj_mat.float() @ adj_mat.float()) > 0
next_degree_mask = next_degree_adj_mat & ~adj_mat
adj_indices = adj_indices.masked_fill(next_degree_mask, degree)
adj_mat = next_degree_adj_mat.clone()
adj_indices = adj_indices.masked_select(exclude_self_mask)
adj_indices = rearrange(adj_indices, '(b i j) -> b i j', b = b, i = n, j = n - 1)
# calculate sparsely connected neighbors
sparse_neighbor_mask = None
num_sparse_neighbors = 0
if self.attend_sparse_neighbors:
assert exists(adj_mat), 'adjacency matrix must be passed in (keyword argument adj_mat)'
adj_mat = remove_self(adj_mat)
adj_mat_values = adj_mat.float()
adj_mat_max_neighbors = reduce(adj_mat_values, '... i j -> ... i', 'sum').amax().item()
if max_sparse_neighbors < adj_mat_max_neighbors:
eps = 1e-2
noise = torch.empty_like(adj_mat_values).uniform_(-eps, eps)
adj_mat_values += noise
num_sparse_neighbors = int(min(max_sparse_neighbors, adj_mat_max_neighbors))
values, indices = adj_mat_values.topk(num_sparse_neighbors, dim = -1)
sparse_neighbor_mask = torch.zeros_like(adj_mat_values).scatter_(-1, indices, values)
sparse_neighbor_mask = sparse_neighbor_mask > 0.5
# exclude edge of token to itself
indices = repeat(torch.arange(n, device = device), 'j -> b i j', b = b, i = n)
rel_pos = rearrange(coors, 'b n d -> b n 1 d') - rearrange(coors, 'b n d -> b 1 n d')
indices = indices.masked_select(exclude_self_mask).reshape(b, n, n - 1)
rel_pos = rel_pos.masked_select(exclude_self_mask[..., None]).reshape(b, n, n - 1, 3)
if exists(mask):
mask = rearrange(mask, 'b i -> b i 1') * rearrange(mask, 'b j -> b 1 j')
mask = mask.masked_select(exclude_self_mask).reshape(b, n, n - 1)
if exists(edges):
if exists(self.edge_emb):
edges = self.edge_emb(edges)
edges = edges.masked_select(exclude_self_mask[..., None]).reshape(b, n, n - 1, -1)
rel_dist = rel_pos.norm(dim = -1)
# rel_dist gets modified using adjacency or neighbor mask
modified_rel_dist = rel_dist.clone()
max_value = get_max_value(modified_rel_dist) # for masking out nodes from being considered as neighbors
# make sure padding tokens are not considered when ordering by relative distance
if exists(mask):
modified_rel_dist = modified_rel_dist.masked_fill(~mask, max_value)
# use sparse neighbor mask to assign priority of bonded
if exists(sparse_neighbor_mask):
modified_rel_dist = modified_rel_dist.masked_fill(sparse_neighbor_mask, 0.)
# if number of local neighbors by distance is set to 0, then only fetch the sparse neighbors defined by adjacency matrix
if neighbors == 0:
valid_radius = 0
# get neighbors and neighbor mask, excluding self
neighbors = int(min(neighbors, n - 1))
total_neighbors = int(neighbors + num_sparse_neighbors)
assert total_neighbors > 0, 'you must be fetching at least 1 neighbor'
total_neighbors = int(min(total_neighbors, n - 1)) # make sure total neighbors does not exceed the length of the sequence itself
dist_values, nearest_indices = modified_rel_dist.topk(total_neighbors, dim = -1, largest = False)
neighbor_mask = dist_values <= valid_radius
neighbor_rel_dist = batched_index_select(rel_dist, nearest_indices, dim = 2)
neighbor_rel_pos = batched_index_select(rel_pos, nearest_indices, dim = 2)
neighbor_indices = batched_index_select(indices, nearest_indices, dim = 2)
if exists(mask):
neighbor_mask = neighbor_mask & batched_index_select(mask, nearest_indices, dim = 2)
if exists(edges):
edges = batched_index_select(edges, nearest_indices, dim = 2)
# embed relative distances
neighbor_rel_dist = rearrange(neighbor_rel_dist, '... -> ... 1')
# calculate basis
D = get_D_to_from_z_axis(neighbor_rel_pos, num_degrees - 1)
# main logic
edge_info = EdgeInfo(neighbor_indices, neighbor_mask, edges)
x = inputs
# project in
x = self.tp_in(
x,
edge_info = edge_info,
rel_dist = neighbor_rel_dist,
basis = self.basis,
D = D
)
# transformer layers
attn_kwargs = dict(
edge_info = edge_info,
rel_dist = neighbor_rel_dist,
basis = self.basis,
D = D,
mask = _mask
)
x = self.layers(x, **attn_kwargs)
# norm
x = self.norm(x)
# reduce dim if specified
if exists(self.ff_out):
x = self.ff_out(x)
x = {k: rearrange(v, '... 1 c -> ... c') for k, v in x.items()}
if return_pooled:
mask_fn = (lambda t: masked_mean(t, _mask, dim = 1))
x = {k: mask_fn(v) for k, v in x.items()}
# just return type 0 and type 1 features, reduced or not
type0, type1 = x[0], x.get(1, None)
type0 = rearrange(type0, '... 1 -> ...') # for type 0, just squeeze out the last dimension
return Return(type0, type1)
| equiformer-pytorch-main | equiformer_pytorch/equiformer_pytorch.py |
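A minimal usage sketch for the Equiformer module defined above (hyperparameters here are illustrative only; input and output shapes follow the forward signature):

import torch
from equiformer_pytorch import Equiformer

model = Equiformer(
    num_tokens = 24,        # vocabulary size for the type-0 token embedding
    dim = (32, 16),         # hidden channels for degrees 0 and 1
    num_degrees = 2,
    depth = 2,
    reduce_dim_out = True   # project every degree down to a single channel at the end
)

feats = torch.randint(0, 24, (1, 128))    # (batch, seq) token ids
coors = torch.randn(1, 128, 3)            # (batch, seq, 3) coordinates
mask = torch.ones(1, 128).bool()

type0, type1 = model(feats, coors, mask)  # type0: (1, 128), type1: (1, 128, 3)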
from pathlib import Path
import time
import pickle
import gzip
import torch
import torch.nn.functional as F
import contextlib
from functools import wraps, lru_cache
from filelock import FileLock
from equiformer_pytorch.version import __version__
from einops import rearrange
# helper functions
def exists(val):
return val is not None
def identity(t):
return t
def default(val, d):
return val if exists(val) else d
def to_order(degree):
return 2 * degree + 1
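# a degree-l feature has 2l + 1 components (m = -l ... l), e.g. to_order(1) == 3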
def l2norm(t):
return F.normalize(t, dim = -1)
def pad_for_centering_y_to_x(x, y):
assert y <= x
total_pad = x - y
assert (total_pad % 2) == 0
return total_pad // 2
def slice_for_centering_y_to_x(x, y):
pad = pad_for_centering_y_to_x(x, y)
if pad == 0:
return slice(None)
return slice(pad, -pad)
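# e.g. centering 3 components inside 5: pad_for_centering_y_to_x(5, 3) == 1
# and slice_for_centering_y_to_x(5, 3) == slice(1, -1)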
def safe_cat(arr, el, dim):
if not exists(arr):
return el
return torch.cat((arr, el), dim = dim)
def cast_tuple(val, depth = 1):
return val if isinstance(val, tuple) else (val,) * depth
def batched_index_select(values, indices, dim = 1):
value_dims = values.shape[(dim + 1):]
values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices))
indices = indices[(..., *((None,) * len(value_dims)))]
indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims)
value_expand_len = len(indices_shape) - (dim + 1)
values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)]
value_expand_shape = [-1] * len(values.shape)
expand_slice = slice(dim, (dim + value_expand_len))
value_expand_shape[expand_slice] = indices.shape[expand_slice]
values = values.expand(*value_expand_shape)
dim += value_expand_len
return values.gather(dim, indices)
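# e.g. values of shape (b, n, d, m) with indices (b, i, j) and dim = 1 gather along
# the sequence axis per batch, giving output of shape (b, i, j, d, m)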
def fast_split(arr, splits, dim=0):
axis_len = arr.shape[dim]
splits = min(axis_len, max(splits, 1))
chunk_size = axis_len // splits
remainder = axis_len - chunk_size * splits
s = 0
for i in range(splits):
adjust, remainder = 1 if remainder > 0 else 0, remainder - 1
yield torch.narrow(arr, dim, s, chunk_size + adjust)
s += chunk_size + adjust
def masked_mean(tensor, mask, dim = -1):
if not exists(mask):
return tensor.mean(dim = dim)
diff_len = len(tensor.shape) - len(mask.shape)
mask = mask[(..., *((None,) * diff_len))]
tensor.masked_fill_(~mask, 0.)
total_el = mask.sum(dim = dim)
mean = tensor.sum(dim = dim) / total_el.clamp(min = 1.)
mean.masked_fill_(total_el == 0, 0.)
return mean
def rand_uniform(size, min_val, max_val):
return torch.empty(size).uniform_(min_val, max_val)
# default dtype context manager
@contextlib.contextmanager
def torch_default_dtype(dtype):
prev_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
yield
torch.set_default_dtype(prev_dtype)
def cast_torch_tensor(fn):
@wraps(fn)
def inner(t):
if not torch.is_tensor(t):
t = torch.tensor(t, dtype = torch.get_default_dtype())
return fn(t)
return inner
# benchmark tool
def benchmark(fn):
def inner(*args, **kwargs):
start = time.time()
res = fn(*args, **kwargs)
diff = time.time() - start
return diff, res
return inner
# caching functions
def cache(cache, key_fn):
def cache_inner(fn):
@wraps(fn)
def inner(*args, **kwargs):
key_name = key_fn(*args, **kwargs)
if key_name in cache:
return cache[key_name]
res = fn(*args, **kwargs)
cache[key_name] = res
return res
return inner
return cache_inner
# cache in directory
def cache_dir(dirname, maxsize=128):
'''
Cache a function with a directory
:param dirname: the directory path
:param maxsize: maximum size of the RAM cache (there is no limit for the directory cache)
'''
def decorator(func):
@lru_cache(maxsize=maxsize)
@wraps(func)
def wrapper(*args, **kwargs):
if not exists(dirname):
return func(*args, **kwargs)
dirpath = Path(dirname)
dirpath.mkdir(parents = True, exist_ok = True)
indexfile = dirpath / 'index.pkl'
lock = FileLock(str(dirpath / 'mutex'))
with lock:
index = {}
if indexfile.exists():
with open(indexfile, "rb") as file:
index = pickle.load(file)
                key = (args, frozenset(kwargs.items()), func.__defaults__)
if key in index:
filename = index[key]
else:
index[key] = filename = f"{len(index)}.pkl.gz"
with open(indexfile, "wb") as file:
pickle.dump(index, file)
filepath = dirpath / filename
if filepath.exists():
with lock:
with gzip.open(filepath, "rb") as file:
result = pickle.load(file)
return result
print(f"compute {filename}... ", end="", flush = True)
result = func(*args, **kwargs)
print(f"save {filename}... ", end="", flush = True)
with lock:
with gzip.open(filepath, "wb") as file:
pickle.dump(result, file)
print("done")
return result
return wrapper
return decorator
| equiformer-pytorch-main | equiformer_pytorch/utils.py |
from pathlib import Path
from functools import partial
import torch
import torch.nn.functional as F
from torch import sin, cos, atan2, acos
from einops import rearrange, pack, unpack
from equiformer_pytorch.utils import (
exists,
default,
cast_torch_tensor,
to_order,
identity,
l2norm
)
DATA_PATH = Path(__file__).parents[0] / 'data'
path = DATA_PATH / 'J_dense.pt'
Jd = torch.load(str(path))
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def wigner_d_matrix(degree, alpha, beta, gamma, dtype = None, device = None):
"""Create wigner D matrices for batch of ZYZ Euler angles for degree l."""
batch = alpha.shape[0]
J = Jd[degree].type(dtype).to(device)
order = to_order(degree)
x_a = z_rot_mat(alpha, degree)
x_b = z_rot_mat(beta, degree)
x_c = z_rot_mat(gamma, degree)
res = x_a @ J @ x_b @ J @ x_c
return res.view(batch, order, order)
def z_rot_mat(angle, l):
device, dtype = angle.device, angle.dtype
batch = angle.shape[0]
arange = partial(torch.arange, device = device)
order = to_order(l)
m = angle.new_zeros((batch, order, order))
batch_range = arange(batch, dtype = torch.long)[..., None]
inds = arange(order, dtype = torch.long)[None, ...]
reversed_inds = arange(2 * l, -1, -1, dtype = torch.long)[None, ...]
frequencies = arange(l, -l - 1, -1, dtype = dtype)[None]
m[batch_range, inds, reversed_inds] = sin(frequencies * angle[..., None])
m[batch_range, inds, inds] = cos(frequencies * angle[..., None])
return m
def irr_repr(order, angles):
"""
irreducible representation of SO3 - accepts multiple angles in tensor
"""
dtype, device = angles.dtype, angles.device
angles, ps = pack_one(angles, '* c')
alpha, beta, gamma = angles.unbind(dim = -1)
rep = wigner_d_matrix(order, alpha, beta, gamma, dtype = dtype, device = device)
return unpack_one(rep, ps, '* o1 o2')
@cast_torch_tensor
def rot_z(gamma):
'''
Rotation around Z axis
'''
c = cos(gamma)
s = sin(gamma)
z = torch.zeros_like(gamma)
o = torch.ones_like(gamma)
out = torch.stack((
c, -s, z,
s, c, z,
z, z, o
), dim = -1)
return rearrange(out, '... (r1 r2) -> ... r1 r2', r1 = 3)
@cast_torch_tensor
def rot_y(beta):
'''
Rotation around Y axis
'''
c = cos(beta)
s = sin(beta)
z = torch.zeros_like(beta)
o = torch.ones_like(beta)
out = torch.stack((
c, z, s,
z, o, z,
-s, z, c
), dim = -1)
return rearrange(out, '... (r1 r2) -> ... r1 r2', r1 = 3)
def rot(alpha, beta, gamma):
'''
ZYZ Euler angles rotation
'''
return rot_z(alpha) @ rot_y(beta) @ rot_z(gamma)
def rot_to_euler_angles(R):
'''
Rotation matrix to ZYZ Euler angles
'''
device, dtype = R.device, R.dtype
xyz = R @ torch.tensor([0.0, 1.0, 0.0], device = device, dtype = dtype)
xyz = l2norm(xyz).clamp(-1., 1.)
b = acos(xyz[..., 1])
a = atan2(xyz[..., 0], xyz[..., 2])
R = rot(a, b, torch.zeros_like(a)).transpose(-1, -2) @ R
c = atan2(R[..., 0, 2], R[..., 0, 0])
return torch.stack((a, b, c), dim = -1)
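# Sanity-check sketch (illustrative addition, not part of the original module):
# real Wigner-D matrices are orthogonal, and rot_to_euler_angles should invert rot
D = irr_repr(2, torch.randn(3))
assert torch.allclose(D @ D.transpose(-1, -2), torch.eye(5), atol = 1e-4)
R = rot(*torch.randn(3))
assert torch.allclose(rot(*rot_to_euler_angles(R)), R, atol = 1e-4)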
| equiformer-pytorch-main | equiformer_pytorch/irr_repr.py |
from setuptools import setup, find_packages
setup(
name = 'glom-pytorch',
packages = find_packages(),
version = '0.0.14',
license='MIT',
description = 'Glom - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/glom-pytorch',
keywords = [
'artificial intelligence',
'deep learning'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| glom-pytorch-main | setup.py |
from glom_pytorch.glom_pytorch import Glom
| glom-pytorch-main | glom_pytorch/__init__.py |
from math import sqrt
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# constants
TOKEN_ATTEND_SELF_VALUE = -5e-4
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# class
class GroupedFeedForward(nn.Module):
def __init__(self, *, dim, groups, mult = 4):
super().__init__()
total_dim = dim * groups # levels * dim
self.net = nn.Sequential(
Rearrange('b n l d -> b (l d) n'),
nn.Conv1d(total_dim, total_dim * mult, 1, groups = groups),
nn.GELU(),
nn.Conv1d(total_dim * mult, total_dim, 1, groups = groups),
Rearrange('b (l d) n -> b n l d', l = groups)
)
def forward(self, levels):
return self.net(levels)
class ConsensusAttention(nn.Module):
def __init__(self, num_patches_side, attend_self = True, local_consensus_radius = 0):
super().__init__()
self.attend_self = attend_self
self.local_consensus_radius = local_consensus_radius
if self.local_consensus_radius > 0:
coors = torch.stack(torch.meshgrid(
torch.arange(num_patches_side),
torch.arange(num_patches_side)
)).float()
coors = rearrange(coors, 'c h w -> (h w) c')
dist = torch.cdist(coors, coors)
mask_non_local = dist > self.local_consensus_radius
mask_non_local = rearrange(mask_non_local, 'i j -> () i j')
self.register_buffer('non_local_mask', mask_non_local)
def forward(self, levels):
_, n, _, d, device = *levels.shape, levels.device
q, k, v = levels, F.normalize(levels, dim = -1), levels
sim = einsum('b i l d, b j l d -> b l i j', q, k) * (d ** -0.5)
if not self.attend_self:
self_mask = torch.eye(n, device = device, dtype = torch.bool)
self_mask = rearrange(self_mask, 'i j -> () () i j')
sim.masked_fill_(self_mask, TOKEN_ATTEND_SELF_VALUE)
if self.local_consensus_radius > 0:
max_neg_value = -torch.finfo(sim.dtype).max
sim.masked_fill_(self.non_local_mask, max_neg_value)
attn = sim.softmax(dim = -1)
out = einsum('b l i j, b j l d -> b i l d', attn, levels)
return out
# main class
class Glom(nn.Module):
def __init__(
self,
*,
dim = 512,
levels = 6,
image_size = 224,
patch_size = 14,
consensus_self = False,
local_consensus_radius = 0
):
super().__init__()
# bottom level - incoming image, tokenize and add position
num_patches_side = (image_size // patch_size)
num_patches = num_patches_side ** 2
self.levels = levels
self.image_to_tokens = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear(patch_size ** 2 * 3, dim)
)
self.pos_emb = nn.Embedding(num_patches, dim)
# initial embeddings for all levels of a column
self.init_levels = nn.Parameter(torch.randn(levels, dim))
# bottom-up and top-down
self.bottom_up = GroupedFeedForward(dim = dim, groups = levels)
self.top_down = GroupedFeedForward(dim = dim, groups = levels - 1)
# consensus attention
self.attention = ConsensusAttention(num_patches_side, attend_self = consensus_self, local_consensus_radius = local_consensus_radius)
def forward(self, img, iters = None, levels = None, return_all = False):
b, device = img.shape[0], img.device
iters = default(iters, self.levels * 2) # need to have twice the number of levels of iterations in order for information to propagate up and back down. can be overridden
tokens = self.image_to_tokens(img)
n = tokens.shape[1]
pos_embs = self.pos_emb(torch.arange(n, device = device))
pos_embs = rearrange(pos_embs, 'n d -> () n () d')
bottom_level = tokens
bottom_level = rearrange(bottom_level, 'b n d -> b n () d')
if not exists(levels):
levels = repeat(self.init_levels, 'l d -> b n l d', b = b, n = n)
hiddens = [levels]
num_contributions = torch.empty(self.levels, device = device).fill_(4)
num_contributions[-1] = 3 # top level does not get a top-down contribution, so have to account for this when doing the weighted mean
for _ in range(iters):
levels_with_input = torch.cat((bottom_level, levels), dim = -2) # each iteration, attach original input at the most bottom level, to be bottomed-up
bottom_up_out = self.bottom_up(levels_with_input[..., :-1, :])
top_down_out = self.top_down(levels_with_input[..., 2:, :] + pos_embs) # positional embeddings given to top-down networks
top_down_out = F.pad(top_down_out, (0, 0, 0, 1), value = 0.)
consensus = self.attention(levels)
levels_sum = torch.stack((levels, bottom_up_out, top_down_out, consensus)).sum(dim = 0) # hinton said to use the weighted mean of (1) bottom up (2) top down (3) previous level value {t - 1} (4) consensus value
levels_mean = levels_sum / rearrange(num_contributions, 'l -> () () l ()')
levels = levels_mean # set for next iteration
hiddens.append(levels)
if return_all:
return torch.stack(hiddens) # return (time step, batch, num columns, levels, dimension)
return levels
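# Usage sketch (illustrative addition, not part of the original module)
model = Glom(dim = 512, levels = 6, image_size = 224, patch_size = 14)
img = torch.randn(1, 3, 224, 224)
levels_out = model(img, iters = 12) # (1, 256, 6, 512) - (batch, num patches, levels, dim)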
| glom-pytorch-main | glom_pytorch/glom_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'holodeck-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Holodeck - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/holodeck-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'denoising diffusion',
'temporal scene representations',
'hypernetworks'
],
install_requires=[
'einops>=0.4',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| holodeck-pytorch-main | setup.py |
from holodeck_pytorch.holodeck_pytorch import Holodeck
| holodeck-pytorch-main | holodeck_pytorch/__init__.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
dim_context = None,
heads = 8,
norm_context = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
dim_context = default(dim_context, dim)
self.norm = nn.LayerNorm(dim)
self.context_norm = nn.LayerNorm(dim_context) if norm_context else nn.Identity()
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim_context, dim_head * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None
):
if exists(context):
context = self.context_norm(context)
kv_input = default(context, x)
x = self.norm(x)
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
q = q * self.scale
sim = einsum('b h i d, b j d -> b h i j', q, k)
mask_value = -torch.finfo(sim.dtype).max
if exists(attn_bias):
sim = sim + attn_bias
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, mask_value)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# main class
class Holodeck(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
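# Usage sketch (illustrative addition, not part of the original module): the
# attention block takes (batch, seq, dim) features and an optional context tensor
attn = Attention(dim = 512, dim_context = 256)
x = torch.randn(1, 16, 512)
context = torch.randn(1, 32, 256)
out = attn(x, context = context) # (1, 16, 512)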
| holodeck-pytorch-main | holodeck_pytorch/holodeck_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'triangle-multiplicative-module',
packages = find_packages(),
version = '0.0.3',
license='MIT',
description = 'Triangle Multiplicative Module',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/triangle-multiplicative-module',
keywords = [
'artificial intelligence',
'deep learning',
'protein folding'
],
install_requires=[
'einops>=0.3',
'torch>=1.7'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| triangle-multiplicative-module-main | setup.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# classes
class TriangleMultiplicativeModule(nn.Module):
def __init__(
self,
*,
dim,
hidden_dim = None,
mix = 'ingoing'
):
super().__init__()
assert mix in {'ingoing', 'outgoing'}, 'mix must be either ingoing or outgoing'
hidden_dim = default(hidden_dim, dim)
self.norm = nn.LayerNorm(dim)
self.left_proj = nn.Linear(dim, hidden_dim)
self.right_proj = nn.Linear(dim, hidden_dim)
self.left_gate = nn.Linear(dim, hidden_dim)
self.right_gate = nn.Linear(dim, hidden_dim)
self.out_gate = nn.Linear(dim, hidden_dim)
# initialize all gating to be identity
for gate in (self.left_gate, self.right_gate, self.out_gate):
nn.init.constant_(gate.weight, 0.)
nn.init.constant_(gate.bias, 1.)
if mix == 'outgoing':
self.mix_einsum_eq = '... i k d, ... j k d -> ... i j d'
elif mix == 'ingoing':
self.mix_einsum_eq = '... k j d, ... k i d -> ... i j d'
self.to_out_norm = nn.LayerNorm(hidden_dim)
self.to_out = nn.Linear(hidden_dim, dim)
def forward(self, x, mask = None):
assert x.shape[1] == x.shape[2], 'feature map must be square along its spatial dimensions'
if exists(mask):
mask = rearrange(mask, 'b i j -> b i j ()')
x = self.norm(x)
left = self.left_proj(x)
right = self.right_proj(x)
if exists(mask):
left = left * mask
right = right * mask
left_gate = self.left_gate(x).sigmoid()
right_gate = self.right_gate(x).sigmoid()
out_gate = self.out_gate(x).sigmoid()
left = left * left_gate
right = right * right_gate
out = einsum(self.mix_einsum_eq, left, right)
out = self.to_out_norm(out)
out = out * out_gate
return self.to_out(out)
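# Usage sketch (illustrative addition, not part of the original module)
block = TriangleMultiplicativeModule(dim = 64, mix = 'outgoing')
fmap = torch.randn(1, 32, 32, 64) # pairwise feature map, must be square
mask = torch.ones(1, 32, 32).bool()
out = block(fmap, mask = mask) # (1, 32, 32, 64)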
| triangle-multiplicative-module-main | triangle_multiplicative_module/triangle_multiplicative_module.py |
from triangle_multiplicative_module.triangle_multiplicative_module import TriangleMultiplicativeModule
| triangle-multiplicative-module-main | triangle_multiplicative_module/__init__.py |
from setuptools import setup, find_packages
exec(open('naturalspeech2_pytorch/version.py').read())
setup(
name = 'naturalspeech2-pytorch',
packages = find_packages(exclude=[]),
version = __version__,
license='MIT',
description = 'Natural Speech 2 - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
include_package_data = True,
url = 'https://github.com/lucidrains/naturalspeech2-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'latent diffusion',
'speech synthesis'
],
install_requires=[
'accelerate',
'audiolm-pytorch>=0.30.2',
'beartype',
'einops>=0.6.1',
'ema-pytorch',
'indic-num2words',
'inflect',
'local-attention',
'num2words',
'pyworld',
'pydantic<2.0',
'torch>=1.6',
'tqdm',
'vector-quantize-pytorch>=1.4.1'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| naturalspeech2-pytorch-main | setup.py |
from typing import Tuple
import numpy as np
import torch
from torch import nn, Tensor
from torch.nn import Module
import torch.nn.functional as F
from einops import rearrange, repeat
from beartype import beartype
from beartype.typing import Optional
def exists(val):
return val is not None
class AlignerNet(Module):
"""alignment model https://arxiv.org/pdf/2108.10447.pdf """
def __init__(
self,
dim_in=80,
dim_hidden=512,
attn_channels=80,
temperature=0.0005,
):
super().__init__()
self.temperature = temperature
self.key_layers = nn.ModuleList([
nn.Conv1d(
dim_hidden,
dim_hidden * 2,
kernel_size=3,
padding=1,
bias=True,
),
nn.ReLU(inplace=True),
nn.Conv1d(dim_hidden * 2, attn_channels, kernel_size=1, padding=0, bias=True)
])
self.query_layers = nn.ModuleList([
nn.Conv1d(
dim_in,
dim_in * 2,
kernel_size=3,
padding=1,
bias=True,
),
nn.ReLU(inplace=True),
nn.Conv1d(dim_in * 2, dim_in, kernel_size=1, padding=0, bias=True),
nn.ReLU(inplace=True),
nn.Conv1d(dim_in, attn_channels, kernel_size=1, padding=0, bias=True)
])
@beartype
def forward(
self,
queries: Tensor,
keys: Tensor,
mask: Optional[Tensor] = None
):
key_out = keys
for layer in self.key_layers:
key_out = layer(key_out)
query_out = queries
for layer in self.query_layers:
query_out = layer(query_out)
key_out = rearrange(key_out, 'b c t -> b t c')
query_out = rearrange(query_out, 'b c t -> b t c')
attn_logp = torch.cdist(query_out, key_out)
attn_logp = rearrange(attn_logp, 'b ... -> b 1 ...')
if exists(mask):
mask = rearrange(mask.bool(), '... c -> ... 1 c')
attn_logp.data.masked_fill_(~mask, -torch.finfo(attn_logp.dtype).max)
attn = attn_logp.softmax(dim = -1)
return attn, attn_logp
def pad_tensor(input, pad, value=0):
pad = [item for sublist in reversed(pad) for item in sublist] # Flatten the tuple
assert len(pad) // 2 == len(input.shape), 'Padding dimensions do not match input dimensions'
return F.pad(input, pad, mode='constant', value=value)
def maximum_path(value, mask, const=None):
device = value.device
dtype = value.dtype
if not exists(const):
const = torch.tensor(float('-inf')).to(device) # Patch for Sphinx complaint
value = value * mask
b, t_x, t_y = value.shape
direction = torch.zeros(value.shape, dtype=torch.int64, device=device)
v = torch.zeros((b, t_x), dtype=torch.float32, device=device)
x_range = torch.arange(t_x, dtype=torch.float32, device=device).view(1, -1)
for j in range(t_y):
v0 = pad_tensor(v, ((0, 0), (1, 0)), value = const)[:, :-1]
v1 = v
max_mask = v1 >= v0
v_max = torch.where(max_mask, v1, v0)
direction[:, :, j] = max_mask
index_mask = x_range <= j
v = torch.where(index_mask.view(1,-1), v_max + value[:, :, j], const)
direction = torch.where(mask.bool(), direction, 1)
path = torch.zeros(value.shape, dtype=torch.float32, device=device)
index = mask[:, :, 0].sum(1).long() - 1
index_range = torch.arange(b, device=device)
for j in reversed(range(t_y)):
path[index_range, index, j] = 1
index = index + direction[index_range, index, j] - 1
path = path * mask.float()
path = path.to(dtype=dtype)
return path
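# Worked example (illustrative addition, not part of the original module):
# recover the best monotonic alignment of 3 tokens to 5 frames from a score grid
scores = torch.randn(1, 3, 5)
grid_mask = torch.ones(1, 3, 5)
hard_path = maximum_path(scores, grid_mask) # 0 / 1 path, exactly one token per frame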
class ForwardSumLoss(Module):
def __init__(
self,
blank_logprob = -1
):
super().__init__()
self.blank_logprob = blank_logprob
self.ctc_loss = torch.nn.CTCLoss(
blank = 0, # index 0 corresponds to the blank logit padded onto attn_logprob in forward
zero_infinity = True
)
def forward(self, attn_logprob, key_lens, query_lens):
device, blank_logprob = attn_logprob.device, self.blank_logprob
max_key_len = attn_logprob.size(-1)
# Reorder input to [query_len, batch_size, key_len]
attn_logprob = rearrange(attn_logprob, 'b 1 c t -> c b t')
# Add blank label
attn_logprob = F.pad(attn_logprob, (1, 0, 0, 0, 0, 0), value = blank_logprob)
# Convert to log probabilities
# Note: Mask out probs beyond key_len
attn_logprob.masked_fill_(torch.arange(max_key_len + 1, device=device, dtype=torch.long).view(1, 1, -1) > key_lens.view(1, -1, 1), -1e15)
attn_logprob = attn_logprob.log_softmax(dim = -1)
# Target sequences
target_seqs = torch.arange(1, max_key_len + 1, device=device, dtype=torch.long)
target_seqs = repeat(target_seqs, 'n -> b n', b = key_lens.numel())
# Evaluate CTC loss
cost = self.ctc_loss(attn_logprob, target_seqs, query_lens, key_lens)
return cost
class Aligner(Module):
def __init__(
self,
dim_in,
dim_hidden,
attn_channels=80,
temperature=0.0005
):
super().__init__()
self.dim_in = dim_in
self.dim_hidden = dim_hidden
self.attn_channels = attn_channels
self.temperature = temperature
self.aligner = AlignerNet(
dim_in = self.dim_in,
dim_hidden = self.dim_hidden,
attn_channels = self.attn_channels,
temperature = self.temperature
)
def forward(
self,
x,
x_mask,
y,
y_mask
):
alignment_soft, alignment_logprob = self.aligner(y, rearrange(x, 'b d t -> b t d'), x_mask)
x_mask = rearrange(x_mask, '... i -> ... i 1')
y_mask = rearrange(y_mask, '... j -> ... 1 j')
attn_mask = x_mask * y_mask
attn_mask = rearrange(attn_mask, 'b 1 i j -> b i j')
alignment_soft = rearrange(alignment_soft, 'b 1 c t -> b t c')
alignment_mask = maximum_path(alignment_soft, attn_mask)
alignment_hard = torch.sum(alignment_mask, -1).int()
return alignment_hard, alignment_soft, alignment_logprob, alignment_mask
if __name__ == '__main__':
batch_size = 10
seq_len_y = 200 # length of sequence y
seq_len_x = 35
feature_dim = 80 # feature dimension
x = torch.randn(batch_size, seq_len_x, 512)         # phoneme encodings, (batch, seq, dim_hidden) as Aligner.forward expects
y = torch.randn(batch_size, feature_dim, seq_len_y) # mel features, channels first (batch, n_mels, frames)
# Create masks
x_mask = torch.ones(batch_size, 1, seq_len_x)
y_mask = torch.ones(batch_size, 1, seq_len_y)
align = Aligner(dim_in = 80, dim_hidden=512, attn_channels=80)
alignment_hard, alignment_soft, alignment_logprob, alignment_mask = align(x, x_mask, y, y_mask)
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/aligner.py |
__version__ = '0.1.5'
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/version.py |
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from naturalspeech2_pytorch.naturalspeech2_pytorch import (
NaturalSpeech2,
Transformer,
Wavenet,
Model,
Trainer,
PhonemeEncoder,
DurationPitchPredictor,
SpeechPromptEncoder,
Tokenizer,
ESpeak
)
from audiolm_pytorch import (
SoundStream,
EncodecWrapper
)
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/__init__.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
use_flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash = use_flash
assert not (use_flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.use_flash:
return self.flash_attn(q, k, v, mask = mask)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
# similarity
sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v)
return out
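# Usage sketch (illustrative addition, not part of the original module)
attend = Attend(causal = True)
q = k = v = torch.randn(2, 8, 128, 64) # (batch, heads, seq, dim head)
mask = torch.ones(2, 128).bool() # key padding mask
out = attend(q, k, v, mask = mask) # (2, 8, 128, 64)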
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/attend.py |
import math
import copy
from multiprocessing import cpu_count
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchaudio.transforms as T
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from audiolm_pytorch import SoundStream, EncodecWrapper
from audiolm_pytorch.data import SoundDataset, get_dataloader
from beartype import beartype
from beartype.typing import Tuple, Union, Optional, List
from beartype.door import is_bearable
from naturalspeech2_pytorch.attend import Attend
from naturalspeech2_pytorch.aligner import Aligner, ForwardSumLoss
from naturalspeech2_pytorch.utils.tokenizer import Tokenizer, ESpeak
from naturalspeech2_pytorch.utils.utils import average_over_durations, create_mask
from naturalspeech2_pytorch.version import __version__
from accelerate import Accelerator
from ema_pytorch import EMA
from tqdm.auto import tqdm
import pyworld as pw
# constants
mlist = nn.ModuleList
def Sequential(*mods):
return nn.Sequential(*filter(exists, mods))
# helpers functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def divisible_by(num, den):
return (num % den) == 0
def identity(t, *args, **kwargs):
return t
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num
# tensor helpers
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
def generate_mask_from_repeats(repeats):
repeats = repeats.int()
device = repeats.device
lengths = repeats.sum(dim = -1)
max_length = lengths.amax().item()
cumsum = repeats.cumsum(dim = -1)
cumsum_exclusive = F.pad(cumsum, (1, -1), value = 0.)
seq = torch.arange(max_length, device = device)
seq = repeat(seq, '... j -> ... i j', i = repeats.shape[-1])
cumsum = rearrange(cumsum, '... i -> ... i 1')
cumsum_exclusive = rearrange(cumsum_exclusive, '... i -> ... i 1')
lengths = rearrange(lengths, 'b -> b 1 1')
mask = (seq < cumsum) & (seq >= cumsum_exclusive) & (seq < lengths)
return mask
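# Worked example (illustrative addition, not part of the original module):
# repeats of [2, 1, 3] yield a (1, 3, 6) boolean mask where row i spans the
# output slots of token i, e.g. row 0 = [T, T, F, F, F, F] and row 2 = [F, F, F, T, T, T]
example_mask = generate_mask_from_repeats(torch.tensor([[2, 1, 3]]))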
# sinusoidal positional embeds
class LearnedSinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
assert divisible_by(dim, 2)
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
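# Usage sketch (illustrative addition, not part of the original module): embeds
# a batch of scalar diffusion times into dim + 1 features (the raw time plus
# dim / 2 learned sin-cos pairs)
time_emb = LearnedSinusoidalPosEmb(16)
emb = time_emb(torch.rand(4)) # (4, 17)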
# compute pitch
def compute_pitch_pytorch(wav, sample_rate):
#https://pytorch.org/audio/main/generated/torchaudio.functional.compute_kaldi_pitch.html#torchaudio.functional.compute_kaldi_pitch
pitch_feature = torchaudio.functional.compute_kaldi_pitch(wav, sample_rate)
pitch, nccf = pitch_feature.unbind(dim = -1) # nccf (normalized cross correlation) is unused
return pitch
#as mentioned in paper using pyworld
def compute_pitch_pyworld(wav, sample_rate, hop_length, pitch_fmax=640.0):
is_tensor_input = torch.is_tensor(wav)
if is_tensor_input:
device = wav.device
wav = wav.contiguous().cpu().numpy()
if divisible_by(len(wav), hop_length):
wav = np.pad(wav, (0, hop_length // 2), mode="reflect")
wav = wav.astype(np.double)
outs = []
for sample in wav:
f0, t = pw.dio(
sample,
fs = sample_rate,
f0_ceil = pitch_fmax,
frame_period = 1000 * hop_length / sample_rate,
)
f0 = pw.stonemask(sample, f0, t, sample_rate)
outs.append(f0)
outs = np.stack(outs)
if is_tensor_input:
outs = torch.from_numpy(outs).to(device)
return outs
def f0_to_coarse(f0, f0_bin = 256, f0_max = 1100.0, f0_min = 50.0):
f0_mel_max = 1127 * torch.log(1 + torch.tensor(f0_max) / 700)
f0_mel_min = 1127 * torch.log(1 + torch.tensor(f0_min) / 700)
f0_mel = 1127 * (1 + f0 / 700).log()
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
f0_mel[f0_mel <= 1] = 1
f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
f0_coarse = (f0_mel + 0.5).int()
assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
return f0_coarse
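# Worked example (illustrative addition, not part of the original module):
# quantizes f0 in hz to coarse mel-scaled bins in [1, 255] for the pitch
# embedding table; 0 hz (unvoiced) lands in bin 1
coarse = f0_to_coarse(torch.tensor([0., 100., 220., 440.]))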
# peripheral models
# audio to mel
class AudioToMel(nn.Module):
def __init__(
self,
*,
n_mels = 100,
sampling_rate = 24000,
f_max = 8000,
n_fft = 1024,
win_length = 640,
hop_length = 160,
log = True
):
super().__init__()
self.log = log
self.n_mels = n_mels
self.n_fft = n_fft
self.f_max = f_max
self.win_length = win_length
self.hop_length = hop_length
self.sampling_rate = sampling_rate
def forward(self, audio):
stft_transform = T.Spectrogram(
n_fft = self.n_fft,
win_length = self.win_length,
hop_length = self.hop_length,
window_fn = torch.hann_window
)
spectrogram = stft_transform(audio)
mel_transform = T.MelScale(
n_mels = self.n_mels,
sample_rate = self.sampling_rate,
n_stft = self.n_fft // 2 + 1,
f_max = self.f_max
)
mel = mel_transform(spectrogram)
if self.log:
mel = T.AmplitudeToDB()(mel)
return mel
# phoneme - pitch - speech prompt - duration predictors
class PhonemeEncoder(nn.Module):
def __init__(
self,
*,
tokenizer: Optional[Tokenizer] = None,
num_tokens = None,
dim = 512,
dim_hidden = 512,
kernel_size = 9,
depth = 6,
dim_head = 64,
heads = 8,
conv_dropout = 0.2,
attn_dropout = 0.,
use_flash = False
):
super().__init__()
self.tokenizer = tokenizer
num_tokens = default(num_tokens, tokenizer.vocab_size if exists(tokenizer) else None)
self.token_emb = nn.Embedding(num_tokens + 1, dim) if exists(num_tokens) else nn.Identity()
self.pad_id = num_tokens
same_padding = (kernel_size - 1) // 2
self.conv = nn.Sequential(
Rearrange('b n c -> b c n'),
CausalConv1d(dim, dim_hidden, kernel_size),
nn.SiLU(),
nn.Dropout(conv_dropout),
Rearrange('b c n -> b n c'),
)
self.transformer = Transformer(
dim = dim_hidden,
depth = depth,
dim_head = dim_head,
heads = heads,
dropout = attn_dropout,
use_flash = use_flash
)
@beartype
def forward(
self,
x: Union[Tensor, List[str]],
mask = None
):
if is_bearable(x, List[str]):
assert exists(self.tokenizer)
x = self.tokenizer.texts_to_tensor_ids(x)
is_padding = x < 0
x = x.masked_fill(is_padding, self.pad_id)
x = self.token_emb(x)
x = self.conv(x)
x = self.transformer(x, mask = mask)
return x
class SpeechPromptEncoder(nn.Module):
@beartype
def __init__(
self,
dim_codebook,
dims: Tuple[int, ...] = (256, 2048, 2048, 2048, 2048, 512, 512, 512),
*,
depth = 6,
heads = 8,
dim_head = 64,
dropout = 0.2,
kernel_size = 9,
padding = 4,
use_flash_attn = True
):
super().__init__()
dims = [dim_codebook, *dims]
self.dim, self.dim_out = dims[0], dims[-1]
dim_pairs = zip(dims[:-1], dims[1:])
modules = []
for dim_in, dim_out in dim_pairs:
modules.extend([
nn.Conv1d(dim_in, dim_out, kernel_size, padding = padding),
nn.SiLU()
])
self.conv = nn.Sequential(
Rearrange('b n c -> b c n'),
*modules,
Rearrange('b c n -> b n c')
)
self.transformer = Transformer(
dim = dims[-1],
depth = depth,
heads = heads,
dim_head = dim_head,
dropout = dropout,
use_flash = use_flash_attn
)
def forward(self, x):
assert x.shape[-1] == self.dim
x = self.conv(x)
x = self.transformer(x)
return x
# duration and pitch predictor seems to be the same
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
kernel = 3,
groups = 8,
dropout = 0.
):
super().__init__()
self.proj = nn.Conv1d(dim, dim_out, kernel, padding = kernel // 2)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.proj(x)
x = self.norm(x)
x = self.act(x)
x = self.dropout(x)
return x
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
kernel,
*,
dropout = 0.,
groups = 8,
num_convs = 2
):
super().__init__()
blocks = []
for ind in range(num_convs):
is_first = ind == 0
dim_in = dim if is_first else dim_out
block = Block(
dim_in,
dim_out,
kernel,
groups = groups,
dropout = dropout
)
blocks.append(block)
self.blocks = nn.Sequential(*blocks)
self.res_conv = nn.Conv1d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x):
x = rearrange(x, 'b n c -> b c n')
h = self.blocks(x)
out = h + self.res_conv(x)
return rearrange(out, 'b c n -> b n c')
def ConvBlock(dim, dim_out, kernel, dropout = 0.):
return nn.Sequential(
Rearrange('b n c -> b c n'),
nn.Conv1d(dim, dim_out, kernel, padding = kernel // 2),
nn.SiLU(),
nn.Dropout(dropout),
Rearrange('b c n -> b n c'),
)
class DurationPitchPredictorTrunk(nn.Module):
def __init__(
self,
dim = 512,
depth = 10,
kernel_size = 3,
dim_context = None,
heads = 8,
dim_head = 64,
dropout = 0.2,
use_resnet_block = True,
num_convs_per_resnet_block = 2,
num_convolutions_per_block = 3,
use_flash_attn = False,
):
super().__init__()
self.layers = nn.ModuleList([])
conv_klass = ConvBlock if not use_resnet_block else partial(ResnetBlock, num_convs = num_convs_per_resnet_block)
for _ in range(depth):
layer = nn.ModuleList([
nn.Sequential(*[
conv_klass(dim, dim, kernel_size) for _ in range(num_convolutions_per_block)
]),
RMSNorm(dim),
Attention(
dim,
dim_context = dim_context,
heads = heads,
dim_head = dim_head,
dropout = dropout,
use_flash = use_flash_attn,
cross_attn_include_queries = True
)
])
self.layers.append(layer)
self.to_pred = nn.Sequential(
nn.Linear(dim, 1),
Rearrange('... 1 -> ...'),
nn.ReLU()
)
def forward(
self,
x,
encoded_prompts,
prompt_mask = None,
):
for conv, norm, attn in self.layers:
x = conv(x)
x = attn(norm(x), encoded_prompts, mask = prompt_mask) + x
return self.to_pred(x)
class DurationPitchPredictor(nn.Module):
def __init__(
self,
*,
dim,
num_phoneme_tokens = None,
tokenizer: Optional[Tokenizer] = None,
dim_encoded_prompts = None,
num_convolutions_per_block = 3,
use_resnet_block = True,
num_convs_per_resnet_block = 2,
depth = 10,
kernel_size = 3,
heads = 8,
dim_head = 64,
dim_hidden = 512,
dropout = 0.2,
use_flash_attn = False
):
super().__init__()
self.tokenizer = tokenizer
num_phoneme_tokens = default(num_phoneme_tokens, tokenizer.vocab_size if exists(tokenizer) else None)
dim_encoded_prompts = default(dim_encoded_prompts, dim)
self.phoneme_token_emb = nn.Embedding(num_phoneme_tokens, dim) if exists(num_phoneme_tokens) else nn.Identity()
self.to_pitch_pred = DurationPitchPredictorTrunk(
dim = dim_hidden,
depth = depth,
kernel_size = kernel_size,
dim_context = dim_encoded_prompts,
heads = heads,
dim_head = dim_head,
dropout = dropout,
use_resnet_block = use_resnet_block,
num_convs_per_resnet_block = num_convs_per_resnet_block,
num_convolutions_per_block = num_convolutions_per_block,
use_flash_attn = use_flash_attn,
)
self.to_duration_pred = copy.deepcopy(self.to_pitch_pred)
@beartype
def forward(
self,
x: Union[Tensor, List[str]],
encoded_prompts,
prompt_mask = None
):
if is_bearable(x, List[str]):
assert exists(self.tokenizer)
x = self.tokenizer.texts_to_tensor_ids(x)
x = self.phoneme_token_emb(x)
duration_pred, pitch_pred = map(lambda fn: fn(x, encoded_prompts = encoded_prompts, prompt_mask = prompt_mask), (self.to_duration_pred, self.to_pitch_pred))
return duration_pred, pitch_pred
# use perceiver resampler from flamingo paper - https://arxiv.org/abs/2204.14198
# in lieu of "q-k-v" attention with the m queries becoming key / values on which ddpm network is conditioned on
class PerceiverResampler(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_context = None,
num_latents = 64, # m in the paper
dim_head = 64,
heads = 8,
ff_mult = 4,
use_flash_attn = False
):
super().__init__()
dim_context = default(dim_context, dim)
self.proj_context = nn.Linear(dim_context, dim) if dim_context != dim else nn.Identity()
self.latents = nn.Parameter(torch.randn(num_latents, dim))
nn.init.normal_(self.latents, std = 0.02)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(
dim = dim,
dim_head = dim_head,
heads = heads,
use_flash = use_flash_attn,
cross_attn_include_queries = True
),
FeedForward(dim = dim, mult = ff_mult)
]))
self.norm = RMSNorm(dim)
def forward(self, x, mask = None):
batch = x.shape[0]
x = self.proj_context(x)
latents = repeat(self.latents, 'n d -> b n d', b = batch)
for attn, ff in self.layers:
latents = attn(latents, x, mask = mask) + latents
latents = ff(latents) + latents
return self.norm(latents)
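# Usage sketch (illustrative addition, not part of the original module):
# resamples a variable length prompt encoding down to a fixed 64 latents
resampler = PerceiverResampler(dim = 512, depth = 2)
prompt_latents = resampler(torch.randn(1, 200, 512)) # (1, 64, 512)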
# model, which is wavenet + transformer
class CausalConv1d(nn.Conv1d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
kernel_size, = self.kernel_size
dilation, = self.dilation
stride, = self.stride
assert stride == 1
self.causal_padding = dilation * (kernel_size - 1)
def forward(self, x):
causal_padded_x = F.pad(x, (self.causal_padding, 0), value = 0.)
return super().forward(causal_padded_x)
class WavenetResBlock(nn.Module):
def __init__(
self,
dim,
*,
dilation,
kernel_size = 3,
skip_conv = False,
dim_cond_mult = None
):
super().__init__()
self.cond = exists(dim_cond_mult)
self.to_time_cond = None
if self.cond:
self.to_time_cond = nn.Linear(dim * dim_cond_mult, dim * 2)
self.conv = CausalConv1d(dim, dim, kernel_size, dilation = dilation)
self.res_conv = CausalConv1d(dim, dim, 1)
self.skip_conv = CausalConv1d(dim, dim, 1) if skip_conv else None
def forward(self, x, t = None):
if self.cond:
assert exists(t)
t = self.to_time_cond(t)
t = rearrange(t, 'b c -> b c 1')
t_gamma, t_beta = t.chunk(2, dim = -2)
res = self.res_conv(x)
x = self.conv(x)
if self.cond:
x = x * t_gamma + t_beta
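# self-gating below is a single-conv variant of wavenet's tanh x sigmoid gated activation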
x = x.tanh() * x.sigmoid()
x = x + res
skip = None
if exists(self.skip_conv):
skip = self.skip_conv(x)
return x, skip
class WavenetStack(nn.Module):
def __init__(
self,
dim,
*,
layers,
kernel_size = 3,
has_skip = False,
dim_cond_mult = None
):
super().__init__()
dilations = 2 ** torch.arange(layers)
self.has_skip = has_skip
self.blocks = mlist([])
for dilation in dilations.tolist():
block = WavenetResBlock(
dim = dim,
kernel_size = kernel_size,
dilation = dilation,
skip_conv = has_skip,
dim_cond_mult = dim_cond_mult
)
self.blocks.append(block)
def forward(self, x, t):
residuals = []
skips = []
if isinstance(x, Tensor):
x = (x,) * len(self.blocks)
for block_input, block in zip(x, self.blocks):
residual, skip = block(block_input, t)
residuals.append(residual)
skips.append(skip)
if self.has_skip:
return torch.stack(skips)
return residuals
class Wavenet(nn.Module):
def __init__(
self,
dim,
*,
stacks,
layers,
init_conv_kernel = 3,
dim_cond_mult = None
):
super().__init__()
self.init_conv = CausalConv1d(dim, dim, init_conv_kernel)
self.stacks = mlist([])
for ind in range(stacks):
is_last = ind == (stacks - 1)
stack = WavenetStack(
dim,
layers = layers,
dim_cond_mult = dim_cond_mult,
has_skip = is_last
)
self.stacks.append(stack)
self.final_conv = CausalConv1d(dim, dim, 1)
def forward(self, x, t = None):
x = self.init_conv(x)
for stack in self.stacks:
x = stack(x, t)
return self.final_conv(x.sum(dim = 0))
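# Usage sketch (illustrative addition, not part of the original module): the
# wavenet operates channels-first over (batch, dim, seq)
wavenet = Wavenet(64, stacks = 2, layers = 4)
out = wavenet(torch.randn(1, 64, 128)) # (1, 64, 128)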
class RMSNorm(nn.Module):
def __init__(self, dim, scale = True, dim_cond = None):
super().__init__()
self.cond = exists(dim_cond)
self.to_gamma_beta = nn.Linear(dim_cond, dim * 2) if self.cond else None
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim)) if scale else None
def forward(self, x, cond = None):
gamma = default(self.gamma, 1)
out = F.normalize(x, dim = -1) * self.scale * gamma
if not self.cond:
return out
assert exists(cond)
gamma, beta = self.to_gamma_beta(cond).chunk(2, dim = -1)
gamma, beta = map(lambda t: rearrange(t, 'b d -> b 1 d'), (gamma, beta))
return out * gamma + beta
class ConditionableTransformer(nn.Module):
def __init__(
self,
dim,
*,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
ff_causal_conv = False,
dim_cond_mult = None,
cross_attn = False,
use_flash = False
):
super().__init__()
self.dim = dim
self.layers = mlist([])
cond = exists(dim_cond_mult)
maybe_adaptive_norm_kwargs = dict(scale = not cond, dim_cond = dim * dim_cond_mult) if cond else dict()
rmsnorm = partial(RMSNorm, **maybe_adaptive_norm_kwargs)
for _ in range(depth):
self.layers.append(mlist([
rmsnorm(dim),
Attention(dim = dim, dim_head = dim_head, heads = heads, use_flash = use_flash),
rmsnorm(dim) if cross_attn else None,
Attention(dim = dim, dim_head = dim_head, heads = heads, use_flash = use_flash) if cross_attn else None,
rmsnorm(dim),
FeedForward(dim = dim, mult = ff_mult, causal_conv = ff_causal_conv)
]))
self.to_pred = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim, bias = False)
)
def forward(
self,
x,
times = None,
context = None
):
t = times
for attn_norm, attn, cross_attn_norm, cross_attn, ff_norm, ff in self.layers:
res = x
x = attn_norm(x, cond = t)
x = attn(x) + res
if exists(cross_attn):
assert exists(context)
res = x
x = cross_attn_norm(x, cond = t)
x = cross_attn(x, context = context) + res
res = x
x = ff_norm(x, cond = t)
x = ff(x) + res
return self.to_pred(x)
class Model(nn.Module):
@beartype
def __init__(
self,
dim,
*,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
wavenet_layers = 8,
wavenet_stacks = 4,
dim_cond_mult = 4,
use_flash_attn = True,
dim_prompt = None,
num_latents_m = 32, # number of latents to be perceiver resampled ('q-k-v' with 'm' queries in the paper)
resampler_depth = 2,
cond_drop_prob = 0.,
condition_on_prompt = False
):
super().__init__()
self.dim = dim
# time condition
dim_time = dim * dim_cond_mult
self.to_time_cond = Sequential(
LearnedSinusoidalPosEmb(dim),
nn.Linear(dim + 1, dim_time),
nn.SiLU()
)
# prompt condition
self.cond_drop_prob = cond_drop_prob # for classifier free guidance
self.condition_on_prompt = condition_on_prompt
self.to_prompt_cond = None
if self.condition_on_prompt:
self.null_prompt_cond = nn.Parameter(torch.randn(dim_time))
self.null_prompt_tokens = nn.Parameter(torch.randn(num_latents_m, dim))
nn.init.normal_(self.null_prompt_cond, std = 0.02)
nn.init.normal_(self.null_prompt_tokens, std = 0.02)
self.to_prompt_cond = Sequential(
Reduce('b n d -> b d', 'mean'),
nn.Linear(dim_prompt, dim_time),
nn.SiLU()
)
self.perceiver_resampler = PerceiverResampler(
dim = dim,
dim_context = dim_prompt,
num_latents = num_latents_m,
depth = resampler_depth,
dim_head = dim_head,
heads = heads,
use_flash_attn = use_flash_attn
)
# conditioning includes time and optionally prompt
dim_cond_mult = dim_cond_mult * (2 if condition_on_prompt else 1)
# wavenet
self.wavenet = Wavenet(
dim = dim,
stacks = wavenet_stacks,
layers = wavenet_layers,
dim_cond_mult = dim_cond_mult
)
# transformer
self.transformer = ConditionableTransformer(
dim = dim,
depth = depth,
dim_head = dim_head,
heads = heads,
ff_mult = ff_mult,
ff_causal_conv = True,
dim_cond_mult = dim_cond_mult,
use_flash = use_flash_attn,
cross_attn = condition_on_prompt
)
@property
def device(self):
return next(self.parameters()).device
def forward_with_cond_scale(
self,
*args,
cond_scale = 1.,
**kwargs
):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1.:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
self,
x,
times,
prompt = None,
prompt_mask = None,
cond = None,
cond_drop_prob = None
):
b = x.shape[0]
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
drop_mask = prob_mask_like((b,), cond_drop_prob, self.device)
t = self.to_time_cond(times)
c = None
if exists(self.to_prompt_cond):
assert exists(prompt)
prompt_cond = self.to_prompt_cond(prompt)
prompt_cond = torch.where(
rearrange(drop_mask, 'b -> b 1'),
self.null_prompt_cond,
prompt_cond,
)
t = torch.cat((t, prompt_cond), dim = -1)
resampled_prompt_tokens = self.perceiver_resampler(prompt, mask = prompt_mask)
c = torch.where(
rearrange(drop_mask, 'b -> b 1 1'),
self.null_prompt_tokens,
resampled_prompt_tokens
)
x = rearrange(x, 'b n d -> b d n')
x = self.wavenet(x, t)
x = rearrange(x, 'b d n -> b n d')
x = self.transformer(x, t, context = c)
return x
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4, causal_conv = False):
dim_inner = int(dim * mult * 2 / 3)
conv = None
if causal_conv:
conv = nn.Sequential(
Rearrange('b n d -> b d n'),
CausalConv1d(dim_inner, dim_inner, 3),
Rearrange('b d n -> b n d'),
)
return Sequential(
nn.Linear(dim, dim_inner * 2),
GEGLU(),
conv,
nn.Linear(dim_inner, dim)
)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_context = None,
causal = False,
dim_head = 64,
heads = 8,
dropout = 0.,
use_flash = False,
cross_attn_include_queries = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.cross_attn_include_queries = cross_attn_include_queries
dim_inner = dim_head * heads
dim_context = default(dim_context, dim)
self.attend = Attend(causal = causal, dropout = dropout, use_flash = use_flash)
self.to_q = nn.Linear(dim, dim_inner, bias = False)
self.to_kv = nn.Linear(dim_context, dim_inner * 2, bias = False)
self.to_out = nn.Linear(dim_inner, dim, bias = False)
def forward(self, x, context = None, mask = None):
h, has_context = self.heads, exists(context)
context = default(context, x)
if has_context and self.cross_attn_include_queries:
context = torch.cat((x, context), dim = -2)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
out = self.attend(q, k, v, mask = mask)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# transformer encoder
class Transformer(nn.Module):
def __init__(
self,
dim,
*,
depth,
causal = False,
dim_head = 64,
heads = 8,
use_flash = False,
dropout = 0.,
ff_mult = 4,
final_norm = False
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
RMSNorm(dim),
Attention(
dim,
causal = causal,
dim_head = dim_head,
heads = heads,
dropout = dropout,
use_flash = use_flash
),
RMSNorm(dim),
FeedForward(
dim,
mult = ff_mult
)
]))
self.norm = RMSNorm(dim) if final_norm else nn.Identity()
def forward(self, x, mask = None):
for attn_norm, attn, ff_norm, ff in self.layers:
x = attn(attn_norm(x), mask = mask) + x
x = ff(ff_norm(x)) + x
return self.norm(x)
# tensor helper functions
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def safe_div(numer, denom):
return numer / denom.clamp(min = 1e-10)
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
# noise schedules
def simple_linear_schedule(t, clip_min = 1e-9):
return (1 - t).clamp(min = clip_min)
def cosine_schedule(t, start = 0, end = 1, tau = 1, clip_min = 1e-9):
power = 2 * tau
v_start = math.cos(start * math.pi / 2) ** power
v_end = math.cos(end * math.pi / 2) ** power
output = torch.cos((t * (end - start) + start) * math.pi / 2) ** power # t is a tensor of times, so torch.cos rather than math.cos
output = (v_end - output) / (v_end - v_start)
return output.clamp(min = clip_min)
def sigmoid_schedule(t, start = -3, end = 3, tau = 1, clamp_min = 1e-9):
v_start = torch.tensor(start / tau).sigmoid()
v_end = torch.tensor(end / tau).sigmoid()
gamma = (-((t * (end - start) + start) / tau).sigmoid() + v_end) / (v_end - v_start)
return gamma.clamp_(min = clamp_min, max = 1.)
# converting gamma to alpha, sigma or logsnr
def gamma_to_alpha_sigma(gamma, scale = 1):
return torch.sqrt(gamma) * scale, torch.sqrt(1 - gamma)
def gamma_to_log_snr(gamma, scale = 1, eps = 1e-5):
return log(gamma * (scale ** 2) / (1 - gamma), eps = eps)
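# Sanity-check sketch (illustrative addition, not part of the original module):
# with the default scale of 1, alpha ** 2 + sigma ** 2 = 1 for any gamma in (0, 1)
gammas = torch.rand(8)
alphas, sigmas = gamma_to_alpha_sigma(gammas)
assert torch.allclose(alphas ** 2 + sigmas ** 2, torch.ones(8), atol = 1e-6)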
# gaussian diffusion
class NaturalSpeech2(nn.Module):
@beartype
def __init__(
self,
model: Model,
codec: Optional[Union[SoundStream, EncodecWrapper]] = None,
*,
tokenizer: Optional[Tokenizer] = None,
target_sample_hz = None,
timesteps = 1000,
use_ddim = True,
noise_schedule = 'sigmoid',
objective = 'v',
schedule_kwargs: dict = dict(),
time_difference = 0.,
min_snr_loss_weight = True,
min_snr_gamma = 5,
train_prob_self_cond = 0.9,
rvq_cross_entropy_loss_weight = 0., # default this to off until we are sure it is working. not totally sold that this is critical
dim_codebook: int = 128,
duration_pitch_dim: int = 512,
aligner_dim_in: int = 80,
aligner_dim_hidden: int = 512,
aligner_attn_channels: int = 80,
num_phoneme_tokens: int = 150,
pitch_emb_dim: int = 256,
pitch_emb_pp_hidden_dim: int = 512,
calc_pitch_with_pyworld = True, # pyworld or kaldi from torchaudio
mel_hop_length = 160,
audio_to_mel_kwargs: dict = dict(),
scale = 1., # this will be set to < 1. for better convergence when training on higher resolution images
duration_loss_weight = 1.,
pitch_loss_weight = 1.,
aligner_loss_weight = 1.
):
super().__init__()
self.conditional = model.condition_on_prompt
# model and codec
self.model = model
self.codec = codec
assert exists(codec) or exists(target_sample_hz)
self.target_sample_hz = target_sample_hz
self.seq_len_multiple_of = None
if exists(codec):
self.target_sample_hz = codec.target_sample_hz
self.seq_len_multiple_of = codec.seq_len_multiple_of
# preparation for conditioning
if self.conditional:
if exists(self.target_sample_hz):
audio_to_mel_kwargs.update(sampling_rate = self.target_sample_hz)
self.mel_hop_length = mel_hop_length
self.audio_to_mel = AudioToMel(
n_mels = aligner_dim_in,
hop_length = mel_hop_length,
**audio_to_mel_kwargs
)
self.calc_pitch_with_pyworld = calc_pitch_with_pyworld
self.phoneme_enc = PhonemeEncoder(tokenizer=tokenizer, num_tokens=num_phoneme_tokens)
self.prompt_enc = SpeechPromptEncoder(dim_codebook=dim_codebook)
self.duration_pitch = DurationPitchPredictor(dim=duration_pitch_dim)
self.aligner = Aligner(dim_in=aligner_dim_in, dim_hidden=aligner_dim_hidden, attn_channels=aligner_attn_channels)
self.pitch_emb = nn.Embedding(pitch_emb_dim, pitch_emb_pp_hidden_dim)
self.aligner_loss = ForwardSumLoss()
# rest of ddpm
assert not exists(codec) or model.dim == codec.codebook_dim, f'transformer model dimension {model.dim} must be equal to codec dimension {codec.codebook_dim}'
self.dim = codec.codebook_dim if exists(codec) else model.dim
assert objective in {'x0', 'eps', 'v'}, 'objective must be one of x0 (predict start), eps (predict noise) or v (predict velocity)'
self.objective = objective
if noise_schedule == "linear":
self.gamma_schedule = simple_linear_schedule
elif noise_schedule == "cosine":
self.gamma_schedule = cosine_schedule
elif noise_schedule == "sigmoid":
self.gamma_schedule = sigmoid_schedule
else:
raise ValueError(f'invalid noise schedule {noise_schedule}')
# the main finding presented in Ting Chen's paper - that higher resolution images require more noise for better training
assert scale <= 1, 'scale must be less than or equal to 1'
self.scale = scale
# gamma schedules
self.gamma_schedule = partial(self.gamma_schedule, **schedule_kwargs)
self.timesteps = timesteps
self.use_ddim = use_ddim
# proposed in the paper, summed to time_next
# as a way to fix a deficiency in self-conditioning and lower FID when the number of sampling timesteps is < 400
self.time_difference = time_difference
# probability for self conditioning during training
self.train_prob_self_cond = train_prob_self_cond
# min snr loss weight
self.min_snr_loss_weight = min_snr_loss_weight
self.min_snr_gamma = min_snr_gamma
# weight of the cross entropy loss to residual vq codebooks
self.rvq_cross_entropy_loss_weight = rvq_cross_entropy_loss_weight
# loss weight for duration and pitch
self.duration_loss_weight = duration_loss_weight
self.pitch_loss_weight = pitch_loss_weight
self.aligner_loss_weight = aligner_loss_weight
@property
def device(self):
return next(self.model.parameters()).device
def print(self, s):
return print(s) # the diffusion wrapper has no accelerator of its own, so fall back to the builtin
def get_sampling_timesteps(self, batch, *, device):
times = torch.linspace(1., 0., self.timesteps + 1, device = device)
times = repeat(times, 't -> b t', b = batch)
times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)
times = times.unbind(dim = -1)
return times
@torch.no_grad()
def ddpm_sample(self, shape, prompt = None, time_difference = None, cond_scale = 1., cond = None):
batch, device = shape[0], self.device
time_difference = default(time_difference, self.time_difference)
time_pairs = self.get_sampling_timesteps(batch, device = device)
audio = torch.randn(shape, device=device)
x_start = None
last_latents = None
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step', total = self.timesteps):
# add the time delay
time_next = (time_next - time_difference).clamp(min = 0.) # use the local override, matching ddim_sample
noise_cond = time
# get predicted x0
model_output = self.model.forward_with_cond_scale(audio, noise_cond, prompt = prompt, cond_scale = cond_scale, cond = cond)
# get log(snr)
gamma = self.gamma_schedule(time)
gamma_next = self.gamma_schedule(time_next)
gamma, gamma_next = map(partial(right_pad_dims_to, audio), (gamma, gamma_next))
# get alpha sigma of time and next time
alpha, sigma = gamma_to_alpha_sigma(gamma, self.scale)
alpha_next, sigma_next = gamma_to_alpha_sigma(gamma_next, self.scale)
# calculate x0 and noise
if self.objective == 'x0':
x_start = model_output
elif self.objective == 'eps':
x_start = safe_div(audio - sigma * model_output, alpha)
elif self.objective == 'v':
x_start = alpha * audio - sigma * model_output
# derive posterior mean and variance
log_snr, log_snr_next = map(gamma_to_log_snr, (gamma, gamma_next))
c = -torch.special.expm1(log_snr - log_snr_next)
mean = alpha_next * (audio * (1 - c) / alpha + c * x_start)
variance = (sigma_next ** 2) * c
log_variance = log(variance)
# get noise
noise = torch.where(
rearrange(time_next > 0, 'b -> b 1 1'), # audio latents are (batch, length, dim), so broadcast over two trailing dims
torch.randn_like(audio),
torch.zeros_like(audio)
)
audio = mean + (0.5 * log_variance).exp() * noise
return audio
@torch.no_grad()
def ddim_sample(self, shape, prompt = None, time_difference = None, cond_scale = 1., cond = None):
batch, device = shape[0], self.device
time_difference = default(time_difference, self.time_difference)
time_pairs = self.get_sampling_timesteps(batch, device = device)
audio = torch.randn(shape, device = device)
x_start = None
last_latents = None
for times, times_next in tqdm(time_pairs, desc = 'sampling loop time step'):
# get times and noise levels
gamma = self.gamma_schedule(times)
gamma_next = self.gamma_schedule(times_next)
padded_gamma, padded_gamma_next = map(partial(right_pad_dims_to, audio), (gamma, gamma_next))
alpha, sigma = gamma_to_alpha_sigma(padded_gamma, self.scale)
alpha_next, sigma_next = gamma_to_alpha_sigma(padded_gamma_next, self.scale)
# add the time delay
times_next = (times_next - time_difference).clamp(min = 0.)
# predict x0
model_output = self.model.forward_with_cond_scale(audio, times, prompt = prompt, cond_scale = cond_scale, cond = cond)
# calculate x0 and noise
if self.objective == 'x0':
x_start = model_output
elif self.objective == 'eps':
x_start = safe_div(audio - sigma * model_output, alpha)
elif self.objective == 'v':
x_start = alpha * audio - sigma * model_output
# get predicted noise
pred_noise = safe_div(audio - alpha * x_start, sigma)
# calculate x next
audio = x_start * alpha_next + pred_noise * sigma_next
return audio
def process_prompt(self, prompt = None):
if not exists(prompt):
return None
assert self.model.condition_on_prompt
is_raw_prompt = prompt.ndim == 2
assert not (is_raw_prompt and not exists(self.codec)), 'codec must be passed in if one were to train on raw prompt'
if is_raw_prompt:
with torch.no_grad():
self.codec.eval()
prompt, _, _ = self.codec(prompt, curtail_from_left = True, return_encoded = True)
return prompt
def expand_encodings(self, phoneme_enc, attn, pitch):
expanded_dur = einsum('k l m n, k j m -> k j n', attn, phoneme_enc)
pitch_emb = self.pitch_emb(rearrange(f0_to_coarse(pitch), 'b 1 t -> b t'))
pitch_emb = rearrange(pitch_emb, 'b t d -> b d t')
expanded_pitch = einsum('k l m n, k j m -> k j n', attn, pitch_emb)
expanded_encodings = expanded_dur + expanded_pitch
return expanded_encodings
@torch.no_grad()
def sample(
self,
*,
length,
prompt = None,
batch_size = 1,
cond_scale = 1.,
text = None,
text_lens = None,
):
sample_fn = self.ddpm_sample if not self.use_ddim else self.ddim_sample
prompt_enc = cond = None
if self.conditional:
assert exists(prompt) and exists(text)
prompt = self.process_prompt(prompt)
prompt_enc = self.prompt_enc(prompt)
phoneme_enc = self.phoneme_enc(text)
duration, pitch = self.duration_pitch(phoneme_enc, prompt_enc)
pitch = rearrange(pitch, 'b n -> b 1 n')
aln_mask = generate_mask_from_repeats(duration).float()
cond = self.expand_encodings(rearrange(phoneme_enc, 'b n d -> b d n'), rearrange(aln_mask, 'b n c -> b 1 n c'), pitch)
if exists(prompt):
batch_size = prompt.shape[0]
audio = sample_fn(
(batch_size, length, self.dim),
prompt = prompt_enc,
cond = cond,
cond_scale = cond_scale
)
if exists(self.codec):
    audio = self.codec.decode(audio)
    # only decoded waveforms carry a channel dim to squeeze; without a codec
    # the (batch, length, dim) latents are returned as-is
    if audio.ndim == 3:
        audio = rearrange(audio, 'b 1 n -> b n')
return audio
def forward(
self,
audio,
text = None,
text_lens = None,
mel = None,
mel_lens = None,
codes = None,
prompt = None,
pitch = None,
*args,
**kwargs
):
batch, is_raw_audio = audio.shape[0], audio.ndim == 2
# compute the prompt encoding and cond
prompt_enc = None
cond = None
duration_pitch_loss = 0.
if self.conditional:
batch = prompt.shape[0]
assert exists(text)
text_max_length = text.shape[-1]
if not exists(text_lens):
text_lens = torch.full((batch,), text_max_length, device = self.device, dtype = torch.long)
text_lens.clamp_(max = text_max_length)
text_mask = rearrange(create_mask(text_lens, text_max_length), 'b n -> b 1 n')
prompt = self.process_prompt(prompt)
prompt_enc = self.prompt_enc(prompt)
phoneme_enc = self.phoneme_enc(text)
# process pitch with kaldi
if not exists(pitch):
assert exists(audio) and audio.ndim == 2
assert exists(self.target_sample_hz)
if self.calc_pitch_with_pyworld:
pitch = compute_pitch_pyworld(
audio,
sample_rate = self.target_sample_hz,
hop_length = self.mel_hop_length
)
else:
pitch = compute_pitch_pytorch(audio, self.target_sample_hz)
pitch = rearrange(pitch, 'b n -> b 1 n')
# process mel
if not exists(mel):
assert exists(audio) and audio.ndim == 2
mel = self.audio_to_mel(audio)
if exists(pitch):
mel = mel[..., :pitch.shape[-1]]
mel_max_length = mel.shape[-1]
if not exists(mel_lens):
mel_lens = torch.full((batch,), mel_max_length, device = self.device, dtype = torch.long)
mel_lens.clamp_(max = mel_max_length)
mel_mask = rearrange(create_mask(mel_lens, mel_max_length), 'b n -> b 1 n')
# alignment
aln_hard, aln_soft, aln_log, aln_mas = self.aligner(phoneme_enc, text_mask, mel, mel_mask)
duration_pred, pitch_pred = self.duration_pitch(phoneme_enc, prompt_enc)
pitch = average_over_durations(pitch, aln_hard)
cond = self.expand_encodings(rearrange(phoneme_enc, 'b n d -> b d n'), rearrange(aln_mas, 'b n c -> b 1 n c'), pitch)
# pitch and duration loss
duration_loss = F.l1_loss(aln_hard.float(), duration_pred) # aln_hard is an int tensor, cast for l1_loss
pitch = rearrange(pitch, 'b 1 d -> b d')
pitch_loss = F.l1_loss(pitch, pitch_pred)
align_loss = self.aligner_loss(aln_log, text_lens, mel_lens)
# weigh the losses
            duration_pitch_loss = (duration_loss * self.duration_loss_weight) \
                + (pitch_loss * self.pitch_loss_weight) \
                + (align_loss * self.aligner_loss_weight)
# automatically encode raw audio to residual vq with codec
assert not (is_raw_audio and not exists(self.codec)), 'codec must be passed in if one were to train on raw audio'
if is_raw_audio:
with torch.no_grad():
self.codec.eval()
audio, codes, _ = self.codec(audio, return_encoded = True)
# shapes and device
batch, n, d, device = *audio.shape, self.device
assert d == self.dim, f'codec codebook dimension {d} must match model dimensions {self.dim}'
# sample random times
times = torch.zeros((batch,), device = device).float().uniform_(0, 1.)
# noise sample
noise = torch.randn_like(audio)
gamma = self.gamma_schedule(times)
padded_gamma = right_pad_dims_to(audio, gamma)
alpha, sigma = gamma_to_alpha_sigma(padded_gamma, self.scale)
noised_audio = alpha * audio + sigma * noise
# predict and take gradient step
pred = self.model(noised_audio, times, prompt = prompt_enc, cond = cond)
if self.objective == 'eps':
target = noise
elif self.objective == 'x0':
target = audio
elif self.objective == 'v':
target = alpha * noise - sigma * audio
loss = F.mse_loss(pred, target, reduction = 'none')
loss = reduce(loss, 'b ... -> b', 'mean')
# min snr loss weight
snr = (alpha * alpha) / (sigma * sigma)
maybe_clipped_snr = snr.clone()
if self.min_snr_loss_weight:
maybe_clipped_snr.clamp_(max = self.min_snr_gamma)
if self.objective == 'eps':
loss_weight = maybe_clipped_snr / snr
elif self.objective == 'x0':
loss_weight = maybe_clipped_snr
elif self.objective == 'v':
loss_weight = maybe_clipped_snr / (snr + 1)
loss = (loss * loss_weight).mean()
# cross entropy loss to codebooks
if self.rvq_cross_entropy_loss_weight == 0 or not exists(codes):
            return loss + duration_pitch_loss
if self.objective == 'x0':
x_start = pred
elif self.objective == 'eps':
x_start = safe_div(audio - sigma * pred, alpha)
elif self.objective == 'v':
x_start = alpha * audio - sigma * pred
_, ce_loss = self.codec.rq(x_start, codes)
return loss + (self.rvq_cross_entropy_loss_weight * ce_loss) + duration_pitch_loss
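# usage sketch (illustrative, not part of the original file) - unconditional
# training on raw audio, following the repository readme; `Model` and
# `EncodecWrapper` are assumed to be defined earlier in this file
#   codec = EncodecWrapper()
#   model = Model(dim = 128, depth = 6)
#   diffusion = NaturalSpeech2(model = model, codec = codec, timesteps = 1000)
#   raw_audio = torch.randn(4, 327680)
#   loss = diffusion(raw_audio)
#   loss.backward()
#   generated_audio = diffusion.sample(length = 1024)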
# trainer
def cycle(dl):
while True:
for data in dl:
yield data
class Trainer(object):
def __init__(
self,
diffusion_model: NaturalSpeech2,
*,
dataset: Optional[Dataset] = None,
folder = None,
train_batch_size = 16,
gradient_accumulate_every = 1,
train_lr = 1e-4,
train_num_steps = 100000,
ema_update_every = 10,
ema_decay = 0.995,
adam_betas = (0.9, 0.99),
save_and_sample_every = 1000,
num_samples = 1,
results_folder = './results',
amp = False,
mixed_precision_type = 'fp16',
use_ema = True,
split_batches = True,
dataloader = None,
data_max_length = None,
data_max_length_seconds = 2,
sample_length = None
):
super().__init__()
# accelerator
self.accelerator = Accelerator(
split_batches = split_batches,
mixed_precision = mixed_precision_type if amp else 'no'
)
# model
self.model = diffusion_model
assert exists(diffusion_model.codec)
self.dim = diffusion_model.dim
# training hyperparameters
self.batch_size = train_batch_size
self.gradient_accumulate_every = gradient_accumulate_every
self.train_num_steps = train_num_steps
# dataset and dataloader
dl = dataloader
if not exists(dl):
assert exists(dataset) or exists(folder)
if exists(dataset):
self.ds = dataset
elif exists(folder):
# create dataset
if exists(data_max_length_seconds):
assert not exists(data_max_length)
data_max_length = int(data_max_length_seconds * diffusion_model.target_sample_hz)
else:
assert exists(data_max_length)
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = diffusion_model.target_sample_hz,
seq_len_multiple_of = diffusion_model.seq_len_multiple_of
)
dl = DataLoader(
self.ds,
batch_size = train_batch_size,
shuffle = True,
pin_memory = True,
num_workers = cpu_count()
)
dl = self.accelerator.prepare(dl)
self.dl = cycle(dl)
# optimizer
self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)
# for logging results in a folder periodically
self.use_ema = use_ema
self.ema = None
if self.accelerator.is_main_process and use_ema:
# make sure codec is not part of the EMA
            # encodec does not seem to be deepcopyable, so this is a necessary hack
codec = diffusion_model.codec
diffusion_model.codec = None
self.ema = EMA(
diffusion_model,
beta = ema_decay,
update_every = ema_update_every,
ignore_startswith_names = set(['codec.'])
).to(self.device)
diffusion_model.codec = codec
self.ema.ema_model.codec = codec
# sampling hyperparameters
self.sample_length = default(sample_length, data_max_length)
self.num_samples = num_samples
self.save_and_sample_every = save_and_sample_every
# results folder
self.results_folder = Path(results_folder)
self.results_folder.mkdir(exist_ok = True)
# step counter state
self.step = 0
# prepare model, dataloader, optimizer with accelerator
self.model, self.opt = self.accelerator.prepare(self.model, self.opt)
def print(self, msg):
return self.accelerator.print(msg)
@property
def unwrapped_model(self):
return self.accelerator.unwrap_model(self.model)
@property
def device(self):
return self.accelerator.device
def save(self, milestone):
if not self.accelerator.is_local_main_process:
return
data = {
'step': self.step,
'model': self.accelerator.get_state_dict(self.model),
'opt': self.opt.state_dict(),
            'ema': self.ema.state_dict() if exists(self.ema) else None,
'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None,
'version': __version__
}
torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
def load(self, milestone):
accelerator = self.accelerator
device = accelerator.device
data = torch.load(str(self.results_folder / f'model-{milestone}.pt'), map_location=device)
model = self.accelerator.unwrap_model(self.model)
model.load_state_dict(data['model'])
self.step = data['step']
self.opt.load_state_dict(data['opt'])
        if self.accelerator.is_main_process and exists(self.ema):
self.ema.load_state_dict(data["ema"])
if 'version' in data:
print(f"loading from version {data['version']}")
if exists(self.accelerator.scaler) and exists(data['scaler']):
self.accelerator.scaler.load_state_dict(data['scaler'])
def train(self):
accelerator = self.accelerator
device = accelerator.device
with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:
while self.step < self.train_num_steps:
total_loss = 0.
for _ in range(self.gradient_accumulate_every):
data = next(self.dl).to(device)
with self.accelerator.autocast():
loss = self.model(data)
loss = loss / self.gradient_accumulate_every
total_loss += loss.item()
self.accelerator.backward(loss)
accelerator.clip_grad_norm_(self.model.parameters(), 1.0)
pbar.set_description(f'loss: {total_loss:.4f}')
accelerator.wait_for_everyone()
self.opt.step()
self.opt.zero_grad()
accelerator.wait_for_everyone()
self.step += 1
if accelerator.is_main_process:
                    if exists(self.ema):
                        self.ema.update()
if divisible_by(self.step, self.save_and_sample_every):
milestone = self.step // self.save_and_sample_every
models = [(self.unwrapped_model, str(self.step))]
if self.use_ema:
models.append((self.ema.ema_model, f'{self.step}.ema'))
for model, label in models:
model.eval()
with torch.no_grad():
generated = model.sample(
batch_size = self.num_samples,
length = self.sample_length
)
for ind, t in enumerate(generated):
                                filename = str(self.results_folder / f'sample_{label}_{ind}.flac')
t = rearrange(t, 'n -> 1 n')
torchaudio.save(filename, t.cpu().detach(), self.unwrapped_model.target_sample_hz)
self.print(f'{self.step}: saving to {str(self.results_folder)}')
self.save(milestone)
pbar.update(1)
self.print('training complete')
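# usage sketch (illustrative, not part of the original file), with `diffusion`
# constructed as in the sketch above
#   trainer = Trainer(
#       diffusion_model = diffusion,
#       folder = '/path/to/speech',
#       train_batch_size = 16,
#       gradient_accumulate_every = 2
#   )
#   trainer.train()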
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/naturalspeech2_pytorch.py |
naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/__init__.py |
|
import re
from pathlib import Path
from naturalspeech2_pytorch.utils.expand.abbreviations import AbbreviationExpander
from naturalspeech2_pytorch.utils.expand.number_norm import NumberNormalizer
from naturalspeech2_pytorch.utils.expand.time_norm import TimeExpander
CURRENT_DIR = Path(__file__).resolve().parent
class TextProcessor:
def __init__(self, lang="en"):
self.lang = lang
self._whitespace_re = re.compile(r"\s+")
        # abbreviation, time, and number expanders
self.ab_expander = AbbreviationExpander(str(CURRENT_DIR / 'expand/abbreviations.csv'))
self.time_expander = TimeExpander()
self.num_normalizer = NumberNormalizer()
# Add currency conversion rates
symbol = '$'
        conversion_rates = {0.01: "cent", 0.02: "cents", 1: "dollar", 2: "dollars"}
self.num_normalizer.add_currency(symbol, conversion_rates)
def lowercase(self, text):
return text.lower()
def collapse_whitespace(self, text):
return re.sub(self._whitespace_re, " ", text).strip()
def remove_aux_symbols(self, text):
text = re.sub(r"[\<\>\(\)\[\]\"]+", "", text)
return text
def phoneme_cleaners(self, text, language = 'en'):
text = self.time_expander.expand_time(text, language=language)
text = self.num_normalizer.normalize_numbers(text, language=language)
text = self.ab_expander.replace_text_abbreviations(text, language=language)
text = self.remove_aux_symbols(text)
text = self.collapse_whitespace(text)
return text
if __name__ == "__main__":
# Create an instance for English
text_processor_en = TextProcessor(lang="en")
# Process English text
english_text = "Hello, Mr. Example, this is 9:30 am and my number is 30."
processed_english_text = text_processor_en.phoneme_cleaners(english_text, language='en')
print(processed_english_text)
# Create an instance for Spanish
text_processor_es = TextProcessor(lang="es")
# Process Spanish text
spanish_text = "Hola, Sr. Ejemplo, son las 9:30 am y mi número es el 30."
processed_spanish_text = text_processor_es.phoneme_cleaners(spanish_text, language='es')
print(processed_spanish_text)
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/cleaner.py |
import torch
from torch import Tensor
from typing import Callable, List, Optional, Tuple
from torch.nn.utils.rnn import pad_sequence
from naturalspeech2_pytorch.utils.cleaner import TextProcessor
from naturalspeech2_pytorch.utils.phonemizers.espeak_wrapper import ESpeak
# default phoneme set
_vowels = "iyɨʉɯuɪʏʊeøɘəɵɤoɛœɜɞʌɔæɐaɶɑɒᵻ"
_non_pulmonic_consonants = "ʘɓǀɗǃʄǂɠǁʛ"
_pulmonic_consonants = "pbtdʈɖcɟkɡqɢʔɴŋɲɳnɱmʙrʀⱱɾɽɸβfvθðszʃʒʂʐçʝxɣχʁħʕhɦɬɮʋɹɻjɰlɭʎʟ"
_suprasegmentals = "'̃ˈˌːˑ. ,-"
_other_symbols = "ʍwɥʜʢʡɕʑɺɧʲ"
_diacritics = "ɚ˞ɫ"
_phonemes = _vowels + _non_pulmonic_consonants + _pulmonic_consonants + _suprasegmentals + _other_symbols + _diacritics
# default map
LANGUAGE_MAP = {
'en-us': 'en',
'fr-fr': 'es',
'hi': 'hi'
}
# functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# main class
class Tokenizer:
def __init__(
self,
vocab = _phonemes,
text_cleaner: Optional[Callable] = None,
phonemizer: Optional[Callable] = None,
default_lang = "en-us",
add_blank: bool = False,
use_eos_bos = False,
pad_id = -1
):
self.text_cleaner = default(text_cleaner, TextProcessor().phoneme_cleaners)
self.add_blank = add_blank
self.use_eos_bos = use_eos_bos
self.pad_id = pad_id
self.vocab = vocab
self.vocab_size = len(vocab)
self.char_to_id = {char: idx for idx, char in enumerate(self.vocab)}
self.id_to_char = {idx: char for idx, char in enumerate(self.vocab)}
self.phonemizer = phonemizer
if not exists(self.phonemizer):
self.phonemizer = ESpeak(language = default_lang)
self.language = self.phonemizer.language
self.not_found_characters = []
@property
def espeak_language(self):
return LANGUAGE_MAP.get(self.language, None)
def encode(self, text: str) -> List[int]:
"""Encodes a string of text as a sequence of IDs."""
token_ids = []
for char in text:
try:
idx = self.char_to_id[char]
token_ids.append(idx)
except KeyError:
# discard but store not found characters
if char not in self.not_found_characters:
self.not_found_characters.append(char)
print(text)
print(f" [!] Character {repr(char)} not found in the vocabulary. Discarding it.")
return token_ids
def decode(self, token_ids: List[int]) -> str:
"""Decodes a sequence of IDs to a string of text."""
text = ""
for token_id in token_ids:
text += self.id_to_char[token_id]
return text
def text_to_ids(
self,
text: str,
language: str = None
) -> Tuple[List[int], str, str]:
"""Converts a string of text to a sequence of token IDs.
Args:
text(str):
The text to convert to token IDs.
language(str):
The language code of the text. Defaults to None.
TODO:
- Add support for language-specific processing.
        1. Text normalization
2. Phonemization (if use_phonemes is True)
3. Add blank char between characters
4. Add BOS and EOS characters
5. Text to token IDs
"""
language = default(language, self.espeak_language)
cleaned_text = None
if self.text_cleaner is not None:
text = self.text_cleaner(text, language=language)
cleaned_text = text
phonemized = self.phonemizer.phonemize(text, separator="", language=language)
if self.add_blank:
phonemized = self.intersperse_blank_char(phonemized, True)
if self.use_eos_bos:
phonemized = self.pad_with_bos_eos(phonemized)
return self.encode(phonemized), cleaned_text, phonemized
def texts_to_tensor_ids(self, texts: List[str], language: str = None) -> Tensor:
all_ids = []
for text in texts:
ids, *_ = self.text_to_ids(text, language = language)
all_ids.append(torch.tensor(ids))
return pad_sequence(all_ids, batch_first = True, padding_value = self.pad_id)
def ids_to_text(self, id_sequence: List[int]) -> str:
"""Converts a sequence of token IDs to a string of text."""
return self.decode(id_sequence)
def pad_with_bos_eos(self, char_sequence: List[str]):
"""Pads a sequence with the special BOS and EOS characters."""
return [self.characters.bos] + list(char_sequence) + [self.characters.eos]
def intersperse_blank_char(self, char_sequence: List[str], use_blank_char: bool = False):
"""Intersperses the blank character between characters in a sequence.
Use the ```blank``` character if defined else use the ```pad``` character.
"""
char_to_use = self.characters.blank if use_blank_char else self.characters.pad
result = [char_to_use] * (len(char_sequence) * 2 + 1)
result[1::2] = char_sequence
return result
if __name__ == "__main__":
txt_cleaner = TextProcessor()
tokenizer = Tokenizer(vocab = _phonemes, text_cleaner = txt_cleaner.phoneme_cleaners, phonemizer = ESpeak(language="en-us"))
print(tokenizer.text_to_ids("Hello, Mr. Example, this is 9:30 am and my number is 30.", language="en"))
tokenizer = Tokenizer(vocab = _phonemes, text_cleaner = txt_cleaner.phoneme_cleaners, phonemizer = ESpeak(language="fr-fr"))
print(tokenizer.text_to_ids("Hola, Sr. Ejemplo, son las 9:30 am y mi número es el 30.", language="es"))
tokenizer = Tokenizer(vocab = _phonemes, text_cleaner = txt_cleaner.phoneme_cleaners, phonemizer = ESpeak(language="hi"))
print(tokenizer.text_to_ids("हैलो, मिस्टर उदाहरण, यह सुबह 9:30 बजे है और मेरा नंबर 30 है।", language="hi"))
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/tokenizer.py |
import torch
from einops import repeat, rearrange
def average_over_durations(values, durs):
"""
- in:
- values: B, 1, T_de
- durs: B, T_en
- out:
- avg: B, 1, T_en
"""
durs_cums_ends = torch.cumsum(durs, dim=1).long()
durs_cums_starts = torch.nn.functional.pad(durs_cums_ends[:, :-1], (1, 0))
values_nonzero_cums = torch.nn.functional.pad(torch.cumsum(values != 0.0, dim=2), (1, 0))
values_cums = torch.nn.functional.pad(torch.cumsum(values, dim=2), (1, 0))
bs, l = durs_cums_ends.size()
n_formants = values.size(1)
dcs = repeat(durs_cums_starts, 'bs l -> bs n l', n=n_formants)
dce = repeat(durs_cums_ends, 'bs l -> bs n l', n=n_formants)
values_sums = (torch.gather(values_cums, 2, dce) - torch.gather(values_cums, 2, dcs)).to(values.dtype)
values_nelems = (torch.gather(values_nonzero_cums, 2, dce) - torch.gather(values_nonzero_cums, 2, dcs)).to(values.dtype)
avg = torch.where(values_nelems == 0.0, values_nelems, values_sums / values_nelems).to(values.dtype)
return avg
def create_mask(sequence_length, max_len):
dtype, device = sequence_length.dtype, sequence_length.device
seq_range = torch.arange(max_len, dtype=dtype, device=device)
sequence_length = rearrange(sequence_length, 'b -> b 1')
seq_range = rearrange(seq_range, 't -> 1 t')
return seq_range < sequence_length
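if __name__ == "__main__":
    # quick illustrative check (added, not part of the original file):
    # 3 phonemes spanning 2 + 1 + 3 mel frames
    values = torch.tensor([[[1., 2., 3., 4., 5., 6.]]])  # (B, 1, T_de)
    durs = torch.tensor([[2, 1, 3]])                     # (B, T_en)
    print(average_over_durations(values, durs))          # tensor([[[1.5, 3., 5.]]])
    print(create_mask(torch.tensor([2, 3]), 4))          # (2, 4) boolean mask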
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/utils.py |
import csv
import re
class AbbreviationExpander:
def __init__(self, abbreviations_file):
self.abbreviations = {}
self.patterns = {}
self.load_abbreviations(abbreviations_file)
def load_abbreviations(self, abbreviations_file):
with open(abbreviations_file, 'r') as file:
reader = csv.DictReader(file)
for row in reader:
abbreviation = row['abbreviation']
expansion = row['expansion']
language = row['language'].lower()
self.abbreviations.setdefault(language, {})[abbreviation] = expansion
if language not in self.patterns:
self.patterns[language] = re.compile(
r"\b(" + "|".join(re.escape(key) for key in self.abbreviations[language].keys()) + r")\b",
re.IGNORECASE
)
def replace_abbreviations(self, match, language):
return self.abbreviations[language][match.group(0).lower()]
def replace_text_abbreviations(self, text, language):
if language.lower() in self.patterns:
return self.patterns[language.lower()].sub(
lambda match: self.replace_abbreviations(match, language.lower()),
text
)
else:
return text
if __name__ == "__main__":
# Example usage
expander = AbbreviationExpander('abbreviations.csv')
text_en = "Hello, Mr. Example. How are you today? I work at Intl. Corp."
replaced_text_en = expander.replace_text_abbreviations(text_en, 'en')
print(replaced_text_en)
text_fr = "Bonjour, Sr. Example. Comment ça va aujourd'hui? Je travaille chez Intl. Corp."
replaced_text_fr = expander.replace_text_abbreviations(text_fr, 'fr')
print(replaced_text_fr)
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/expand/abbreviations.py |
naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/expand/__init__.py |
|
import re
import inflect
from num2words import num2words
from num_to_words import num_to_word
class NumberNormalizer:
def __init__(self):
self._inflect = inflect.engine()
self._number_re = re.compile(r"-?[0-9]+")
self._currency_re = re.compile(r"([$€£¥₹])([0-9\,\.]*[0-9]+)")
self._currencies = {}
def add_currency(self, symbol, conversion_rates):
self._currencies[symbol] = conversion_rates
def normalize_numbers(self, text, language='en'):
self._inflect = inflect.engine()
self._set_language(language)
text = re.sub(self._currency_re, self._expand_currency, text)
text = re.sub(self._number_re, lambda match: self._expand_number(match, language), text)
return text
def _set_language(self, language):
if language == 'en':
self._inflect = inflect.engine()
else:
self._inflect = inflect.engine()
# Add support for additional languages here
def _expand_currency(self, match):
unit = match.group(1)
currency = self._currencies.get(unit)
if currency:
value = match.group(2)
return self._expand_currency_value(value, currency)
return match.group(0)
def _expand_currency_value(self, value, inflection):
parts = value.replace(",", "").split(".")
if len(parts) > 2:
return f"{value} {inflection[2]}" # Unexpected format
text = []
integer = int(parts[0]) if parts[0] else 0
if integer > 0:
integer_unit = inflection.get(integer, inflection[2])
text.append(f"{integer} {integer_unit}")
fraction = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if fraction > 0:
fraction_unit = inflection.get(fraction / 100, inflection[0.02])
text.append(f"{fraction} {fraction_unit}")
if not text:
return f"zero {inflection[2]}"
return " ".join(text)
def _expand_number(self, match, language: str) -> str:
num = int(match.group(0))
if 1000 < num < 3000:
if num == 2000:
return self._number_to_words(num, language)
if 2000 < num < 2010:
return f"{self._number_to_words(2000, language)} {self._number_to_words(num % 100, language)}"
if num % 100 == 0:
return f"{self._number_to_words(num // 100, language)} {self._get_word('hundred')}"
return self._number_to_words(num, language)
return self._number_to_words(num, language)
def _number_to_words(self, n: int, language: str) -> str:
try:
if language == 'en':
return self._inflect.number_to_words(n)
else:
return num2words(n, lang=language)
except:
try:
return num_to_word(n, lang=language)
except:
raise NotImplementedError("language not implemented")
def _get_word(self, word):
return word
if __name__ == "__main__":
# Create an instance of NumberNormalizer
normalizer = NumberNormalizer()
# Add currency conversion rates
symbol = '$'
    conversion_rates = {
0.01: "cent",
0.02: "cents",
1: "dollar",
2: "dollars",
}
normalizer.add_currency(symbol, conversion_rates)
# Example 1: English (en) language
text_en = "I have $1,000 and 5 apples."
normalized_text_en = normalizer.normalize_numbers(text_en, language='en')
print(normalized_text_en)
# Output: "I have one thousand dollars and five apples."
# Example 2: Spanish (es) language
text_es = "Tengo $1.000 y 5 manzanas."
normalized_text_es = normalizer.normalize_numbers(text_es, language='es')
print(normalized_text_es)
# Output: "Tengo mil dólares y cinco manzanas."
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/expand/number_norm.py |
import re
import inflect
from num2words import num2words
from num_to_words import num_to_word
class TimeExpander:
def __init__(self):
self._inflect = inflect.engine()
self._time_re = self._get_time_regex()
def _get_time_regex(self):
return re.compile(
r"""\b
((0?[0-9])|(1[0-1])|(1[2-9])|(2[0-3])) # hours
:
([0-5][0-9]) # minutes
            \s*(a\.m\.|am|pm|p\.m\.|a\.m|p\.m)? # am/pm
\b""",
re.IGNORECASE | re.X,
)
def _expand_num(self, n: int, language: str) -> str:
try:
if language == 'en':
return self._inflect.number_to_words(n)
else:
return num2words(n, lang=language)
except:
try:
return num_to_word(n, lang=language)
except:
raise NotImplementedError("language not implemented")
def _expand_time(self, match: "re.Match", language: str) -> str:
hour = int(match.group(1))
past_noon = hour >= 12
time = []
if hour > 12:
hour -= 12
elif hour == 0:
hour = 12
past_noon = True
time.append(self._expand_num(hour, language))
minute = int(match.group(6))
if minute > 0:
if minute < 10:
time.append("oh")
time.append(self._expand_num(minute, language))
am_pm = match.group(7)
if am_pm is not None:
time.extend(list(am_pm.replace(".", "")))
return " ".join(time)
def expand_time(self, text: str, language: str) -> str:
return re.sub(self._time_re, lambda match: self._expand_time(match, language), text)
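if __name__ == "__main__":
    # illustrative usage (added, not part of the original file)
    expander = TimeExpander()
    print(expander.expand_time("The meeting is at 9:30 am.", language='en'))
    # -> "The meeting is at nine thirty a m."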
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/expand/time_norm.py |
""" from https://github.com/coqui-ai/TTS/"""
import logging
import re
import subprocess
from typing import Dict, List
from packaging.version import Version
from naturalspeech2_pytorch.utils.phonemizers.base import BasePhonemizer
from naturalspeech2_pytorch.utils.phonemizers.punctuation import Punctuation
def is_tool(name):
from shutil import which
return which(name) is not None
# Use a regex pattern to match the espeak version, because it may be
# symlinked to espeak-ng, which moves the version bits to another spot.
espeak_version_pattern = re.compile(r"text-to-speech:\s(?P<version>\d+\.\d+(\.\d+)?)")
def get_espeak_version():
output = subprocess.getoutput("espeak --version")
match = espeak_version_pattern.search(output)
return match.group("version")
def get_espeakng_version():
output = subprocess.getoutput("espeak-ng --version")
return output.split()[3]
# priority: espeakng > espeak
if is_tool("espeak-ng"):
_DEF_ESPEAK_LIB = "espeak-ng"
_DEF_ESPEAK_VER = get_espeakng_version()
elif is_tool("espeak"):
_DEF_ESPEAK_LIB = "espeak"
_DEF_ESPEAK_VER = get_espeak_version()
else:
_DEF_ESPEAK_LIB = None
_DEF_ESPEAK_VER = None
def _espeak_exe(espeak_lib: str, args: List, sync=False) -> List[str]:
"""Run espeak with the given arguments."""
cmd = [
espeak_lib,
"-q",
"-b",
"1", # UTF8 text encoding
]
cmd.extend(args)
logging.debug("espeakng: executing %s", repr(cmd))
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as p:
res = iter(p.stdout.readline, b"")
if not sync:
p.stdout.close()
if p.stderr:
p.stderr.close()
if p.stdin:
p.stdin.close()
return res
res2 = []
for line in res:
res2.append(line)
p.stdout.close()
if p.stderr:
p.stderr.close()
if p.stdin:
p.stdin.close()
p.wait()
return res2
class ESpeak(BasePhonemizer):
"""ESpeak wrapper calling `espeak` or `espeak-ng` from the command-line the perform G2P
Args:
language (str):
Valid language code for the used backend.
backend (str):
Name of the backend library to use. `espeak` or `espeak-ng`. If None, set automatically
prefering `espeak-ng` over `espeak`. Defaults to None.
punctuations (str):
Characters to be treated as punctuation. Defaults to Punctuation.default_puncs().
keep_puncs (bool):
If True, keep the punctuations after phonemization. Defaults to True.
Example:
>>> phonemizer = ESpeak("tr")
>>> phonemizer.phonemize("Bu Türkçe, bir örnektir.", separator="|")
'b|ʊ t|ˈø|r|k|tʃ|ɛ, b|ɪ|r œ|r|n|ˈɛ|c|t|ɪ|r.'
"""
_ESPEAK_LIB = _DEF_ESPEAK_LIB
_ESPEAK_VER = _DEF_ESPEAK_VER
def __init__(self, language: str, backend=None, punctuations=Punctuation.default_puncs(), keep_puncs=True):
if self._ESPEAK_LIB is None:
raise Exception(" [!] No espeak backend found. Install espeak-ng or espeak to your system.")
self.backend = self._ESPEAK_LIB
# band-aid for backwards compatibility
if language == "en":
language = "en-us"
if language == "zh-cn":
language = "cmn"
super().__init__(language, punctuations=punctuations, keep_puncs=keep_puncs)
if backend is not None:
self.backend = backend
@property
def backend(self):
return self._ESPEAK_LIB
@property
def backend_version(self):
return self._ESPEAK_VER
@backend.setter
def backend(self, backend):
if backend not in ["espeak", "espeak-ng"]:
raise Exception("Unknown backend: %s" % backend)
self._ESPEAK_LIB = backend
self._ESPEAK_VER = get_espeakng_version() if backend == "espeak-ng" else get_espeak_version()
def auto_set_espeak_lib(self) -> None:
if is_tool("espeak-ng"):
self._ESPEAK_LIB = "espeak-ng"
self._ESPEAK_VER = get_espeakng_version()
elif is_tool("espeak"):
self._ESPEAK_LIB = "espeak"
self._ESPEAK_VER = get_espeak_version()
else:
raise Exception("Cannot set backend automatically. espeak-ng or espeak not found")
@staticmethod
def name():
return "espeak"
def phonemize_espeak(self, text: str, separator: str = "|", tie=False) -> str:
"""Convert input text to phonemes.
Args:
text (str):
Text to be converted to phonemes.
tie (bool, optional) : When True use a '͡' character between
consecutive characters of a single phoneme. Else separate phoneme
with '_'. This option requires espeak>=1.49. Default to False.
"""
# set arguments
args = ["-v", f"{self._language}"]
# espeak and espeak-ng parses `ipa` differently
if tie:
# use '͡' between phonemes
if self.backend == "espeak":
args.append("--ipa=1")
else:
args.append("--ipa=3")
else:
# split with '_'
if self.backend == "espeak":
if Version(self.backend_version) >= Version("1.48.15"):
args.append("--ipa=1")
else:
args.append("--ipa=3")
else:
args.append("--ipa=1")
if tie:
args.append("--tie=%s" % tie)
args.append('"' + text + '"')
# compute phonemes
phonemes = ""
for line in _espeak_exe(self._ESPEAK_LIB, args, sync=True):
logging.debug("line: %s", repr(line))
ph_decoded = line.decode("utf8").strip()
            # espeak needs to skip the first two characters of the returned text:
            # version 1.48.03: "_ p_ɹ_ˈaɪ_ɚ t_ə n_oʊ_v_ˈɛ_m_b_ɚ t_w_ˈɛ_n_t_i t_ˈuː\n"
            # version 1.48.15: " p_ɹ_ˈaɪ_ɚ t_ə n_oʊ_v_ˈɛ_m_b_ɚ t_w_ˈɛ_n_t_i t_ˈuː\n"
            # espeak-ng needs to skip the first character of the returned text:
            # "_p_ɹ_ˈaɪ_ɚ t_ə n_oʊ_v_ˈɛ_m_b_ɚ t_w_ˈɛ_n_t_i t_ˈuː\n"
            # dealing with the conditions described above
ph_decoded = ph_decoded[:1].replace("_", "") + ph_decoded[1:]
# espeak-ng backend can add language flags that need to be removed:
# "sɛʁtˈɛ̃ mˈo kɔm (en)fˈʊtbɔːl(fr) ʒenˈɛʁ de- flˈaɡ də- lˈɑ̃ɡ."
# phonemize needs to remove the language flags of the returned text:
# "sɛʁtˈɛ̃ mˈo kɔm fˈʊtbɔːl ʒenˈɛʁ de- flˈaɡ də- lˈɑ̃ɡ."
ph_decoded = re.sub(r"\(.+?\)", "", ph_decoded)
phonemes += ph_decoded.strip()
return phonemes.replace("_", separator)
def _phonemize(self, text, separator=None):
return self.phonemize_espeak(text, separator, tie=False)
@staticmethod
def supported_languages() -> Dict:
"""Get a dictionary of supported languages.
Returns:
Dict: Dictionary of language codes.
"""
if _DEF_ESPEAK_LIB is None:
return {}
args = ["--voices"]
langs = {}
count = 0
for line in _espeak_exe(_DEF_ESPEAK_LIB, args, sync=True):
line = line.decode("utf8").strip()
if count > 0:
cols = line.split()
lang_code = cols[1]
lang_name = cols[3]
langs[lang_code] = lang_name
logging.debug("line: %s", repr(line))
count += 1
return langs
def version(self) -> str:
"""Get the version of the used backend.
Returns:
str: Version of the used backend.
"""
args = ["--version"]
for line in _espeak_exe(self.backend, args, sync=True):
version = line.decode("utf8").strip().split()[2]
logging.debug("line: %s", repr(line))
return version
@classmethod
def is_available(cls):
"""Return true if ESpeak is available else false"""
return is_tool("espeak") or is_tool("espeak-ng")
if __name__ == "__main__":
e = ESpeak(language="en-us")
print(e.supported_languages())
print(e.version())
print(e.language)
print(e.name())
print(e.is_available())
e = ESpeak(language="en-us", keep_puncs=False)
print("`" + e.phonemize("hello how are you today?") + "`")
e = ESpeak(language="en-us", keep_puncs=True)
print("`" + e.phonemize("hello how are you today?") + "`")
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/phonemizers/espeak_wrapper.py |
""" from https://github.com/coqui-ai/TTS/"""
import collections
import re
from enum import Enum
import six
_DEF_PUNCS = ';:,.!?¡¿—…"«»“”'
_PUNC_IDX = collections.namedtuple("_punc_index", ["punc", "position"])
class PuncPosition(Enum):
"""Enum for the punctuations positions"""
BEGIN = 0
END = 1
MIDDLE = 2
ALONE = 3
class Punctuation:
"""Handle punctuations in text.
Just strip punctuations from text or strip and restore them later.
Args:
puncs (str): The punctuations to be processed. Defaults to `_DEF_PUNCS`.
Example:
>>> punc = Punctuation()
>>> punc.strip("This is. example !")
'This is example'
>>> text_striped, punc_map = punc.strip_to_restore("This is. example !")
>>> ' '.join(text_striped)
'This is example'
>>> text_restored = punc.restore(text_striped, punc_map)
>>> text_restored[0]
'This is. example !'
"""
def __init__(self, puncs: str = _DEF_PUNCS):
self.puncs = puncs
@staticmethod
def default_puncs():
"""Return default set of punctuations."""
return _DEF_PUNCS
@property
def puncs(self):
return self._puncs
@puncs.setter
def puncs(self, value):
if not isinstance(value, six.string_types):
raise ValueError("[!] Punctuations must be of type str.")
self._puncs = "".join(list(dict.fromkeys(list(value)))) # remove duplicates without changing the oreder
self.puncs_regular_exp = re.compile(rf"(\s*[{re.escape(self._puncs)}]+\s*)+")
def strip(self, text):
"""Remove all the punctuations by replacing with `space`.
Args:
text (str): The text to be processed.
Example::
"This is. example !" -> "This is example "
"""
return re.sub(self.puncs_regular_exp, " ", text).rstrip().lstrip()
def strip_to_restore(self, text):
"""Remove punctuations from text to restore them later.
Args:
text (str): The text to be processed.
Examples ::
"This is. example !" -> [["This is", "example"], [".", "!"]]
"""
text, puncs = self._strip_to_restore(text)
return text, puncs
def _strip_to_restore(self, text):
"""Auxiliary method for Punctuation.preserve()"""
matches = list(re.finditer(self.puncs_regular_exp, text))
if not matches:
return [text], []
# the text is only punctuations
if len(matches) == 1 and matches[0].group() == text:
return [], [_PUNC_IDX(text, PuncPosition.ALONE)]
# build a punctuation map to be used later to restore punctuations
puncs = []
for match in matches:
position = PuncPosition.MIDDLE
if match == matches[0] and text.startswith(match.group()):
position = PuncPosition.BEGIN
elif match == matches[-1] and text.endswith(match.group()):
position = PuncPosition.END
puncs.append(_PUNC_IDX(match.group(), position))
# convert str text to a List[str], each item is separated by a punctuation
splitted_text = []
for idx, punc in enumerate(puncs):
split = text.split(punc.punc)
prefix, suffix = split[0], punc.punc.join(split[1:])
splitted_text.append(prefix)
# if the text does not end with a punctuation, add it to the last item
if idx == len(puncs) - 1 and len(suffix) > 0:
splitted_text.append(suffix)
text = suffix
return splitted_text, puncs
@classmethod
def restore(cls, text, puncs):
"""Restore punctuation in a text.
Args:
text (str): The text to be processed.
puncs (List[str]): The list of punctuations map to be used for restoring.
Examples ::
['This is', 'example'], ['.', '!'] -> "This is. example!"
"""
return cls._restore(text, puncs, 0)
@classmethod
def _restore(cls, text, puncs, num): # pylint: disable=too-many-return-statements
"""Auxiliary method for Punctuation.restore()"""
if not puncs:
return text
        # nothing has been phonemized, return the puncs alone
if not text:
return ["".join(m.punc for m in puncs)]
current = puncs[0]
if current.position == PuncPosition.BEGIN:
return cls._restore([current.punc + text[0]] + text[1:], puncs[1:], num)
if current.position == PuncPosition.END:
return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:], num + 1)
if current.position == PuncPosition.ALONE:
        return [current.punc] + cls._restore(text, puncs[1:], num + 1)
# POSITION == MIDDLE
if len(text) == 1: # pragma: nocover
# a corner case where the final part of an intermediate
# mark (I) has not been phonemized
return cls._restore([text[0] + current.punc], puncs[1:], num)
return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:], num)
# if __name__ == "__main__":
# punc = Punctuation()
# text = "This is. This is, example!"
# print(punc.strip(text))
# split_text, puncs = punc.strip_to_restore(text)
# print(split_text, " ---- ", puncs)
# restored_text = punc.restore(split_text, puncs)
# print(restored_text)
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/phonemizers/punctuation.py |
naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/phonemizers/__init__.py |
|
""" from https://github.com/coqui-ai/TTS/"""
import abc
from typing import List, Tuple
from naturalspeech2_pytorch.utils.phonemizers.punctuation import Punctuation
class BasePhonemizer(abc.ABC):
"""Base phonemizer class
Phonemization follows the following steps:
1. Preprocessing:
- remove empty lines
- remove punctuation
- keep track of punctuation marks
2. Phonemization:
- convert text to phonemes
3. Postprocessing:
- join phonemes
- restore punctuation marks
Args:
language (str):
Language used by the phonemizer.
punctuations (List[str]):
List of punctuation marks to be preserved.
keep_puncs (bool):
Whether to preserve punctuation marks or not.
"""
def __init__(self, language, punctuations=Punctuation.default_puncs(), keep_puncs=False):
# ensure the backend is installed on the system
if not self.is_available():
raise RuntimeError("{} not installed on your system".format(self.name())) # pragma: nocover
# ensure the backend support the requested language
self._language = self._init_language(language)
# setup punctuation processing
self._keep_puncs = keep_puncs
self._punctuator = Punctuation(punctuations)
def _init_language(self, language):
"""Language initialization
This method may be overloaded in child classes (see Segments backend)
"""
if not self.is_supported_language(language):
raise RuntimeError(f'language "{language}" is not supported by the ' f"{self.name()} backend")
return language
@property
def language(self):
"""The language code configured to be used for phonemization"""
return self._language
@staticmethod
@abc.abstractmethod
def name():
"""The name of the backend"""
...
@classmethod
@abc.abstractmethod
def is_available(cls):
"""Returns True if the backend is installed, False otherwise"""
...
@classmethod
@abc.abstractmethod
def version(cls):
"""Return the backend version as a tuple (major, minor, patch)"""
...
@staticmethod
@abc.abstractmethod
def supported_languages():
"""Return a dict of language codes -> name supported by the backend"""
...
def is_supported_language(self, language):
"""Returns True if `language` is supported by the backend"""
return language in self.supported_languages()
@abc.abstractmethod
def _phonemize(self, text, separator):
"""The main phonemization method"""
def _phonemize_preprocess(self, text) -> Tuple[List[str], List]:
"""Preprocess the text before phonemization
1. remove spaces
2. remove punctuation
Override this if you need a different behaviour
"""
text = text.strip()
if self._keep_puncs:
# a tuple (text, punctuation marks)
return self._punctuator.strip_to_restore(text)
return [self._punctuator.strip(text)], []
def _phonemize_postprocess(self, phonemized, punctuations) -> str:
"""Postprocess the raw phonemized output
Override this if you need a different behaviour
"""
if self._keep_puncs:
return self._punctuator.restore(phonemized, punctuations)[0]
return phonemized[0]
def phonemize(self, text: str, separator="|", language: str = None) -> str: # pylint: disable=unused-argument
"""Returns the `text` phonemized for the given language
Args:
text (str):
Text to be phonemized.
separator (str):
                string separator used between phonemes. Defaults to '|'.
Returns:
(str): Phonemized text
"""
text, punctuations = self._phonemize_preprocess(text)
phonemized = []
for t in text:
p = self._phonemize(t, separator)
phonemized.append(p)
phonemized = self._phonemize_postprocess(phonemized, punctuations)
return phonemized
def print_logs(self, level: int = 0):
indent = "\t" * level
print(f"{indent}| > phoneme language: {self.language}")
print(f"{indent}| > phoneme backend: {self.name()}")
| naturalspeech2-pytorch-main | naturalspeech2_pytorch/utils/phonemizers/base.py |
from setuptools import setup, find_packages
setup(
name = 'CoLT5-attention',
packages = find_packages(),
version = '0.10.15',
license='MIT',
description = 'Conditionally Routed Attention',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/CoLT5-attention',
keywords = [
'artificial intelligence',
'attention mechanism',
'dynamic routing'
],
install_requires=[
'einops>=0.6.1',
'local-attention>=1.8.6',
'packaging',
'torch>=1.10'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| CoLT5-attention-main | setup.py |
import math
from functools import partial
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn, einsum
from typing import Tuple, Optional
from local_attention import LocalMHA
from einops import rearrange, repeat, pack, unpack
from colt5_attention.attend import Attend
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def pad_to_multiple(tensor, multiple, dim=-1, value=0):
seq_len = tensor.shape[dim]
m = seq_len / multiple
if m.is_integer():
return tensor, seq_len
remainder = math.ceil(m) * multiple - seq_len
pad_offset = (0,) * (-1 - dim) * 2
padded_tensor = F.pad(tensor, (*pad_offset, 0, remainder), value = value)
return padded_tensor, seq_len
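# e.g. pad_to_multiple(torch.ones(1, 10), 4) right-pads the last dimension to
# length 12 and also returns the original length, 10 (illustrative note, not
# part of the original file)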
def batched_gather(x, indices):
batch_range = create_batch_range(indices, indices.ndim - 1)
return x[batch_range, indices]
def identity(t):
return t
def l2norm(t):
return F.normalize(t, dim = -1)
# tensor helpers
def create_batch_range(t, right_pad_dims = 1):
b, device = t.shape[0], t.device
batch_range = torch.arange(b, device = device)
pad_dims = ((1,) * right_pad_dims)
return batch_range.reshape(-1, *pad_dims)
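# e.g. (illustrative sketch, not part of the original file):
#   x = torch.randn(2, 8, 4)
#   indices = torch.randint(0, 8, (2, 3))
#   batched_gather(x, indices)  # (2, 3, 4) - per-batch row selection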
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
@property
def device(self):
return next(self.buffers()).device
def forward(self, seq_len):
t = torch.arange(seq_len, device = self.device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
return freqs
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return t * pos.cos() + rotate_half(t) * pos.sin()
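# e.g. (illustrative sketch, not part of the original file):
#   rotary = RotaryEmbedding(dim = 64)
#   freqs = rotary(1024)                  # (1024, 64)
#   q = torch.randn(2, 8, 1024, 64)       # (batch, heads, seq, dim_head)
#   q = apply_rotary_pos_emb(freqs, q)    # same shape, positions encoded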
# normalization
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
normed = F.normalize(x, dim = -1)
return normed * self.scale * self.gamma
# modules
def FeedForward(dim, mult = 4):
dim_hidden = int(dim * mult)
return nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim_hidden),
nn.GELU(),
nn.Linear(dim_hidden, dim)
)
class SelfAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
use_flash = False,
prenorm = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
dim_hidden = dim_head * heads
self.norm = RMSNorm(dim) if prenorm else nn.Identity()
self.attend = Attend(use_flash = use_flash)
self.to_qkv = nn.Linear(dim, dim_hidden * 3, bias = False)
self.to_out = nn.Linear(dim_hidden, dim, bias = False)
def forward(self, x):
h = self.heads
x = self.norm(x)
# get queries, keys, values
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# attention
out = self.attend(q, k, v)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
multiply_keys_by_score = False,
use_flash = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
dim_hidden = dim_head * heads
self.multiply_keys_by_score = multiply_keys_by_score
self.norm = RMSNorm(dim)
self.null_kv = nn.Parameter(torch.randn(2, heads, dim_head))
self.attend = Attend(use_flash = use_flash)
self.to_q = nn.Linear(dim, dim_hidden, bias = False)
self.to_kv = nn.Linear(dim, dim_hidden * 2, bias = False)
self.to_out = nn.Linear(dim_hidden, dim, bias = False)
def forward(
self,
x,
context = None,
mask = None,
normalized_scores_kv = None,
normalized_scores_q = None,
rotary_emb: Optional[Tuple[Tensor, Tensor]] = None
):
"""
einops:
b - batch
h - heads, or number of heads per route
r - routing dimension, for routing different sets of key / values - should be more expressive
n - sequence dimension
d - head dimension
i - input model dimension
"""
batch, h = x.shape[0], self.heads
x = self.norm(x)
if exists(context):
context = self.norm(context)
context = default(context, x)
# if routing dimension is not there, unsqueeze for 1 routing dimension
if context.ndim == 3:
context = rearrange(context, 'b n d -> b 1 n d')
if exists(normalized_scores_kv) and isinstance(normalized_scores_kv, torch.Tensor):
if normalized_scores_kv.ndim == 2:
normalized_scores_kv = rearrange(normalized_scores_kv, 'b n -> b 1 n')
normalized_scores_kv = rearrange(normalized_scores_kv, 'b r n -> b r 1 n 1')
num_kv_routes = context.shape[1]
# get queries
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if exists(normalized_scores_q) and isinstance(normalized_scores_q, torch.Tensor):
q = q * rearrange(normalized_scores_q, 'b n -> b 1 n 1')
# handle key / values, with the routing dimension, dividing the number of heads in between the routes
assert divisible_by(h, num_kv_routes), 'number of heads must be divisible by the number of key / value routes'
heads_per_route = h // num_kv_routes
kv_weight = rearrange(self.to_kv.weight, '(r h d) i -> r h d i', h = heads_per_route, r = num_kv_routes)
kv = einsum('r h d i, b r n i -> b r h n d', kv_weight, context)
k, v = kv.chunk(2, dim = -1)
if exists(normalized_scores_kv):
# in paper, not sure how they passed back the signal from heavy attention to normalized scores for key/values. just multiply the values by the normalized kv scores for now
v = v * normalized_scores_kv
if self.multiply_keys_by_score:
k = k * normalized_scores_kv
# apply rotary embeddings if needed
if exists(rotary_emb):
q_rotary_emb, k_rotary_emb = rotary_emb
q = apply_rotary_pos_emb(q_rotary_emb, q)
if k_rotary_emb.ndim == 4:
k_rotary_emb = repeat(k_rotary_emb, 'b 1 n d -> b r 1 n d', r = k.shape[1])
k = apply_rotary_pos_emb(k_rotary_emb, k)
# merge routing dimension with heads for key / values
k, v = map(lambda t: rearrange(t, 'b r h n d -> b (r h) n d'), (k, v))
# null key values
nk, nv = map(lambda t: repeat(t, 'h d -> b h 1 d', b = batch), self.null_kv)
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# masking
if exists(mask):
if mask.ndim == 3:
mask = repeat(mask, 'b r j -> b (r h) 1 j', h = heads_per_route)
else:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = F.pad(mask, (1, 0), value = True)
# attention
out = self.attend(q, k, v, mask = mask)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# routing related logic
from colt5_attention.coor_descent import coor_descent
RouterReturn = namedtuple('RouterReturn', ['indices', 'scores', 'routed_tokens', 'routed_mask'])
class CoordinateDescentRouter(nn.Module):
"""
from Wright et al. https://arxiv.org/abs/1502.04759
then adopted by https://arxiv.org/abs/2211.01267 for multi-vector document retrieval by Qian et al
finally, used successfully by this paper for routing to heavy branch attention / feedforward
"""
def __init__(
self,
dim,
straight_through = True,
n_iters = 20, # 20 iterations in a new paper, utilizing ε-scaling
        fetch_k_ratio = 9 / 8,              # in the paper, they do a slightly higher k (times this ratio) for better learning
eps = 0.03, # the epsilon for coordinate descent. in a recent paper, they used 0.03 for text and 1.0 for speech
eps_decay = 0.7,
eps_init = 4.,
num_routing_tokens = 1,
learned_routing_tokens = False,
use_triton = False,
cosine_sim_routing = False,
cosine_sim_scale = 8,
route_block_size = None,
triton_checkpoint_segments = None # whether to recompute the coordinate descent in segments, with 4 and 50 iterations, backwards is sped up 3x times at the expense of forwards and some memory for saving initial a and b
):
super().__init__()
assert fetch_k_ratio >= 1.
self.n_iters = n_iters
self.fetch_k_ratio = fetch_k_ratio
self.coor_descent = coor_descent
# epsilon related hparams, for ε-scaling
self.eps = eps
self.eps_decay = eps_decay
self.eps_init = eps_init
if use_triton:
from colt5_attention.triton_coor_descent import triton_coor_descent
triton_checkpoint_segments = default(triton_checkpoint_segments, n_iters // 5)
self.coor_descent = partial(triton_coor_descent, checkpoint_segments = triton_checkpoint_segments)
self.is_one_routing_token = num_routing_tokens == 1
self.num_routing_tokens = num_routing_tokens
self.route_block_size = route_block_size
self.routing_token = nn.Parameter(torch.randn(num_routing_tokens, dim)) if not learned_routing_tokens else None
self.straight_through = straight_through
# whether to use cosine sim for routing
self.cosine_sim_routing = cosine_sim_routing
self.cosine_sim_scale = cosine_sim_scale
def route_back(self, src, routed_tokens, indices):
batch_range = create_batch_range(routed_tokens)
src[batch_range, indices] = routed_tokens
return src
def forward(
self,
x,
*,
num_tokens,
mask = None,
random_route = False,
routing_tokens = None,
keep_one_route_dim = False # if only one route, whether to keepdim
):
n, device, eps, eps_init, eps_decay, num_routes, route_block_size = x.shape[-2], x.device, self.eps, self.eps_init, self.eps_decay, self.num_routing_tokens, self.route_block_size
# do not route if the sequence length is less than the number of tokens
has_route_dim = keep_one_route_dim or not self.is_one_routing_token
if n <= num_tokens:
b = x.shape[0]
r = self.num_routing_tokens
if has_route_dim:
scores_shape = (b, r, n)
x = repeat(x, 'b n d -> b r n d', r = r)
if exists(mask):
mask = repeat(mask, 'b n -> b r n', r = r)
else:
scores_shape = (b, n)
scores = torch.ones(scores_shape, device = device, dtype = x.dtype)
return RouterReturn(None, scores, x, mask)
# whether to route even amounts from blocks of the sequence
if exists(route_block_size):
num_blocks = n // route_block_size
prev_seq_mult = num_blocks * route_block_size
# just curtail to last multiple of route block size
x = x[:, :prev_seq_mult]
# group sequence into blocks to route
x = rearrange(x, 'b (n w) d -> (b n) w d', w = route_block_size)
if exists(mask):
mask = mask[:, :prev_seq_mult]
mask = rearrange(mask, 'b (n w) -> (b n) w', w = route_block_size)
n = route_block_size
num_tokens = math.ceil(num_tokens / num_blocks)
# s stands for eventual normalized score
maybe_l2norm = l2norm if self.cosine_sim_routing else identity
if exists(self.routing_token):
s = einsum('b n d, r d -> b r n', maybe_l2norm(x), maybe_l2norm(self.routing_token))
else:
assert exists(routing_tokens)
if routing_tokens.ndim == 2:
routing_tokens = rearrange(routing_tokens, 'b d -> b 1 d')
s = einsum('b n d, b r d -> b r n', maybe_l2norm(x), maybe_l2norm(routing_tokens))
if self.cosine_sim_routing:
s = s * self.cosine_sim_scale
# merge routing dimension into batch
x = repeat(x, 'b ... -> (b r) ...', r = num_routes)
s, ps = pack_one(s, '* n')
if exists(mask):
mask = repeat(mask, 'b ... -> (b r) ...', r = num_routes)
# k, which controls the sparsity of the outputted scores from iterative coordinate descent
effective_k = min(num_tokens * self.fetch_k_ratio, n)
# coordinate descent
scores = self.coor_descent(
s,
n_iters = self.n_iters,
mask = mask,
k = effective_k,
eps = eps,
eps_init = eps_init,
eps_decay = eps_decay
)
# force random routing, if negative control
if random_route:
scores = torch.randn_like(scores)
            if exists(mask):
                scores = scores.masked_fill(~mask, -torch.finfo(scores.dtype).max)
# get the topk scores and indices from the sparse matrix
selected_scores, selected_indices = scores.topk(num_tokens, dim = -1)
if self.straight_through:
# this would make sure all normalized scores returned are 1., but still differentiable using straight-through trick
selected_scores = selected_scores + (1. - selected_scores).detach()
if exists(mask):
selected_mask = batched_gather(mask, selected_indices)
selected_scores = selected_scores.masked_fill(~selected_mask, 0.)
# split out routing dimension again if need be
if has_route_dim:
selected_scores = unpack_one(selected_scores, ps, '* n')
selected_indices = unpack_one(selected_indices, ps, '* n')
# undo the windowing, if one were routing uniformly in blocks
if exists(route_block_size):
selected_scores = rearrange(selected_scores, '(b n) ... w -> b ... (n w)', n = num_blocks)
selected_indices = rearrange(selected_indices, '(b n) ... w -> b ... n w', n = num_blocks)
indices_offset = torch.arange(num_blocks, device = device) * route_block_size
selected_indices = selected_indices + rearrange(indices_offset, 'n -> n 1')
selected_indices = rearrange(selected_indices, 'b ... n w -> b ... (n w)')
# auto-gather the routed tokens and mask (if not None)
routed_tokens = batched_gather(x, selected_indices)
routed_mask = None
if exists(mask):
routed_mask = batched_gather(mask, selected_indices)
# return indices, scores, routed tokens and mask
return RouterReturn(selected_indices, selected_scores, routed_tokens, routed_mask)
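# usage sketch (illustrative, not part of the original file):
#   router = CoordinateDescentRouter(dim = 512)
#   x = torch.randn(1, 1024, 512)
#   indices, scores, routed, routed_mask = router(x, num_tokens = 256)
#   # routed: (1, 256, 512) - tokens picked by iterative coordinate descent;
#   # after processing, scatter back with router.route_back(out, processed, indices)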
# main classes
class ConditionalRoutedFeedForward(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_tokens,
light_ff_mult = 0.5,
heavy_ff_mult = 4,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_kwargs: dict = {},
use_triton = False
):
super().__init__()
self.num_heavy_tokens = num_heavy_tokens
if use_triton:
router_kwargs = {**router_kwargs, 'use_triton': True}
self.router = CoordinateDescentRouter(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.light_ff = FeedForward(dim, light_ff_mult)
self.heavy_ff = FeedForward(dim, heavy_ff_mult)
def forward(
self,
x,
mask = None,
num_heavy_tokens = None
):
device, num_heavy_tokens = x.device, default(num_heavy_tokens, self.num_heavy_tokens)
# light feedforward sees all the tokens (hidden dimension is only 1/2 of model dimensions)
light_out = self.light_ff(x)
# route tokens appropriately for heavy branch
indices, normalized_scores, routed_tokens, _ = self.router(x, num_tokens = num_heavy_tokens, mask = mask)
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_ff(routed_tokens) * rearrange(normalized_scores, '... -> ... 1')
# scatter back the output of the heavy feedforward branch
if exists(indices):
heavy_out = torch.zeros_like(x)
heavy_out = self.router.route_back(heavy_out, routed_tokens_out, indices)
else:
heavy_out = routed_tokens_out
# sum light and heavy branches
return light_out + heavy_out
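# usage sketch (illustrative, not part of the original file):
#   ff = ConditionalRoutedFeedForward(dim = 512, num_heavy_tokens = 256)
#   x = torch.randn(1, 1024, 512)
#   out = ff(x)  # (1, 1024, 512) - light branch on all 1024 tokens, heavy on 256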
class ConditionalRoutedAttention(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_tokens_q,
num_heavy_tokens_kv,
num_routed_kv = 1,
light_dim_head = 64,
light_heads = 8,
        light_window_size = 128,        # each token would see ~64 tokens to either side
heavy_dim_head = 64,
heavy_heads = 8,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_kwargs: dict = {},
multiply_keys_by_score = False,
multiply_queries_by_score = False,
use_triton = False,
use_null_q_tokens = True,
use_flash_attn = False,
rotary_emb = False
):
super().__init__()
if use_triton:
router_kwargs = {**router_kwargs, 'use_triton': True}
self.num_heavy_tokens_q = num_heavy_tokens_q
self.num_heavy_tokens_kv = num_heavy_tokens_kv
self.multiply_queries_by_score = multiply_queries_by_score
self.light_attn = LocalMHA(
dim = dim,
dim_head = light_dim_head,
heads = light_heads,
window_size = light_window_size // 2,
prenorm = True,
causal = False,
use_rotary_pos_emb = False,
look_backward = 1,
look_forward = 1
)
self.null_q_token = None
if use_null_q_tokens:
self.null_q_token = nn.Parameter(torch.randn(dim)) # for the query tokens not selected by the router, give it a learned output embed
self.q_router = CoordinateDescentRouter(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.kv_router = CoordinateDescentRouter(
dim = dim,
num_routing_tokens = num_routed_kv,
straight_through = router_straight_through,
**router_kwargs
)
self.heavy_attn = Attention(
dim = dim,
dim_head = heavy_dim_head,
heads = heavy_heads,
multiply_keys_by_score = multiply_keys_by_score,
use_flash = use_flash_attn
)
# rotary embedding
self.rotary_emb = RotaryEmbedding(heavy_dim_head) if rotary_emb else None
def forward(
self,
x,
*,
num_heavy_tokens_q = None,
num_heavy_tokens_kv = None,
mask = None
):
batch, seq, device = *x.shape[:2], x.device
num_heavy_tokens_q = default(num_heavy_tokens_q, self.num_heavy_tokens_q)
num_heavy_tokens_kv = default(num_heavy_tokens_kv, self.num_heavy_tokens_kv)
# light local attention sees all tokens in a limited context
light_out = self.light_attn(x, mask = mask)
# route tokens appropriately for heavy branch
indices_q, normalized_scores_q, routed_tokens_q, _ = self.q_router(x, num_tokens = num_heavy_tokens_q, mask = mask)
indices_kv, normalized_scores_kv, routed_tokens_kv, routed_tokens_kv_mask = self.kv_router(x, num_tokens = num_heavy_tokens_kv, mask = mask)
# get rotary embeddings if specified
rotary_emb = None
if exists(self.rotary_emb):
seq_rotary_emb = self.rotary_emb(seq)
q_rotary_emb = rearrange(seq_rotary_emb[indices_q], 'b n d -> b 1 n d') if exists(indices_q) else seq_rotary_emb
k_rotary_emb = rearrange(seq_rotary_emb[indices_kv], '... n d -> ... 1 n d') if exists(indices_kv) else seq_rotary_emb
rotary_emb = (q_rotary_emb, k_rotary_emb)
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_attn(
routed_tokens_q,
mask = routed_tokens_kv_mask,
context = routed_tokens_kv,
rotary_emb = rotary_emb,
normalized_scores_kv = normalized_scores_kv,
normalized_scores_q = normalized_scores_q if self.multiply_queries_by_score else None
)
routed_tokens_out = routed_tokens_out * rearrange(normalized_scores_q, '... -> ... 1')
# scatter back the output of the heavy branch
if exists(indices_q):
if exists(self.null_q_token):
heavy_out = rearrange(self.null_q_token, 'd -> 1 1 d')
heavy_out = heavy_out.expand_as(x).clone()
else:
heavy_out = torch.zeros_like(x)
heavy_out = self.q_router.route_back(heavy_out, routed_tokens_out, indices_q)
else:
heavy_out = routed_tokens_out
# sum light and heavy branches
return light_out + heavy_out
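# usage sketch (illustrative, not part of the original file):
#   attn = ConditionalRoutedAttention(
#       dim = 512,
#       num_heavy_tokens_q = 256,
#       num_heavy_tokens_kv = 256
#   )
#   x = torch.randn(1, 1024, 512)
#   out = attn(x)  # (1, 1024, 512)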
# conditionally routed image feature map attention
class ConditionalRoutedImageAttention(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_tokens_q,
num_heavy_tokens_kv,
num_routed_kv = 1,
light_dim_head = 64,
light_heads = 8,
light_window_size = 128, # each token would see ~ 64 tokens either way to left or right
heavy_dim_head = 64,
heavy_heads = 8,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_kwargs: dict = {},
multiply_keys_by_score = False,
multiply_queries_by_score = False,
use_triton = False,
use_null_q_tokens = True,
use_flash_attn = False,
channel_first = False
):
super().__init__()
self.channel_first = channel_first
if use_triton:
router_kwargs = {**router_kwargs, 'use_triton': True}
self.num_heavy_tokens_q = num_heavy_tokens_q
self.num_heavy_tokens_kv = num_heavy_tokens_kv
self.multiply_queries_by_score = multiply_queries_by_score
self.light_window_size = light_window_size
self.light_attn = SelfAttention(
dim = dim,
dim_head = light_dim_head,
heads = light_heads,
prenorm = True
)
self.null_q_token = None
if use_null_q_tokens:
self.null_q_token = nn.Parameter(torch.randn(dim)) # for the query tokens not selected by the router, give it a learned output embed
self.q_router = CoordinateDescentRouter(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.kv_router = CoordinateDescentRouter(
dim = dim,
num_routing_tokens = num_routed_kv,
straight_through = router_straight_through,
**router_kwargs
)
self.heavy_attn = Attention(
dim = dim,
dim_head = heavy_dim_head,
heads = heavy_heads,
multiply_keys_by_score = multiply_keys_by_score,
use_flash = use_flash_attn
)
def forward(
self,
x,
*,
num_heavy_tokens_q = None,
num_heavy_tokens_kv = None,
mask = None
):
assert x.ndim == 4
batch, device, channel_first, w = x.shape[0], x.device, self.channel_first, self.light_window_size
if channel_first:
x = rearrange(x, 'b d ... -> b ... d')
num_heavy_tokens_q = default(num_heavy_tokens_q, self.num_heavy_tokens_q)
num_heavy_tokens_kv = default(num_heavy_tokens_kv, self.num_heavy_tokens_kv)
# light local attention sees all tokens in a limited context
light_input = rearrange(x, 'b (h p1) (w p2) d -> b h w (p1 p2) d', p1 = w, p2 = w)
x, ps = pack_one(light_input, '* n d')
light_out = self.light_attn(x)
light_out = unpack_one(light_out, ps, '* n d')
light_out = rearrange(light_out, 'b h w (p1 p2) d -> b (h p1) (w p2) d', p1 = w, p2 = w)
# route tokens appropriately for heavy branch
indices_q, normalized_scores_q, routed_tokens_q, _ = self.q_router(x, num_tokens = num_heavy_tokens_q, mask = mask)
indices_kv, normalized_scores_kv, routed_tokens_kv, routed_tokens_kv_mask = self.kv_router(x, num_tokens = num_heavy_tokens_kv, mask = mask)
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_attn(
routed_tokens_q,
mask = routed_tokens_kv_mask,
context = routed_tokens_kv,
normalized_scores_kv = normalized_scores_kv,
normalized_scores_q = normalized_scores_q if self.multiply_queries_by_score else None
)
routed_tokens_out = routed_tokens_out * rearrange(normalized_scores_q, '... -> ... 1')
# scatter back the output of the heavy branch
if exists(self.null_q_token):
heavy_out = rearrange(self.null_q_token, 'd -> 1 1 d')
heavy_out = heavy_out.expand_as(x).clone()
else:
heavy_out = torch.zeros_like(x)
heavy_out = self.q_router.route_back(heavy_out, routed_tokens_out, indices_q)
heavy_out = unpack_one(heavy_out, ps, '* n d')
heavy_out = rearrange(heavy_out, 'b h w (p1 p2) d -> b (h p1) (w p2) d', p1 = w, p2 = w)
# sum light and heavy branches
out = light_out + heavy_out
if channel_first:
out = rearrange(out, 'b ... d -> b d ...')
return out
# improvised conditionally routed autoregressive attention
class ConditionalRoutedAutoregressiveAttention(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_tokens_q,
num_heavy_tokens_kv,
num_routed_kv = 1,
light_dim_head = 64,
light_heads = 8,
light_window_size = 128, # each token would see ~ 64 tokens either way to left or right
heavy_window_size = None,
heavy_dim_head = 64,
heavy_heads = 8,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_kwargs: dict = {},
multiply_keys_by_score = False,
multiply_queries_by_score = False,
use_triton = False,
use_null_q_tokens = True,
use_flash_attn = False,
rotary_emb = False
):
super().__init__()
if use_triton:
router_kwargs = {**router_kwargs, 'use_triton': True}
self.num_heavy_tokens_q = num_heavy_tokens_q
self.num_heavy_tokens_kv = num_heavy_tokens_kv
self.multiply_queries_by_score = multiply_queries_by_score
self.heavy_window_size = default(heavy_window_size, light_window_size)
self.light_attn = LocalMHA(
dim = dim,
dim_head = light_dim_head,
heads = light_heads,
window_size = light_window_size,
prenorm = True,
causal = True,
exact_windowsize = False,
use_rotary_pos_emb = False
)
self.null_q_token = None
if use_null_q_tokens:
self.null_q_token = nn.Parameter(torch.randn(dim)) # for the query tokens not selected by the router, give it a learned output embed
self.q_router = CoordinateDescentRouter(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.kv_router = CoordinateDescentRouter(
dim = dim,
num_routing_tokens = num_routed_kv,
straight_through = router_straight_through,
**router_kwargs
)
self.heavy_attn = Attention(
dim = dim,
dim_head = heavy_dim_head,
heads = heavy_heads,
multiply_keys_by_score = multiply_keys_by_score,
use_flash = use_flash_attn
)
# rotary embedding
self.rotary_emb = RotaryEmbedding(heavy_dim_head) if rotary_emb else None
def forward(
self,
x,
*,
num_heavy_tokens_q = None,
num_heavy_tokens_kv = None,
random_route = False
):
batch, seq, device = *x.shape[:2], x.device
num_heavy_tokens_q = default(num_heavy_tokens_q, self.num_heavy_tokens_q)
num_heavy_tokens_kv = default(num_heavy_tokens_kv, self.num_heavy_tokens_kv)
# light local attention sees all tokens in a limited context
light_out = self.light_attn(x)
# pad sequence to multiple of the heavy window size
# routing will take place within each heavy window block size
window_size = self.heavy_window_size
x, seq_len = pad_to_multiple(x, window_size, dim = -2)
padded_seq_len = x.shape[-2]
# construct mask, and make sure not to attend to padding
q_mask = torch.ones((batch, seq_len), dtype = torch.bool, device = device)
q_mask = F.pad(q_mask, (0, padded_seq_len - seq_len), value = False)
# handy function
merge_to_batch = lambda t: rearrange(t, 'b n ... -> (b n) ...')
# block the sequence and mask into windows for the queries
q = rearrange(x, 'b (n w) d -> b n w d', w = window_size)
q_mask = rearrange(q_mask, 'b (n w) -> b n w', w = window_size)
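        # drop the first window of queries - under the strictly causal window mask built below it has no previous windows to attend to,
        # and its heavy branch output is restored as zeros at the end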
q, q_mask = map(merge_to_batch, (q[:, 1:], q_mask[:, 1:]))
# each block of queries attend to sequences that are causally masked out appropriately
windows = padded_seq_len // window_size
kv = repeat(x, 'b n d -> b m n d', m = windows)
kv_mask = torch.ones((windows, windows), dtype = torch.bool, device = device).tril(-1)
kv_mask = repeat(kv_mask, 'm n -> b m (n w)', b = batch, w = window_size)
kv, kv_mask = map(merge_to_batch, (kv[:, 1:], kv_mask[:, 1:]))
# route tokens appropriately for heavy branch, if need be
should_route_q = q.shape[-2] > num_heavy_tokens_q
should_route_kv = kv.shape[-2] > num_heavy_tokens_kv
indices_q, normalized_scores_q, routed_tokens_q, _ = self.q_router(q, num_tokens = num_heavy_tokens_q, mask = q_mask, random_route = random_route)
indices_kv, normalized_scores_kv, routed_tokens_kv, routed_tokens_kv_mask = self.kv_router(kv, num_tokens = num_heavy_tokens_kv, mask = kv_mask, random_route = random_route)
# get rotary embeddings if specified
rotary_emb = None
if exists(self.rotary_emb):
seq_rotary_emb = self.rotary_emb(padded_seq_len)
windowed_rotary_emb = rearrange(seq_rotary_emb, '(n w) d -> n w d', w = window_size)
windowed_rotary_emb = windowed_rotary_emb[1:]
windowed_rotary_emb = repeat(windowed_rotary_emb, 'n w d -> (b n) w d', b = batch)
if exists(indices_q):
rotary_indices_q = repeat(indices_q, '... -> ... d', d = windowed_rotary_emb.shape[-1])
q_rotary_emb = windowed_rotary_emb.gather(1, rotary_indices_q)
else:
q_rotary_emb = windowed_rotary_emb
q_rotary_emb = rearrange(q_rotary_emb, 'b n d -> b 1 n d')
k_rotary_emb = rearrange(seq_rotary_emb[indices_kv], '... n d -> ... 1 n d') if exists(indices_kv) else seq_rotary_emb
rotary_emb = (q_rotary_emb, k_rotary_emb)
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_attn(
routed_tokens_q,
mask = routed_tokens_kv_mask,
context = routed_tokens_kv,
rotary_emb = rotary_emb,
normalized_scores_kv = normalized_scores_kv,
normalized_scores_q = normalized_scores_q if self.multiply_queries_by_score else None
)
if exists(indices_q):
routed_tokens_out = routed_tokens_out * rearrange(normalized_scores_q, '... -> ... 1')
# scatter back the output of the heavy branch
if exists(self.null_q_token):
heavy_out = rearrange(self.null_q_token, 'd -> 1 1 d')
heavy_out = heavy_out.expand_as(q).clone()
else:
heavy_out = torch.zeros_like(q)
heavy_out = self.q_router.route_back(heavy_out, routed_tokens_out, indices_q)
else:
heavy_out = routed_tokens_out
# un-window and slice out original sequence
heavy_out = rearrange(heavy_out, '(b n) w d -> b (n w) d', b = batch)
heavy_out = heavy_out[:, :(seq_len - window_size)]
heavy_out = F.pad(heavy_out, (0, 0, window_size, 0), value = 0.)
# sum light and heavy branches
return light_out + heavy_out
# adapting the conditional routed self attention to cross attention
class ConditionalRoutedCrossAttention(nn.Module):
def __init__(
self,
dim,
*,
num_tokens_q,
num_tokens_kv,
num_sets_kv = 1, # setting this greater than 1 would route multiple sets of key / values, each of size num_tokens_kv, using this many routing tokens
dim_head = 64,
heads = 8,
router_straight_through = True, # would make sure all normalized scores are 1., still differentiable
router_kwargs: dict = {},
kv_routing_tokens = 1,
multiply_keys_by_score = False,
use_triton = False,
use_null_q_tokens = True,
use_flash_attn = False,
route_block_size = None
):
super().__init__()
if use_triton:
router_kwargs = {**router_kwargs, 'use_triton': True}
self.num_tokens_q = num_tokens_q
self.num_tokens_kv = num_tokens_kv
self.null_q_token = None
if use_null_q_tokens:
self.null_q_token = nn.Parameter(torch.randn(dim)) # for the query tokens not selected by the router, give it a learned output embed
self.q_router = CoordinateDescentRouter(
dim = dim,
straight_through = router_straight_through,
**router_kwargs
)
self.kv_router = CoordinateDescentRouter(
dim = dim,
straight_through = router_straight_through,
num_routing_tokens = kv_routing_tokens,
route_block_size = route_block_size,
**router_kwargs
)
self.heavy_attn = Attention(
dim = dim,
dim_head = dim_head,
heads = heads,
multiply_keys_by_score = multiply_keys_by_score,
use_flash = use_flash_attn
)
def forward(
self,
x,
context,
*,
num_tokens_q = None,
num_tokens_kv = None,
mask = None,
context_mask = None
):
batch, device = x.shape[0], x.device
# route the queries
        query_length = x.shape[-2]
        num_tokens_q = default(num_tokens_q, self.num_tokens_q)
        routed_tokens_q = x
        indices_q = None
        normalized_scores_q = None
        should_route_queries = query_length > num_tokens_q
        if should_route_queries:
            indices_q, normalized_scores_q, routed_tokens_q, _ = self.q_router(x, num_tokens = num_tokens_q, mask = mask)
# route the long contexts
key_value_length = context.shape[-2]
num_tokens_kv = default(num_tokens_kv, self.num_tokens_kv)
routed_tokens_kv = context
routed_tokens_kv_mask = context_mask
normalized_scores_kv = None
should_route_kv = key_value_length > num_tokens_kv
if should_route_kv:
indices_kv, normalized_scores_kv, routed_tokens_kv, routed_tokens_kv_mask = self.kv_router(context, num_tokens = num_tokens_kv, mask = context_mask)
# do the heavier branch with only routed tokens
routed_tokens_out = self.heavy_attn(
routed_tokens_q,
mask = routed_tokens_kv_mask,
context = routed_tokens_kv,
normalized_scores_kv = normalized_scores_kv
)
if should_route_queries:
routed_tokens_out = routed_tokens_out * rearrange(normalized_scores_q, '... -> ... 1')
# early return if queries did not undergo routing
if not should_route_queries:
return routed_tokens_out
# otherwise, scatter back the query outputs
if exists(self.null_q_token):
out = rearrange(self.null_q_token, 'd -> 1 1 d')
out = out.expand_as(x).clone()
else:
out = torch.zeros_like(x)
if exists(indices_q):
out = self.q_router.route_back(out, routed_tokens_out, indices_q)
return out
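# usage sketch (illustrative shapes and values): cross attend from one long sequence
# to another, routing a subset of queries and of key / values into the heavy attention
#
#   cross_attn = ConditionalRoutedCrossAttention(dim = 512, num_tokens_q = 512, num_tokens_kv = 1024)
#   out = cross_attn(torch.randn(2, 8192, 512), torch.randn(2, 16384, 512))  # (2, 8192, 512)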
# block
class ConditionalRoutedTransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
num_heavy_attn_tokens_q,
num_heavy_attn_tokens_kv,
num_routed_kv = 1,
num_heavy_ff_tokens,
light_dim_head = 64,
light_heads = 8,
light_window_size = 128,
heavy_dim_head = 64,
heavy_heads = 8,
light_ff_mult = 0.5,
heavy_ff_mult = 4,
router_straight_through = True,
router_kwargs: dict = {},
multiply_keys_by_score = False,
multiply_queries_by_score = False,
use_triton = False,
use_null_q_tokens = True,
use_flash_attn = False
):
super().__init__()
self.conditional_ff = ConditionalRoutedFeedForward(
dim,
num_heavy_tokens = num_heavy_ff_tokens,
light_ff_mult = light_ff_mult,
heavy_ff_mult = heavy_ff_mult,
router_straight_through = router_straight_through,
router_kwargs = router_kwargs,
use_triton = use_triton
)
self.conditional_attn = ConditionalRoutedAttention(
dim,
light_dim_head = light_dim_head,
light_heads = light_heads,
light_window_size = light_window_size,
heavy_dim_head = heavy_dim_head,
heavy_heads = heavy_heads,
num_heavy_tokens_q = num_heavy_attn_tokens_q,
num_heavy_tokens_kv = num_heavy_attn_tokens_kv,
num_routed_kv = num_routed_kv,
router_straight_through = router_straight_through,
router_kwargs = router_kwargs,
multiply_keys_by_score = multiply_keys_by_score,
multiply_queries_by_score = multiply_queries_by_score,
use_triton = use_triton,
use_null_q_tokens = use_null_q_tokens,
use_flash_attn = use_flash_attn
)
def forward(
self,
x,
mask = None,
num_heavy_attn_tokens_q = None,
num_heavy_attn_tokens_kv = None,
num_heavy_ff_tokens = None
):
x = self.conditional_attn(x, mask = mask, num_heavy_tokens_q = num_heavy_attn_tokens_q, num_heavy_tokens_kv = num_heavy_attn_tokens_kv) + x
x = self.conditional_ff(x, mask = mask, num_heavy_tokens = num_heavy_ff_tokens) + x
return x
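# a minimal smoke test - the hyperparameters below are illustrative assumptions,
# showing a small set of "heavy" routed tokens against a long sequence
if __name__ == '__main__':
    block = ConditionalRoutedTransformerBlock(
        dim = 512,
        num_heavy_attn_tokens_q = 32,
        num_heavy_attn_tokens_kv = 1024,
        num_heavy_ff_tokens = 32
    )
    tokens = torch.randn(2, 8192, 512)
    mask = torch.ones(2, 8192).bool()
    out = block(tokens, mask = mask)  # (2, 8192, 512)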
| CoLT5-attention-main | colt5_attention/transformer_block.py |
from math import log
import torch
from torch import Tensor
from torch import autograd
import torch.nn.functional as F
from colt5_attention.coor_descent import coor_descent
from einops import pack, unpack, repeat
try:
import triton
import triton.language as tl
except ImportError as e:
print('triton is not installed, please install by running `pip install triton -U --pre`')
exit()
# make sure it is latest triton
from packaging import version
assert version.parse(triton.__version__) >= version.parse('2.0')
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def calc_num_warps(block_size):
num_warps = 4
if block_size >= 2048:
num_warps = 8
if block_size >= 4096:
num_warps = 16
return num_warps
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def num_to_groups(num, groups):
assert 0 < groups <= num
floor = num // groups
remainder = num % groups
out = []
for ind in range(groups):
out.append(floor + int(ind < remainder))
assert sum(out) == num
return out
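# e.g. num_to_groups(20, 4) == [5, 5, 5, 5] and num_to_groups(10, 3) == [4, 3, 3]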
# forwards
@triton.jit
def coor_descent_kernel_forward(
a_ptr,
b_ptr,
input_ptr,
mask_ptr,
k_ptr,
a_iter_stride,
b_row_stride,
b_iter_stride,
input_row_stride,
mask_row_stride,
n_iters,
current_eps,
eps_decay,
eps,
n_cols,
BLOCK_SIZE: tl.constexpr
):
row_idx = tl.program_id(0)
col_offsets = tl.arange(0, BLOCK_SIZE)
col_mask = col_offsets < n_cols
# load mask as ints (for some reason as boolean breaks triton)
mask_start_ptr = mask_ptr + row_idx * mask_row_stride
mask_ptrs = mask_start_ptr + col_offsets
mask_ints = tl.load(mask_ptrs, mask = col_mask, other = 0)
mask = mask_ints == 1
# load a and b
a_ptr = a_ptr + row_idx
a = tl.load(a_ptr)
b_start_ptr = b_ptr + row_idx * b_row_stride
b_ptrs = b_start_ptr + col_offsets
b = tl.load(b_ptrs, mask = col_mask, other = 0)
# load the scores s
row_start_ptr = input_ptr + row_idx * input_row_stride
input_ptrs = row_start_ptr + col_offsets
s = tl.load(input_ptrs, mask = mask, other = -float('inf'))
# load k - controls the sparsity of output
k_ptr = k_ptr + row_idx
k = tl.load(k_ptr)
# initialize some constants
logk = tl.log(k)
for _ in range(n_iters):
a = (s + b) / current_eps
a = tl.where(mask, a, -float('inf'))
# stable log sum exp
a_max = tl.max(a, axis = 0)
a_minus_max = tl.where(mask, a - a_max, -float('inf'))
exp = tl.exp(a_minus_max)
sum_exp = tl.sum(exp, axis = 0)
log_sum_exp = tl.log(sum_exp) + a_max
a = current_eps * (logk - log_sum_exp)
# update b
b = s + a
b = tl.where(b >= 0., -b, 0.)
# decay epsilon, from epsilon-scaling
current_eps *= eps_decay
if current_eps < eps:
current_eps = eps
# store a and b for next round
next_a_ptrs = a_ptr + a_iter_stride
next_b_ptrs = b_ptrs + b_iter_stride
tl.store(next_a_ptrs, a)
tl.store(next_b_ptrs, b, mask = col_mask)
# backwards
@triton.jit
def coor_descent_kernel_backward(
dk_ptr,
input_ptr,
a_ptr,
b_ptr,
mask_ptr,
ds_ptr,
db_ptr,
k_ptr,
input_row_stride,
b_row_stride,
mask_row_stride,
ds_row_stride,
db_row_stride,
n_iters,
eps_init,
eps_decay,
eps,
n_cols,
BLOCK_SIZE: tl.constexpr
):
row_idx = tl.program_id(0)
col_offsets = tl.arange(0, BLOCK_SIZE)
# load and generate mask
col_mask = col_offsets < n_cols
# load mask as ints (for some reason as boolean breaks triton)
mask_start_ptr = mask_ptr + row_idx * mask_row_stride
mask_ptrs = mask_start_ptr + col_offsets
mask_ints = tl.load(mask_ptrs, mask = col_mask, other = 0)
mask = mask_ints == 1
# load a and b
a_ptr = a_ptr + row_idx
init_a = tl.load(a_ptr)
b_start_ptr = b_ptr + row_idx * b_row_stride
b_ptrs = b_start_ptr + col_offsets
init_b = tl.load(b_ptrs, mask = mask, other = 0)
# load input
row_start_ptr = input_ptr + row_idx * input_row_stride
input_ptrs = row_start_ptr + col_offsets
s = tl.load(input_ptrs, mask = mask, other = -float('inf'))
# load k - controls the sparsity of output
k_ptr = k_ptr + row_idx
k = tl.load(k_ptr)
logk = tl.log(k)
# load initial ds
ds_row_start_ptr = ds_ptr + row_idx * ds_row_stride
ds_ptrs = ds_row_start_ptr + col_offsets
ds = tl.load(ds_ptrs, mask = mask, other = 0.)
# load initial db
db_row_start_ptr = db_ptr + row_idx * db_row_stride
db_ptrs = db_row_start_ptr + col_offsets
db = tl.load(db_ptrs, mask = mask, other = 0.)
# load initial dk
dk_ptr = dk_ptr + row_idx
dk = tl.load(dk_ptr)
# temp variables
last_da = tl.sum(ds, axis = 0)
# backwards
for ind in range(n_iters):
a = init_a
b = init_b
sa = s * 0
softmax = s * 0
# calculate epsilon
current_eps = eps_init / eps_decay
# recompute
for _ in range(n_iters - ind):
# update epsilon
current_eps *= eps_decay
if current_eps < eps:
current_eps = eps
# updating a
sb = (s + b) / current_eps
sb = tl.where(mask, sb, -float('inf'))
# stable log sum exp
sb_max = tl.max(sb, axis = 0)
sb_minus_max = tl.where(mask, sb - sb_max, -float('inf'))
exp = tl.exp(sb_minus_max)
sum_exp = tl.sum(exp, axis = 0)
softmax = exp / sum_exp
log_sum_exp = tl.log(sum_exp) + sb_max
a = current_eps * (logk - log_sum_exp)
# update b
sa = s + a
b = tl.where(sa > 0., -sa, 0.)
# go backwards
dsa = db * tl.where(sa > 0, -1., 0.)
ds += dsa
da = tl.sum(dsa, axis = 0) + last_da
dk += da * current_eps
dsb = da * -softmax
ds += dsb
db = dsb
last_da = 0.
# store dk
tl.store(dk_ptr, dk)
# store ds
tl.store(ds_ptrs, ds, mask = col_mask)
# store db
tl.store(db_ptrs, db, mask = col_mask)
# function forwards and backwards
class _coor_descent(autograd.Function):
@classmethod
def forward(
self,
ctx,
x,
n_iters,
k,
eps,
eps_init,
eps_decay,
mask,
checkpoint_segments
):
assert n_iters > 0
assert x.is_cuda, 'triton coordinate descent must be on cuda'
batch, requires_grad, device, dtype = x.shape[0], x.requires_grad, x.device, x.dtype
if not exists(mask):
mask = torch.ones_like(x, dtype = torch.bool, device = x.device)
x, shape = pack_one(x, '* n')
mask, _ = pack_one(mask, '* n')
x = x.masked_fill(~mask, -torch.finfo(x.dtype).max)
mask_ints = mask.int()
epsilons = []
eps_init = default(eps_init, eps)
current_eps = float(max(eps_init, eps))
n_rows, n_cols = x.shape
if isinstance(k, (int, float)):
k = torch.full((n_rows,), k)
assert k.numel() == n_rows
k = k.to(x)
BLOCK_SIZE = triton.next_power_of_2(n_cols)
assert BLOCK_SIZE <= 131072, 'the maximum block size allowed is 131072 for triton cuda kernel - set the `route_block_size` for the CoordinateDescentRouter to be this value or less in order to uniformly route to get around this limitation'
num_warps = calc_num_warps(BLOCK_SIZE)
checkpointed_a = torch.empty((checkpoint_segments + 1, n_rows), device = device, dtype = dtype)
checkpointed_b = torch.empty((checkpoint_segments + 1, n_rows, n_cols), device = device, dtype = dtype)
checkpointed_a[0] = torch.zeros_like(k)
checkpointed_b[0] = -x
for ind, segment_iters in enumerate(num_to_groups(n_iters, checkpoint_segments)):
is_last = ind == (checkpoint_segments - 1)
epsilons.append(current_eps)
coor_descent_kernel_forward[(n_rows,)](
checkpointed_a[ind],
checkpointed_b[ind],
x,
mask_ints,
k,
checkpointed_a.stride(0),
n_cols,
checkpointed_b.stride(0),
x.stride(0),
mask_ints.stride(0),
segment_iters,
current_eps,
eps_decay,
eps,
n_cols,
num_warps = num_warps,
BLOCK_SIZE = BLOCK_SIZE,
)
current_eps *= (eps_decay ** segment_iters)
current_eps = max(current_eps, eps)
last_a, last_b = map(lambda t: t[-1], (checkpointed_a, checkpointed_b))
y = torch.exp((last_a[..., None] + last_b + x) / current_eps)
epsilons.append(current_eps)
if requires_grad:
checkpointed_a = checkpointed_a[:-1]
checkpointed_b = checkpointed_b[:-1]
ctx.args = (n_iters, checkpoint_segments, epsilons, eps_decay, eps)
ctx.save_for_backward(x, y, k, mask, checkpointed_a, checkpointed_b)
y = unpack_one(y, shape, '* n')
return y
@classmethod
def backward(
self,
ctx,
grad_probs
):
assert grad_probs.is_cuda
batch = grad_probs.shape[0]
n_iters, checkpoint_segments, epsilons, eps_decay, eps = ctx.args
x, y, k, mask, checkpointed_a, checkpointed_b = ctx.saved_tensors
grad_probs, shape = pack_one(grad_probs, '* n')
if exists(mask):
grad_probs = grad_probs.masked_fill(~mask, 0.)
n_rows, n_cols = grad_probs.shape
BLOCK_SIZE = triton.next_power_of_2(n_cols)
num_warps = calc_num_warps(BLOCK_SIZE)
*epsilons, last_eps = epsilons
ds = grad_probs * y / last_eps
db = ds.clone()
dk = torch.zeros_like(k)
mask_int = mask.int()
items = zip(
reversed(checkpointed_a.unbind(dim = 0)),
reversed(checkpointed_b.unbind(dim = 0)),
reversed(num_to_groups(n_iters, checkpoint_segments)),
reversed(epsilons)
)
        for init_a, init_b, segment_iters, eps_init in items:
coor_descent_kernel_backward[(n_rows,)](
dk,
x,
init_a,
init_b,
mask_int,
ds,
db,
k,
x.stride(0),
init_b.stride(0),
mask_int.stride(0),
ds.stride(0),
db.stride(0),
segment_iters,
eps_init,
eps_decay,
eps,
n_cols,
num_warps = num_warps,
BLOCK_SIZE = BLOCK_SIZE
)
ds += -db
ds = unpack_one(ds, shape, '* n')
if not k.requires_grad:
dk = None
else:
dk /= k
return ds, None, dk, None, None, None, None, None
def triton_coor_descent(
s,
*,
n_iters,
k,
eps = 1e-1,
eps_init = None,
eps_decay = 1.,
mask = None,
checkpoint_segments = 1
):
if not s.is_cuda:
return coor_descent(s, n_iters = n_iters, k = k, eps = eps, eps_init = eps_init, eps_decay = eps_decay, mask = mask)
return _coor_descent.apply(s, n_iters, k, eps, eps_init, eps_decay, mask, checkpoint_segments)
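# sanity-check sketch (illustrative - assumes a cuda device with triton installed):
# the fused kernel should closely match the reference pure-pytorch implementation
if __name__ == '__main__':
    s = torch.randn(4, 1024).cuda()
    out_ref = coor_descent(s, n_iters = 20, k = 16)
    out_fused = triton_coor_descent(s, n_iters = 20, k = 16, checkpoint_segments = 4)
    assert torch.allclose(out_ref, out_fused, atol = 1e-4)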
| CoLT5-attention-main | colt5_attention/triton_coor_descent.py |
from colt5_attention.transformer_block import (
ConditionalRoutedFeedForward,
ConditionalRoutedAttention,
ConditionalRoutedImageAttention,
ConditionalRoutedAutoregressiveAttention,
ConditionalRoutedCrossAttention,
ConditionalRoutedTransformerBlock,
CoordinateDescentRouter
)
from colt5_attention.coor_descent import coor_descent
from colt5_attention.topk import topk
from colt5_attention.vit import ConditionalRoutedViT
| CoLT5-attention-main | colt5_attention/__init__.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
use_flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash = use_flash
assert not (use_flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask) and mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.use_flash:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
if mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b h j d -> b h i d", attn, v)
return out
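# usage sketch (illustrative shapes): Attend consumes already-projected multi-head
# tensors of shape (batch, heads, seq, dim_head)
if __name__ == '__main__':
    attend = Attend(causal = True, use_flash = False)
    q = k = v = torch.randn(1, 8, 1024, 64)
    out = attend(q, k, v)  # (1, 8, 1024, 64)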
| CoLT5-attention-main | colt5_attention/attend.py |
import torch
import torch.nn.functional as F
from einops import rearrange
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def coor_descent(
s,
*,
n_iters,
k,
eps = 1e-1,
eps_init = None,
eps_decay = 1.,
mask = None
):
"""
coordinate descent - https://arxiv.org/abs/1502.04759, utilized in https://arxiv.org/abs/2303.09752
ε-scaling - https://arxiv.org/abs/1610.06519, utilized in https://arxiv.org/abs/2304.04947
in a follow up paper applying coordinate descent routing to efficient fine tuning
they were able to cut n_iters from 50 -> 20 by setting eps_init = 4 and eps_decay = 0.7
eps was dependent on the task, and ranged from 0.02 to 1
"""
assert n_iters > 0
mask_value = -torch.finfo(s.dtype).max
if not isinstance(k, torch.Tensor):
k = torch.Tensor([k]).to(s)
else:
k = rearrange(k, '... -> ... 1')
logk = log(k)
if exists(mask):
s = s.masked_fill(~mask, mask_value)
a = 0
b = -s
current_eps = max(default(eps_init, eps), eps)
for _ in range(n_iters):
sb = ((s + b) / current_eps)
if exists(mask):
sb = sb.masked_fill(~mask, mask_value)
a = current_eps * (logk - sb.logsumexp(dim = -1, keepdim = True))
b = -F.relu(s + a)
current_eps = max(current_eps * eps_decay, eps)
scores = ((s + a + b) / current_eps).exp()
if exists(mask):
scores = scores.masked_fill(~mask, 0.)
return scores
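# a minimal usage sketch (illustrative shapes): the returned soft selection scores
# approximately sum to k along the last dimension, so taking the largest scores
# recovers a differentiable top-k style routing
if __name__ == '__main__':
    s = torch.randn(2, 8, 512)
    scores = coor_descent(s, n_iters = 20, k = 64)
    print(scores.sum(dim = -1))  # each row sums to ~64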
| CoLT5-attention-main | colt5_attention/coor_descent.py |
import torch
from torch import nn
from einops import rearrange, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from colt5_attention.transformer_block import (
ConditionalRoutedImageAttention,
ConditionalRoutedFeedForward
)
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
def posemb_sincos_2d(patches, temperature = 10000, dtype = torch.float32):
_, h, w, dim, device, dtype = *patches.shape, patches.device, patches.dtype
y, x = torch.meshgrid(torch.arange(h, device = device), torch.arange(w, device = device), indexing = 'ij')
assert (dim % 4) == 0, 'feature dimension must be multiple of 4 for sincos emb'
omega = torch.arange(dim // 4, device = device) / (dim // 4 - 1)
omega = 1. / (temperature ** omega)
y = y.flatten()[:, None] * omega[None, :]
x = x.flatten()[:, None] * omega[None, :]
pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim = 1)
pe = pe.type(dtype)
return rearrange(pe, '(h w) d -> h w d', h = h, w = w)
# classes
class Transformer(nn.Module):
def __init__(
self,
dim,
depth,
attn_num_heavy_tokens_q,
attn_num_heavy_tokens_kv,
attn_light_dim_head,
attn_light_heads,
attn_light_window_size,
attn_heavy_dim_head,
attn_heavy_heads,
ff_num_heavy_tokens,
ff_light_mult,
ff_heavy_mult,
router_straight_through = True,
router_kwargs: dict = {},
router_use_triton = False,
flash_attn = True,
attn_num_routed_kv = 1
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
ff = ConditionalRoutedFeedForward(
dim,
num_heavy_tokens = ff_num_heavy_tokens,
light_ff_mult = ff_light_mult,
heavy_ff_mult = ff_heavy_mult,
router_straight_through = router_straight_through,
router_kwargs = router_kwargs,
use_triton = router_use_triton
)
attn = ConditionalRoutedImageAttention(
dim,
num_heavy_tokens_q = attn_num_heavy_tokens_q,
num_heavy_tokens_kv = attn_num_heavy_tokens_kv,
num_routed_kv = attn_num_routed_kv,
light_dim_head = attn_light_dim_head,
light_heads = attn_light_heads,
light_window_size = attn_light_window_size,
heavy_dim_head = attn_heavy_dim_head,
heavy_heads = attn_heavy_heads,
router_straight_through = router_straight_through,
router_kwargs = router_kwargs,
use_triton = router_use_triton,
use_flash_attn = flash_attn,
channel_first = False,
use_null_q_tokens = True
)
self.layers.append(nn.ModuleList([attn, ff]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x, ps = pack([x], 'b * d')
x = ff(x) + x
x, = unpack(x, ps, 'b * d')
return x
class ConditionalRoutedViT(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
num_classes,
dim,
depth,
attn_num_heavy_tokens_q,
attn_num_heavy_tokens_kv,
attn_heavy_dim_head,
attn_heavy_heads,
attn_light_dim_head,
attn_light_heads,
attn_light_window_size,
ff_num_heavy_tokens,
ff_heavy_mult,
ff_light_mult,
channels = 3,
router_straight_through = True,
router_kwargs: dict = {},
router_use_triton = False,
flash_attn = True,
attn_num_routed_kv = 1,
default_coor_descent_eps = 1.
):
super().__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_height // patch_height) * (image_width // patch_width)
patch_dim = channels * patch_height * patch_width
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b h w (p1 p2 c)', p1 = patch_height, p2 = patch_width),
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim),
)
# not sure what the correct epsilon is for images, but for a recent paper, they used 1. for speech, 0.02 for text
# images are probably closer to speech than to text
router_kwargs = {'eps': default_coor_descent_eps, **router_kwargs}
self.transformer = Transformer(
dim,
depth,
attn_num_heavy_tokens_q,
attn_num_heavy_tokens_kv,
attn_light_dim_head,
attn_light_heads,
attn_light_window_size,
attn_heavy_dim_head,
attn_heavy_heads,
ff_num_heavy_tokens,
ff_light_mult,
ff_heavy_mult,
router_straight_through,
router_kwargs,
router_use_triton,
flash_attn,
attn_num_routed_kv
)
self.linear_head = nn.Sequential(
Reduce('b h w c -> b c', 'mean'),
nn.LayerNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, img):
*_, h, w, dtype = *img.shape, img.dtype
x = self.to_patch_embedding(img)
x = x + posemb_sincos_2d(x)
x = self.transformer(x)
return self.linear_head(x)
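# usage sketch (illustrative hyperparameters): route a small subset of patch tokens
# through the heavy attention and feedforward branches of every block
if __name__ == '__main__':
    vit = ConditionalRoutedViT(
        image_size = 256,
        patch_size = 32,
        num_classes = 1000,
        dim = 512,
        depth = 6,
        attn_num_heavy_tokens_q = 8,
        attn_num_heavy_tokens_kv = 8,
        attn_heavy_dim_head = 64,
        attn_heavy_heads = 8,
        attn_light_dim_head = 32,
        attn_light_heads = 4,
        attn_light_window_size = 4,
        ff_num_heavy_tokens = 16,
        ff_heavy_mult = 4,
        ff_light_mult = 2
    )
    images = torch.randn(1, 3, 256, 256)
    logits = vit(images)  # (1, 1000)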
| CoLT5-attention-main | colt5_attention/vit.py |
import torch
from collections import namedtuple
from colt5_attention.coor_descent import coor_descent
TopkReturn = namedtuple('TopkReturn', ['values', 'indices', 'coor_descent_values', 'gates'])
def topk(
x,
k,
coor_descent_k_ratio = 9 / 8,
n_iters = 20,
eps = 1e-1,
eps_init = None,
eps_decay = 1.,
mask = None,
fused = False,
non_differentiable = False
):
"""
differentiable top-k on last dimension
"""
if non_differentiable:
values, indices = torch.topk(x, k = k, dim = -1)
return TopkReturn(values, indices, None, None)
assert coor_descent_k_ratio >= 1.
assert k > 0
# whether to used fused kernel or not
fn = coor_descent
if fused and x.is_cuda:
from colt5_attention.triton_coor_descent import triton_coor_descent
fn = triton_coor_descent
# do coordinate descent for gradients
coor_descent_out = fn(
x,
k = min(k * coor_descent_k_ratio, x.shape[-1]), # fetch a bit more for better learning, as in CoLT5 paper (they fetched 9 / 8 times more)
mask = mask,
n_iters = n_iters,
eps = eps,
eps_init = eps_init,
eps_decay = eps_decay
)
# do straight through
gates = coor_descent_out + (1 - coor_descent_out).detach()
x = x * gates
# hard topk
values, indices = torch.topk(x, k, dim = -1)
# return something that looks like a usual topk, but now differentiable
coor_descent_values = coor_descent_out.gather(-1, indices)
gates = gates.gather(-1, indices)
return TopkReturn(values, indices, coor_descent_values, gates)
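# usage sketch (illustrative): a differentiable drop-in for torch.topk on the last
# dimension - gradients reach `x` through the straight-through coordinate descent gates
if __name__ == '__main__':
    x = torch.randn(2, 512, requires_grad = True)
    values, indices, coor_descent_values, gates = topk(x, k = 32)
    values.sum().backward()
    print(x.grad.shape)  # torch.Size([2, 512])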
| CoLT5-attention-main | colt5_attention/topk.py |
from setuptools import setup, find_packages
setup(
name = 'FLASH-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.8',
license='MIT',
description = 'FLASH - Transformer Quality in Linear Time - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/FLASH-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism'
],
install_requires=[
'einops>=0.4',
'rotary-embedding-torch>=0.1.5',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| FLASH-pytorch-main | setup.py |
from flash_pytorch import FLASHTransformer
from flash_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = FLASHTransformer(
num_tokens = 256,
dim = 512,
depth = 8,
causal = True,
group_size = 256,
shift_tokens = True,
laplace_attn_fn = True
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy() # frombuffer returns a read-only view, so copy before torch.from_numpy
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str)
| FLASH-pytorch-main | train.py |
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
from rotary_embedding_torch import RotaryEmbedding
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def padding_to_multiple_of(n, mult):
remainder = n % mult
if remainder == 0:
return 0
return mult - remainder
# scalenorm
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(1))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
return x / norm.clamp(min = self.eps) * self.g
# absolute positional encodings
class ScaledSinuEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = nn.Parameter(torch.ones(1,))
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, x):
n, device = x.shape[1], x.device
t = torch.arange(n, device = device).type_as(self.inv_freq)
sinu = einsum('i , j -> i j', t, self.inv_freq)
emb = torch.cat((sinu.sin(), sinu.cos()), dim = -1)
return emb * self.scale
# T5 relative positional bias
class T5RelativePositionBias(nn.Module):
def __init__(
self,
scale,
causal = False,
num_buckets = 32,
max_distance = 128
):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, 1)
@staticmethod
def _relative_position_bucket(
relative_position,
causal = True,
num_buckets = 32,
max_distance = 128
):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def forward(self, x):
i, j, device = *x.shape[-2:], x.device
q_pos = torch.arange(i, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = rearrange(k_pos, 'j -> 1 j') - rearrange(q_pos, 'i -> i 1')
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j 1 -> i j')
return bias * self.scale
# class
class OffsetScale(nn.Module):
def __init__(self, dim, heads = 1):
super().__init__()
self.gamma = nn.Parameter(torch.ones(heads, dim))
self.beta = nn.Parameter(torch.zeros(heads, dim))
nn.init.normal_(self.gamma, std = 0.02)
def forward(self, x):
out = einsum('... d, h d -> ... h d', x, self.gamma) + self.beta
return out.unbind(dim = -2)
# activation functions
class ReLUSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
class LaplacianAttnFn(nn.Module):
""" https://arxiv.org/abs/2209.10655 claims this is more stable than Relu squared """
def forward(self, x):
mu = math.sqrt(0.5)
std = math.sqrt((4 * math.pi) ** -1)
return (1 + torch.special.erf((x - mu) / (std * math.sqrt(2)))) * 0.5
# gated attention unit
class GAU(nn.Module):
def __init__(
self,
*,
dim,
query_key_dim = 128,
expansion_factor = 2.,
add_residual = True,
causal = False,
dropout = 0.,
laplace_attn_fn = False,
rel_pos_bias = False,
norm_klass = nn.LayerNorm
):
super().__init__()
hidden_dim = int(expansion_factor * dim)
self.norm = norm_klass(dim)
self.causal = causal
self.dropout = nn.Dropout(dropout)
self.attn_fn = ReLUSquared() if not laplace_attn_fn else LaplacianAttnFn()
        self.rel_pos_bias = T5RelativePositionBias(scale = dim ** 0.5, causal = causal) if rel_pos_bias else None
self.to_hidden = nn.Sequential(
nn.Linear(dim, hidden_dim * 2),
nn.SiLU()
)
self.to_qk = nn.Sequential(
nn.Linear(dim, query_key_dim),
nn.SiLU()
)
self.offsetscale = OffsetScale(query_key_dim, heads = 2)
self.to_out = nn.Sequential(
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
self.add_residual = add_residual
def forward(
self,
x,
rel_pos_bias = None,
mask = None
):
seq_len, device = x.shape[-2], x.device
normed_x = self.norm(x)
v, gate = self.to_hidden(normed_x).chunk(2, dim = -1)
qk = self.to_qk(normed_x)
q, k = self.offsetscale(qk)
sim = einsum('b i d, b j d -> b i j', q, k)
if exists(self.rel_pos_bias):
sim = sim + self.rel_pos_bias(sim)
if exists(rel_pos_bias):
sim = sim + rel_pos_bias
attn = self.attn_fn(sim / seq_len)
attn = self.dropout(attn)
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 j')
attn = attn.masked_fill(~mask, 0.)
if self.causal:
causal_mask = torch.ones((seq_len, seq_len), dtype = torch.bool, device = device).triu(1)
attn = attn.masked_fill(causal_mask, 0.)
out = einsum('b i j, b j d -> b i d', attn, v)
out = out * gate
out = self.to_out(out)
if self.add_residual:
out = out + x
return out
# FLASH
class FLASH(nn.Module):
def __init__(
self,
*,
dim,
group_size = 256,
query_key_dim = 128,
expansion_factor = 2.,
causal = False,
dropout = 0.,
rotary_pos_emb = None,
norm_klass = nn.LayerNorm,
shift_tokens = False,
laplace_attn_fn = False,
reduce_group_non_causal_attn = True
):
super().__init__()
hidden_dim = int(dim * expansion_factor)
self.group_size = group_size
self.causal = causal
self.shift_tokens = shift_tokens
self.attn_fn = ReLUSquared() if not laplace_attn_fn else LaplacianAttnFn()
# positional embeddings
self.rotary_pos_emb = rotary_pos_emb
self.rel_pos_bias = T5RelativePositionBias(query_key_dim ** 0.5, causal = causal)
# norm
self.norm = norm_klass(dim)
self.dropout = nn.Dropout(dropout)
# whether to reduce groups in non causal linear attention
self.reduce_group_non_causal_attn = reduce_group_non_causal_attn
# projections
self.to_hidden = nn.Sequential(
nn.Linear(dim, hidden_dim * 2),
nn.SiLU()
)
self.to_qk = nn.Sequential(
nn.Linear(dim, query_key_dim),
nn.SiLU()
)
self.qk_offset_scale = OffsetScale(query_key_dim, heads = 4)
self.to_out = nn.Linear(hidden_dim, dim)
def forward(
self,
x,
*,
mask = None
):
"""
b - batch
n - sequence length (within groups)
g - group dimension
d - feature dimension (keys)
e - feature dimension (values)
i - sequence dimension (source)
j - sequence dimension (target)
"""
b, n, device, g = x.shape[0], x.shape[-2], x.device, self.group_size
# prenorm
normed_x = self.norm(x)
# do token shift - a great, costless trick from an independent AI researcher in Shenzhen
if self.shift_tokens:
x_shift, x_pass = normed_x.chunk(2, dim = -1)
x_shift = F.pad(x_shift, (0, 0, 1, -1), value = 0.)
normed_x = torch.cat((x_shift, x_pass), dim = -1)
# initial projections
v, gate = self.to_hidden(normed_x).chunk(2, dim = -1)
qk = self.to_qk(normed_x)
# offset and scale
quad_q, lin_q, quad_k, lin_k = self.qk_offset_scale(qk)
# mask out linear attention keys
if exists(mask):
lin_mask = rearrange(mask, '... -> ... 1')
lin_k = lin_k.masked_fill(~lin_mask, 0.)
# rotate queries and keys
if exists(self.rotary_pos_emb):
quad_q, lin_q, quad_k, lin_k = map(self.rotary_pos_emb.rotate_queries_or_keys, (quad_q, lin_q, quad_k, lin_k))
# padding for groups
padding = padding_to_multiple_of(n, g)
if padding > 0:
quad_q, quad_k, lin_q, lin_k, v = map(lambda t: F.pad(t, (0, 0, 0, padding), value = 0.), (quad_q, quad_k, lin_q, lin_k, v))
mask = default(mask, torch.ones((b, n), device = device, dtype = torch.bool))
mask = F.pad(mask, (0, padding), value = False)
# group along sequence
quad_q, quad_k, lin_q, lin_k, v = map(lambda t: rearrange(t, 'b (n g) d -> b n g d', g = self.group_size), (quad_q, quad_k, lin_q, lin_k, v))
if exists(mask):
mask = rearrange(mask, 'b (g j) -> b g 1 j', j = g)
# calculate quadratic attention output
sim = einsum('... i d, ... j d -> ... i j', quad_q, quad_k) / g
sim = sim + self.rel_pos_bias(sim)
attn = self.attn_fn(sim)
attn = self.dropout(attn)
if exists(mask):
attn = attn.masked_fill(~mask, 0.)
if self.causal:
causal_mask = torch.ones((g, g), dtype = torch.bool, device = device).triu(1)
attn = attn.masked_fill(causal_mask, 0.)
quad_out = einsum('... i j, ... j d -> ... i d', attn, v)
# calculate linear attention output
if self.causal:
lin_kv = einsum('b g n d, b g n e -> b g d e', lin_k, v) / g
# exclusive cumulative sum along group dimension
lin_kv = lin_kv.cumsum(dim = 1)
lin_kv = F.pad(lin_kv, (0, 0, 0, 0, 1, -1), value = 0.)
lin_out = einsum('b g d e, b g n d -> b g n e', lin_kv, lin_q)
else:
context_einsum_eq = 'b d e' if self.reduce_group_non_causal_attn else 'b g d e'
lin_kv = einsum(f'b g n d, b g n e -> {context_einsum_eq}', lin_k, v) / n
lin_out = einsum(f'b g n d, {context_einsum_eq} -> b g n e', lin_q, lin_kv)
# fold back groups into full sequence, and excise out padding
quad_attn_out, lin_attn_out = map(lambda t: rearrange(t, 'b g n d -> b (g n) d')[:, :n], (quad_out, lin_out))
# gate
out = gate * (quad_attn_out + lin_attn_out)
# projection out and residual
return self.to_out(out) + x
# FLASH Transformer
class FLASHTransformer(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
depth,
group_size = 256,
query_key_dim = 128,
expansion_factor = 2.,
causal = False,
attn_dropout = 0.,
norm_type = 'scalenorm',
shift_tokens = True,
laplace_attn_fn = False,
reduce_group_non_causal_attn = True
):
super().__init__()
assert norm_type in ('scalenorm', 'layernorm'), 'norm_type must be one of scalenorm or layernorm'
if norm_type == 'scalenorm':
norm_klass = ScaleNorm
elif norm_type == 'layernorm':
norm_klass = nn.LayerNorm
self.token_emb = nn.Embedding(num_tokens, dim)
self.abs_pos_emb = ScaledSinuEmbedding(dim)
self.group_size = group_size
rotary_pos_emb = RotaryEmbedding(dim = min(32, query_key_dim))
# max rotary embedding dimensions of 32, partial Rotary embeddings, from Wang et al - GPT-J
        self.layers = nn.ModuleList([FLASH(
            dim = dim,
            group_size = group_size,
            query_key_dim = query_key_dim,
            expansion_factor = expansion_factor,
            causal = causal,
            dropout = attn_dropout,
            rotary_pos_emb = rotary_pos_emb,
            norm_klass = norm_klass,
            shift_tokens = shift_tokens,
            reduce_group_non_causal_attn = reduce_group_non_causal_attn,
            laplace_attn_fn = laplace_attn_fn
        ) for _ in range(depth)])
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(
self,
x,
*,
mask = None
):
x = self.token_emb(x)
x = self.abs_pos_emb(x) + x
for flash in self.layers:
x = flash(x, mask = mask)
return self.to_logits(x)
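# usage sketch (illustrative shapes): the gated attention unit acts on feature
# tensors, while the full transformer maps token ids to logits
if __name__ == '__main__':
    gau = GAU(dim = 512, query_key_dim = 128, causal = True)
    x = torch.randn(1, 1024, 512)
    assert gau(x).shape == (1, 1024, 512)

    model = FLASHTransformer(num_tokens = 256, dim = 512, depth = 2, causal = True)
    tokens = torch.randint(0, 256, (1, 1024))
    logits = model(tokens)  # (1, 1024, 256)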
| FLASH-pytorch-main | flash_pytorch/flash_pytorch.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.net = net
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_thres = 0.9, **kwargs):
b, t, device = *start_tokens.shape, start_tokens.device
out = start_tokens
for _ in range(seq_len):
logits = self.net(out, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
                is_eos_tokens = (out == eos_token)
                if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
return out
def forward(self, x, **kwargs):
x_inp, x_labels = x[:, :-1], x[:, 1:]
logits = self.net(x_inp, **kwargs)
        return F.cross_entropy(rearrange(logits, 'b n c -> b c n'), x_labels)
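# usage sketch (illustrative): wrap any network whose forward maps token ids of shape
# (batch, seq) to logits of shape (batch, seq, num_tokens), train on the shifted
# cross entropy above, then sample autoregressively from a prompt
if __name__ == '__main__':
    from flash_pytorch import FLASHTransformer
    net = FLASHTransformer(num_tokens = 256, dim = 512, depth = 2, causal = True)
    model = AutoregressiveWrapper(net)
    seq = torch.randint(0, 256, (1, 512))
    loss = model(seq)                         # one training step worth of loss
    sampled = model.generate(seq[:, :1], 16)  # (1, 16)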
| FLASH-pytorch-main | flash_pytorch/autoregressive_wrapper.py |
from flash_pytorch.flash_pytorch import GAU, FLASH, FLASHTransformer
| FLASH-pytorch-main | flash_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'En-transformer',
packages = find_packages(),
version = '1.2.0',
license='MIT',
description = 'E(n)-Equivariant Transformer',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/En-transformer',
keywords = [
'artificial intelligence',
'deep learning',
'equivariance',
'transformer'
],
install_requires=[
'einops>=0.3',
'torch>=1.7'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| En-transformer-main | setup.py |
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import Adam
from einops import rearrange, repeat
import sidechainnet as scn
from en_transformer.en_transformer import EnTransformer
torch.set_default_dtype(torch.float64)
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY = 16
def cycle(loader, len_thres = 200):
while True:
for data in loader:
if data.seqs.shape[1] > len_thres:
continue
yield data
transformer = EnTransformer(
num_tokens = 21,
dim = 32,
dim_head = 64,
heads = 4,
depth = 4,
rel_pos_emb = True, # there is inherent order in the sequence (backbone atoms of amino acid chain)
neighbors = 16
)
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = BATCH_SIZE,
dynamic_batching = False
)
dl = cycle(data['train'])
optim = Adam(transformer.parameters(), lr=1e-3)
transformer = transformer.cuda()
for _ in range(10000):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
batch = next(dl)
seqs, coords, masks = batch.seqs, batch.crds, batch.msks
seqs = seqs.cuda().argmax(dim = -1)
coords = coords.cuda().type(torch.float64)
masks = masks.cuda().bool()
l = seqs.shape[1]
coords = rearrange(coords, 'b (l s) c -> b l s c', s = 14)
# keeping only the backbone coordinates
coords = coords[:, :, 0:3, :]
coords = rearrange(coords, 'b l s c -> b (l s) c')
seq = repeat(seqs, 'b n -> b (n c)', c = 3)
masks = repeat(masks, 'b n -> b (n c)', c = 3)
noised_coords = coords + torch.randn_like(coords)
feats, denoised_coords = transformer(seq, noised_coords, mask = masks)
loss = F.mse_loss(denoised_coords[masks], coords[masks])
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print('loss:', loss.item())
optim.step()
optim.zero_grad()
| En-transformer-main | denoise.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.utils.checkpoint import checkpoint_sequential
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
# helper functions
def exists(val):
return val is not None
def max_neg_value(t):
return -torch.finfo(t.dtype).max
def default(val, d):
return val if exists(val) else d
def l2norm(t):
return F.normalize(t, dim = -1)
def small_init_(t: nn.Linear):
nn.init.normal_(t.weight, std = 0.02)
nn.init.zeros_(t.bias)
def batched_index_select(values, indices, dim = 1):
value_dims = values.shape[(dim + 1):]
values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices))
indices = indices[(..., *((None,) * len(value_dims)))]
indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims)
value_expand_len = len(indices_shape) - (dim + 1)
values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)]
value_expand_shape = [-1] * len(values.shape)
expand_slice = slice(dim, (dim + value_expand_len))
value_expand_shape[expand_slice] = indices.shape[expand_slice]
values = values.expand(*value_expand_shape)
dim += value_expand_len
return values.gather(dim, indices)
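# e.g. for values of shape (2, 16, 32) and per-batch indices of shape (2, 4),
# batched_index_select(values, indices, dim = 1) returns a (2, 4, 32) tensor,
# gathering a different set of rows for every batch element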
# dynamic positional bias
class DynamicPositionBias(nn.Module):
def __init__(
self,
dim,
*,
heads,
depth,
dim_head,
input_dim = 1,
norm = True
):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.mlp = nn.ModuleList([])
self.mlp.append(nn.Sequential(
nn.Linear(input_dim, dim),
nn.LayerNorm(dim) if norm else nn.Identity(),
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(nn.Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else nn.Identity(),
nn.SiLU()
))
self.heads = heads
self.qk_pos_head = nn.Linear(dim, heads)
self.value_pos_head = nn.Linear(dim, dim_head * heads)
def forward(self, pos):
for layer in self.mlp:
pos = layer(pos)
qk_pos = self.qk_pos_head(pos)
value_pos = self.value_pos_head(pos)
qk_pos = rearrange(qk_pos, 'b 1 i j h -> b h i j')
value_pos = rearrange(value_pos, 'b 1 i j (h d) -> b h i j d', h = self.heads)
return qk_pos, value_pos
# classes
# this follows the same strategy for normalization as done in SE3 Transformers
# https://github.com/lucidrains/se3-transformer-pytorch/blob/main/se3_transformer_pytorch/se3_transformer_pytorch.py#L95
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer('beta', torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
class CoorsNorm(nn.Module):
def __init__(self, eps = 1e-8, scale_init = 1.):
super().__init__()
self.eps = eps
scale = torch.zeros(1).fill_(scale_init)
self.scale = nn.Parameter(scale)
def forward(self, coors):
norm = coors.norm(dim = -1, keepdim = True)
normed_coors = coors / norm.clamp(min = self.eps)
return normed_coors * self.scale
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, feats, coors, **kwargs):
feats_out, coors_delta = self.fn(feats, coors, **kwargs)
return feats + feats_out, coors + coors_delta
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(
self,
*,
dim,
mult = 4,
dropout = 0.
):
super().__init__()
inner_dim = int(dim * mult * 2 / 3)
self.net = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
LayerNorm(inner_dim),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim, bias = False)
)
def forward(self, feats, coors):
return self.net(feats), 0
class LinearAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8
):
super().__init__()
self.heads = heads
self.dim_hidden = dim_head * heads
self.to_qkv = nn.Linear(dim, self.dim_hidden * 3)
def forward(self, x, mask = None):
has_degree_m_dim = x.ndim == 4
if has_degree_m_dim:
x = rearrange(x, '... 1 -> ...')
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
if exists(mask):
mask = rearrange(mask, 'b n -> b 1 n 1')
k = k.masked_fill(~mask, -torch.finfo(q.dtype).max)
v = v.masked_fill(~mask, 0.)
k = k.softmax(dim = -2)
q = q.softmax(dim = -1)
kv = einsum('b h n d, b h n e -> b h d e', k, v)
out = einsum('b h d e, b h n d -> b h n e', kv, q)
out = rearrange(out, 'b h n d -> b n (h d)')
if has_degree_m_dim:
out = rearrange(out, '... -> ... 1')
return out
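# Usage sketch (hypothetical sizes): linear attention runs in O(n) over all
# nodes and is used below to give some heads a global receptive field
# alongside the local neighborhood attention.
def _example_linear_attention():
    attn = LinearAttention(dim = 64, dim_head = 32, heads = 2)
    x = torch.randn(1, 128, 64)
    mask = torch.ones(1, 128).bool()
    out = attn(x, mask = mask)
    assert out.shape == (1, 128, attn.dim_hidden)  # dim_hidden = heads * dim_head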
class EquivariantAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 4,
edge_dim = 0,
coors_hidden_dim = 16,
neighbors = 0,
only_sparse_neighbors = False,
valid_neighbor_radius = float('inf'),
init_eps = 1e-3,
rel_pos_emb = None,
edge_mlp_mult = 2,
norm_rel_coors = True,
norm_coors_scale_init = 1.,
use_cross_product = False,
talking_heads = False,
dropout = 0.,
num_global_linear_attn_heads = 0
):
super().__init__()
self.scale = dim_head ** -0.5
self.norm = LayerNorm(dim)
self.neighbors = neighbors
self.only_sparse_neighbors = only_sparse_neighbors
self.valid_neighbor_radius = valid_neighbor_radius
attn_inner_dim = heads * dim_head
self.heads = heads
self.has_linear_attn = num_global_linear_attn_heads > 0
self.linear_attn = LinearAttention(dim = dim, dim_head = dim_head, heads = num_global_linear_attn_heads)
self.to_qkv = nn.Linear(dim, attn_inner_dim * 3, bias = False)
self.to_out = nn.Linear(attn_inner_dim + self.linear_attn.dim_hidden, dim)
self.talking_heads = nn.Conv2d(heads, heads, 1, bias = False) if talking_heads else None
self.edge_mlp = None
has_edges = edge_dim > 0
if has_edges:
edge_input_dim = heads + edge_dim
edge_hidden = edge_input_dim * edge_mlp_mult
self.edge_mlp = nn.Sequential(
nn.Linear(edge_input_dim, edge_hidden, bias = False),
nn.GELU(),
nn.Linear(edge_hidden, heads, bias = False)
)
self.coors_mlp = nn.Sequential(
nn.GELU(),
nn.Linear(heads, heads, bias = False)
)
else:
self.coors_mlp = nn.Sequential(
nn.Linear(heads, coors_hidden_dim, bias = False),
nn.GELU(),
nn.Linear(coors_hidden_dim, heads, bias = False)
)
self.coors_gate = nn.Linear(heads, heads)
small_init_(self.coors_gate)
self.use_cross_product = use_cross_product
if use_cross_product:
self.cross_coors_mlp = nn.Sequential(
nn.Linear(heads, coors_hidden_dim, bias = False),
nn.GELU(),
nn.Linear(coors_hidden_dim, heads * 2, bias = False)
)
self.cross_coors_gate_i = nn.Linear(heads, heads)
self.cross_coors_gate_j = nn.Linear(heads, heads)
small_init_(self.cross_coors_gate_i)
small_init_(self.cross_coors_gate_j)
self.norm_rel_coors = CoorsNorm(scale_init = norm_coors_scale_init) if norm_rel_coors else nn.Identity()
num_coors_combine_heads = (2 if use_cross_product else 1) * heads
self.coors_combine = nn.Parameter(torch.randn(num_coors_combine_heads))
# positional embedding
# biases attention both along the sequence (if rel_pos_emb is set) and by the relative distance between each residue / atom
self.rel_pos_emb = rel_pos_emb
self.dynamic_pos_bias_mlp = DynamicPositionBias(
dim = dim // 2,
heads = heads,
dim_head = dim_head,
depth = 3,
input_dim = (2 if rel_pos_emb else 1)
)
# dropouts
self.node_dropout = nn.Dropout(dropout)
self.coor_dropout = nn.Dropout(dropout)
# init
self.init_eps = init_eps
self.apply(self.init_)
def init_(self, module):
if type(module) in {nn.Linear}:
nn.init.normal_(module.weight, std = self.init_eps)
def forward(
self,
feats,
coors,
edges = None,
mask = None,
adj_mat = None
):
b, n, d, h, num_nn, only_sparse_neighbors, valid_neighbor_radius, device = *feats.shape, self.heads, self.neighbors, self.only_sparse_neighbors, self.valid_neighbor_radius, feats.device
_mask = mask
feats = self.norm(feats)
assert not (only_sparse_neighbors and not exists(adj_mat)), 'adjacency matrix must be passed in if only_sparse_neighbors is turned on'
if exists(mask):
num_nodes = mask.sum(dim = -1)
rel_coors = rearrange(coors, 'b i d -> b i 1 d') - rearrange(coors, 'b j d -> b 1 j d')
rel_dist = rel_coors.norm(p = 2, dim = -1)
# calculate neighborhood indices
nbhd_indices = None
nbhd_masks = None
nbhd_ranking = rel_dist.clone()
if exists(adj_mat):
if len(adj_mat.shape) == 2:
adj_mat = repeat(adj_mat, 'i j -> b i j', b = b)
self_mask = torch.eye(n, device = device).bool()
self_mask = rearrange(self_mask, 'i j -> 1 i j')
adj_mat.masked_fill_(self_mask, False)
max_adj_neighbors = adj_mat.long().sum(dim = -1).max().item() + 1
num_nn = max_adj_neighbors if only_sparse_neighbors else (num_nn + max_adj_neighbors)
valid_neighbor_radius = 0 if only_sparse_neighbors else valid_neighbor_radius
nbhd_ranking = nbhd_ranking.masked_fill(self_mask, -1.)
nbhd_ranking = nbhd_ranking.masked_fill(adj_mat, 0.)
if 0 < num_nn < n:
# make sure padding does not end up becoming neighbors
if exists(mask):
ranking_mask = mask[:, :, None] * mask[:, None, :]
nbhd_ranking = nbhd_ranking.masked_fill(~ranking_mask, 1e5)
nbhd_values, nbhd_indices = nbhd_ranking.topk(num_nn, dim = -1, largest = False)
nbhd_masks = nbhd_values <= valid_neighbor_radius
# derive queries keys and values
q, k, v = self.to_qkv(feats).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# calculate nearest neighbors
i = j = n
if exists(nbhd_indices):
i, j = nbhd_indices.shape[-2:]
nbhd_indices_with_heads = repeat(nbhd_indices, 'b n d -> b h n d', h = h)
k = batched_index_select(k, nbhd_indices_with_heads, dim = 2)
v = batched_index_select(v, nbhd_indices_with_heads, dim = 2)
rel_dist = batched_index_select(rel_dist, nbhd_indices, dim = 2)
rel_coors = batched_index_select(rel_coors, nbhd_indices, dim = 2)
else:
k = repeat(k, 'b h j d -> b h n j d', n = n)
v = repeat(v, 'b h j d -> b h n j d', n = n)
# prepare mask
if exists(mask):
q_mask = rearrange(mask, 'b i -> b 1 i 1')
k_mask = repeat(mask, 'b j -> b i j', i = n)
if exists(nbhd_indices):
k_mask = batched_index_select(k_mask, nbhd_indices, dim = 2)
k_mask = rearrange(k_mask, 'b i j -> b 1 i j')
mask = q_mask * k_mask
if exists(nbhd_masks):
mask &= rearrange(nbhd_masks, 'b i j -> b 1 i j')
# generate dynamic position bias from squared relative distances (and sequence offsets, if rel_pos_emb)
rel_dist = -(rel_dist ** 2)
rel_dist = rearrange(rel_dist, 'b i j -> b 1 i j 1')
if self.rel_pos_emb:
seq = torch.arange(n, device = device, dtype = q.dtype)
seq_target_pos = nbhd_indices if exists(nbhd_indices) else rearrange(seq, 'j -> 1 1 j')
seq_rel_dist = rearrange(seq, 'i -> 1 i 1') - seq_target_pos
seq_rel_dist = repeat(seq_rel_dist, 'b i j -> b 1 i j 1', b = b)
rel_dist = torch.cat((rel_dist, seq_rel_dist), dim = -1)
qk_pos, value_pos = self.dynamic_pos_bias_mlp(rel_dist)
# calculate inner product for queries and keys
q = repeat(q, 'b h i d -> b h i j d', j = k.shape[-2])
# l2 distance
# -cdist(q, k).pow(2)
qk = -((q - k) ** 2).sum(dim = -1)
qk = qk * self.scale
# add relative positions to qk as well as values
qk = qk + qk_pos
v = v + value_pos
# add edge information and pass through edges MLP if needed
if exists(edges):
if exists(nbhd_indices):
edges = batched_index_select(edges, nbhd_indices, dim = 2)
qk = rearrange(qk, 'b h i j -> b i j h')
qk = torch.cat((qk, edges), dim = -1)
qk = self.edge_mlp(qk)
qk = rearrange(qk, 'b i j h -> b h i j')
# coordinate MLP and calculate coordinate updates
coors_mlp_input = rearrange(qk, 'b h i j -> b i j h')
coor_weights = self.coors_mlp(coors_mlp_input)
if exists(mask):
mask_value = max_neg_value(coor_weights)
coor_mask = repeat(mask, 'b 1 i j -> b i j 1')
coor_weights.masked_fill_(~coor_mask, mask_value)
coor_attn = coor_weights.softmax(dim = -2)
coor_attn = self.coor_dropout(coor_attn)
rel_coors_sign = self.coors_gate(coors_mlp_input)
rel_coors_sign = rearrange(rel_coors_sign, 'b i j h -> b i j 1 h')
if self.use_cross_product:
rel_coors_i = repeat(rel_coors, 'b n i c -> b n (i j) c', j = j)
rel_coors_j = repeat(rel_coors, 'b n j c -> b n (i j) c', i = j)
cross_coors = torch.cross(rel_coors_i, rel_coors_j, dim = -1)
cross_coors = self.norm_rel_coors(cross_coors)
cross_coors = repeat(cross_coors, 'b i j c -> b i j c h', h = h)
cross_coors_sign_i = self.cross_coors_gate_i(coors_mlp_input)
cross_coors_sign_j = self.cross_coors_gate_j(coors_mlp_input)
cross_coors_sign = rearrange(cross_coors_sign_i, 'b n i h -> b n i 1 h') * rearrange(cross_coors_sign_j, 'b n j h -> b n 1 j h')
cross_coors_sign = rearrange(cross_coors_sign, 'b n i j h -> b n (i j) 1 h')
cross_coors = cross_coors * cross_coors_sign
rel_coors = self.norm_rel_coors(rel_coors)
rel_coors = repeat(rel_coors, 'b i j c -> b i j c h', h = h)
rel_coors = rel_coors * rel_coors_sign
# cross product
if self.use_cross_product:
cross_weights = self.cross_coors_mlp(coors_mlp_input)
cross_weights = rearrange(cross_weights, 'b i j (h n) -> b i j h n', n = 2)
cross_weights_i, cross_weights_j = cross_weights.unbind(dim = -1)
cross_weights = rearrange(cross_weights_i, 'b n i h -> b n i 1 h') + rearrange(cross_weights_j, 'b n j h -> b n 1 j h')
if exists(mask):
cross_mask = (coor_mask[:, :, :, None, :] & coor_mask[:, :, None, :, :])
cross_weights = cross_weights.masked_fill(~cross_mask, mask_value)
cross_weights = rearrange(cross_weights, 'b n i j h -> b n (i j) h')
cross_attn = cross_weights.softmax(dim = -2)
# aggregate and combine heads for coordinate updates
rel_out = einsum('b i j h, b i j c h -> b i c h', coor_attn, rel_coors)
if self.use_cross_product:
cross_out = einsum('b i j h, b i j c h -> b i c h', cross_attn, cross_coors)
rel_out = torch.cat((rel_out, cross_out), dim = -1)
coors_out = einsum('b n c h, h -> b n c', rel_out, self.coors_combine)
# derive attention
sim = qk.clone()
if exists(mask):
mask_value = max_neg_value(sim)
sim.masked_fill_(~mask, mask_value)
attn = sim.softmax(dim = -1)
attn = self.node_dropout(attn)
if exists(self.talking_heads):
attn = self.talking_heads(attn)
# weighted sum of values and combine heads
out = einsum('b h i j, b h i j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
# linear attention
if self.has_linear_attn:
lin_out = self.linear_attn(feats, mask = _mask)
out = torch.cat((out, lin_out), dim = -1)
# combine heads, both local + global linear attention (if designated)
out = self.to_out(out)
return out, coors_out
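# Usage sketch (hypothetical sizes): a single equivariant attention layer
# returns invariant feature updates and an equivariant coordinate delta.
def _example_equivariant_attention():
    attn = EquivariantAttention(dim = 64, dim_head = 32, heads = 4, neighbors = 8)
    feats = torch.randn(1, 32, 64)
    coors = torch.randn(1, 32, 3)
    mask = torch.ones(1, 32).bool()
    feats_out, coors_delta = attn(feats, coors, mask = mask)
    assert feats_out.shape == (1, 32, 64) and coors_delta.shape == (1, 32, 3)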
# transformer
class Block(nn.Module):
def __init__(self, attn, ff):
super().__init__()
self.attn = attn
self.ff = ff
def forward(self, inp, coor_changes = None):
feats, coors, mask, edges, adj_mat = inp
feats, coors = self.attn(feats, coors, edges = edges, mask = mask, adj_mat = adj_mat)
feats, coors = self.ff(feats, coors)
return (feats, coors, mask, edges, adj_mat)
class EnTransformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
num_tokens = None,
rel_pos_emb = False,
dim_head = 64,
heads = 8,
num_edge_tokens = None,
edge_dim = 0,
coors_hidden_dim = 16,
neighbors = 0,
only_sparse_neighbors = False,
num_adj_degrees = None,
adj_dim = 0,
valid_neighbor_radius = float('inf'),
init_eps = 1e-3,
norm_rel_coors = True,
norm_coors_scale_init = 1.,
use_cross_product = False,
talking_heads = False,
checkpoint = False,
attn_dropout = 0.,
ff_dropout = 0.,
num_global_linear_attn_heads = 0
):
super().__init__()
assert dim_head >= 32, 'your dimension per head should be at least 32 for the dynamic position bias to work well'
assert not (exists(num_adj_degrees) and num_adj_degrees < 1), 'make sure num_adj_degrees is at least 1'
if only_sparse_neighbors:
num_adj_degrees = default(num_adj_degrees, 1)
self.token_emb = nn.Embedding(num_tokens, dim) if exists(num_tokens) else None
self.edge_emb = nn.Embedding(num_edge_tokens, edge_dim) if exists(num_edge_tokens) else None
self.num_adj_degrees = num_adj_degrees
self.adj_emb = nn.Embedding(num_adj_degrees + 1, adj_dim) if exists(num_adj_degrees) and adj_dim > 0 else None
adj_dim = adj_dim if exists(num_adj_degrees) else 0
self.checkpoint = checkpoint
self.layers = nn.ModuleList([])
for ind in range(depth):
self.layers.append(Block(
Residual(EquivariantAttention(
dim = dim,
dim_head = dim_head,
heads = heads,
coors_hidden_dim = coors_hidden_dim,
edge_dim = (edge_dim + adj_dim),
neighbors = neighbors,
only_sparse_neighbors = only_sparse_neighbors,
valid_neighbor_radius = valid_neighbor_radius,
init_eps = init_eps,
rel_pos_emb = rel_pos_emb,
norm_rel_coors = norm_rel_coors,
norm_coors_scale_init = norm_coors_scale_init,
use_cross_product = use_cross_product,
talking_heads = talking_heads,
dropout = attn_dropout,
num_global_linear_attn_heads = num_global_linear_attn_heads
)),
Residual(FeedForward(
dim = dim,
dropout = ff_dropout
))
))
def forward(
self,
feats,
coors,
edges = None,
mask = None,
adj_mat = None,
return_coor_changes = False,
**kwargs
):
b = feats.shape[0]
if exists(self.token_emb):
feats = self.token_emb(feats)
if exists(self.edge_emb):
assert exists(edges), 'edges must be passed in as (batch x seq x seq) indicating edge type'
edges = self.edge_emb(edges)
assert not (exists(adj_mat) and (not exists(self.num_adj_degrees) or self.num_adj_degrees == 0)), 'num_adj_degrees must be greater than 0 if you are passing in an adjacency matrix'
if exists(self.num_adj_degrees):
assert exists(adj_mat), 'adjacency matrix must be passed in (keyword argument adj_mat)'
if len(adj_mat.shape) == 2:
adj_mat = repeat(adj_mat.clone(), 'i j -> b i j', b = b)
adj_indices = adj_mat.clone().long()
for ind in range(self.num_adj_degrees - 1):
degree = ind + 2
next_degree_adj_mat = (adj_mat.float() @ adj_mat.float()) > 0
next_degree_mask = (next_degree_adj_mat.float() - adj_mat.float()).bool()
adj_indices.masked_fill_(next_degree_mask, degree)
adj_mat = next_degree_adj_mat.clone()
if exists(self.adj_emb):
adj_emb = self.adj_emb(adj_indices)
edges = torch.cat((edges, adj_emb), dim = -1) if exists(edges) else adj_emb
assert not (return_coor_changes and self.training), 'you must be eval mode in order to return coordinates'
# go through layers
coor_changes = [coors]
inp = (feats, coors, mask, edges, adj_mat)
# if in training mode and checkpointing is designated, use checkpointing across blocks to save memory
if self.training and self.checkpoint:
inp = checkpoint_sequential(self.layers, len(self.layers), inp)
else:
# iterate through blocks
for layer in self.layers:
inp = layer(inp)
coor_changes.append(inp[1]) # append coordinates for visualization
# return
feats, coors, *_ = inp
if return_coor_changes:
return feats, coors, coor_changes
return feats, coors
| En-transformer-main | en_transformer/en_transformer.py |
from en_transformer.en_transformer import EquivariantAttention, EnTransformer
| En-transformer-main | en_transformer/__init__.py |
import torch
from torch import sin, cos, atan2, acos
def rot_z(gamma):
return torch.tensor([
[cos(gamma), -sin(gamma), 0],
[sin(gamma), cos(gamma), 0],
[0, 0, 1]
], dtype = gamma.dtype)
def rot_y(beta):
return torch.tensor([
[cos(beta), 0, sin(beta)],
[0, 1, 0],
[-sin(beta), 0, cos(beta)]
], dtype = beta.dtype)
def rot(alpha, beta, gamma):
return rot_z(alpha) @ rot_y(beta) @ rot_z(gamma)
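# Quick sanity sketch: the composed z-y-z Euler rotation is a proper rotation
# matrix (orthogonal, with unit determinant).
def _example_rot():
    R = rot(*torch.rand(3))
    assert torch.allclose(R @ R.t(), torch.eye(3, dtype = R.dtype), atol = 1e-6)
    assert torch.allclose(torch.det(R), torch.tensor(1., dtype = R.dtype), atol = 1e-6)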
| En-transformer-main | en_transformer/utils.py |
import torch
from en_transformer.utils import rot
from en_transformer import EnTransformer
torch.set_default_dtype(torch.float64)
def test_readme():
model = EnTransformer(
dim = 512,
depth = 1,
dim_head = 64,
heads = 8,
edge_dim = 4,
neighbors = 6
)
feats = torch.randn(1, 32, 512)
coors = torch.randn(1, 32, 3)
edges = torch.randn(1, 32, 1024, 4)
mask = torch.ones(1, 32).bool()
feats, coors = model(feats, coors, edges, mask = mask)
assert True, 'it runs'
def test_equivariance():
model = EnTransformer(
dim = 512,
depth = 1,
edge_dim = 4,
rel_pos_emb = True
)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 16, 512)
coors = torch.randn(1, 16, 3)
edges = torch.randn(1, 16, 16, 4)
feats1, coors1 = model(feats, coors @ R + T, edges)
feats2, coors2 = model(feats, coors, edges)
assert torch.allclose(feats1, feats2, atol = 1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol = 1e-6), 'type 1 features are equivariant'
def test_equivariance_with_cross_product():
model = EnTransformer(
dim = 512,
depth = 1,
edge_dim = 4,
rel_pos_emb = True,
use_cross_product = True
)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 16, 512)
coors = torch.randn(1, 16, 3)
edges = torch.randn(1, 16, 16, 4)
feats1, coors1 = model(feats, coors @ R + T, edges)
feats2, coors2 = model(feats, coors, edges)
assert torch.allclose(feats1, feats2, atol = 1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol = 1e-6), 'type 1 features are equivariant'
def test_equivariance_with_nearest_neighbors():
model = EnTransformer(
dim = 512,
depth = 1,
edge_dim = 4,
neighbors = 5
)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 16, 512)
coors = torch.randn(1, 16, 3)
edges = torch.randn(1, 16, 16, 4)
feats1, coors1 = model(feats, coors @ R + T, edges)
feats2, coors2 = model(feats, coors, edges)
assert torch.allclose(feats1, feats2, atol = 1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol = 1e-6), 'type 1 features are equivariant'
def test_equivariance_with_sparse_neighbors():
model = EnTransformer(
dim = 512,
depth = 1,
heads = 4,
dim_head = 32,
neighbors = 0,
only_sparse_neighbors = True
)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 16, 512)
coors = torch.randn(1, 16, 3)
i = torch.arange(feats.shape[1])
adj_mat = (i[:, None] <= (i[None, :] + 1)) & (i[:, None] >= (i[None, :] - 1))
feats1, coors1 = model(feats, coors @ R + T, adj_mat = adj_mat)
feats2, coors2 = model(feats, coors, adj_mat = adj_mat)
assert torch.allclose(feats1, feats2, atol = 1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol = 1e-6), 'type 1 features are equivariant'
def test_depth():
model = EnTransformer(
dim = 8,
depth = 12,
edge_dim = 4,
neighbors = 16
)
feats = torch.randn(1, 128, 8)
coors = torch.randn(1, 128, 3)
edges = torch.randn(1, 128, 128, 4)
feats, coors = model(feats, coors, edges)
assert not torch.any(torch.isnan(feats)), 'no NaN in features'
assert not torch.any(torch.isnan(coors)), 'no NaN in coordinates'
| En-transformer-main | tests/test_equivariance.py |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from opt_einsum import contract as einsum
import copy
import dgl
from util import base_indices, RTs_by_torsion, xyzs_in_base_frame, rigid_from_3_points
def init_lecun_normal(module, scale=1.0):
def truncated_normal(uniform, mu=0.0, sigma=1.0, a=-2, b=2):
normal = torch.distributions.normal.Normal(0, 1)
alpha = (a - mu) / sigma
beta = (b - mu) / sigma
alpha_normal_cdf = normal.cdf(torch.tensor(alpha))
p = alpha_normal_cdf + (normal.cdf(torch.tensor(beta)) - alpha_normal_cdf) * uniform
v = torch.clamp(2 * p - 1, -1 + 1e-8, 1 - 1e-8)
x = mu + sigma * np.sqrt(2) * torch.erfinv(v)
x = torch.clamp(x, a, b)
return x
def sample_truncated_normal(shape, scale=1.0):
stddev = np.sqrt(scale/shape[-1])/.87962566103423978 # shape[-1] = fan_in
return stddev * truncated_normal(torch.rand(shape))
module.weight = torch.nn.Parameter( (sample_truncated_normal(module.weight.shape)) )
return module
def init_lecun_normal_param(weight, scale=1.0):
def truncated_normal(uniform, mu=0.0, sigma=1.0, a=-2, b=2):
normal = torch.distributions.normal.Normal(0, 1)
alpha = (a - mu) / sigma
beta = (b - mu) / sigma
alpha_normal_cdf = normal.cdf(torch.tensor(alpha))
p = alpha_normal_cdf + (normal.cdf(torch.tensor(beta)) - alpha_normal_cdf) * uniform
v = torch.clamp(2 * p - 1, -1 + 1e-8, 1 - 1e-8)
x = mu + sigma * np.sqrt(2) * torch.erfinv(v)
x = torch.clamp(x, a, b)
return x
def sample_truncated_normal(shape, scale=1.0):
stddev = np.sqrt(scale/shape[-1])/.87962566103423978 # shape[-1] = fan_in
return stddev * truncated_normal(torch.rand(shape))
weight = torch.nn.Parameter( (sample_truncated_normal(weight.shape)) )
return weight
# for gradient checkpointing
def create_custom_forward(module, **kwargs):
def custom_forward(*inputs):
return module(*inputs, **kwargs)
return custom_forward
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class Dropout(nn.Module):
# Dropout entire row or column
def __init__(self, broadcast_dim=None, p_drop=0.15):
super(Dropout, self).__init__()
# give ones with probability of 1-p_drop / zeros with p_drop
self.sampler = torch.distributions.bernoulli.Bernoulli(torch.tensor([1-p_drop]))
self.broadcast_dim=broadcast_dim
self.p_drop=p_drop
def forward(self, x):
if not self.training: # no drophead during evaluation mode
return x
shape = list(x.shape)
if self.broadcast_dim is not None:
shape[self.broadcast_dim] = 1
mask = self.sampler.sample(shape).to(x.device).view(shape)
x = mask * x / (1.0 - self.p_drop)
return x
def rbf(D):
# Distance radial basis function
D_min, D_max, D_count = 0., 20., 36
D_mu = torch.linspace(D_min, D_max, D_count).to(D.device)
D_mu = D_mu[None,:]
D_sigma = (D_max - D_min) / D_count
D_expand = torch.unsqueeze(D, -1)
RBF = torch.exp(-((D_expand - D_mu) / D_sigma)**2)
return RBF
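# Shape sketch (hypothetical sizes): a (B, L, L) distance map is lifted into
# 36 gaussian radial basis features spanning 0 to 20 Angstroms.
def _example_rbf():
    D = torch.cdist(torch.randn(1, 8, 3), torch.randn(1, 8, 3))  # (1, 8, 8)
    assert rbf(D).shape == (1, 8, 8, 36)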
def get_seqsep(idx):
'''
Input:
- idx: residue indices of given sequence (B,L)
Output:
- seqsep: sequence separation feature with sign (B, L, L, 1)
Sergey found that having sign in seqsep features helps a little
'''
seqsep = idx[:,None,:] - idx[:,:,None]
sign = torch.sign(seqsep)
neigh = torch.abs(seqsep)
neigh[neigh > 1] = 0.0 # if bonded -- 1.0 / else 0.0
neigh = sign * neigh
return neigh.unsqueeze(-1)
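# Behavior sketch: only sequence-adjacent (bonded) pairs survive, signed by
# direction; every other pair is zeroed.
def _example_get_seqsep():
    idx = torch.arange(5)[None]                    # (1, 5) residue indices
    seqsep = get_seqsep(idx)                       # (1, 5, 5, 1)
    assert seqsep[0, 0, 1, 0] == 1 and seqsep[0, 1, 0, 0] == -1
    assert seqsep[0, 0, 2, 0] == 0                 # non-bonded pair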
def make_full_graph(xyz, pair, idx, top_k=64, kmin=9):
'''
Input:
- xyz: current backbone coordinates (B, L, 3, 3)
- pair: pair features from Trunk (B, L, L, E)
- idx: residue index from ground truth pdb
Output:
- G: defined graph
'''
B, L = xyz.shape[:2]
device = xyz.device
# seq sep
sep = idx[:,None,:] - idx[:,:,None]
b,i,j = torch.where(sep.abs() > 0)
src = b*L+i
tgt = b*L+j
G = dgl.graph((src, tgt), num_nodes=B*L).to(device)
G.edata['rel_pos'] = (xyz[b,j,:] - xyz[b,i,:]).detach() # no gradient through basis function
return G, pair[b,i,j][...,None]
def make_topk_graph(xyz, pair, idx, top_k=64, kmin=32, eps=1e-6):
'''
Input:
- xyz: current backbone coordinates (B, L, 3, 3)
- pair: pair features from Trunk (B, L, L, E)
- idx: residue index from ground truth pdb
Output:
- G: defined graph
'''
B, L = xyz.shape[:2]
device = xyz.device
# distance map from current CA coordinates
D = torch.cdist(xyz, xyz) + torch.eye(L, device=device).unsqueeze(0)*999.9 # (B, L, L)
# seq sep
sep = idx[:,None,:] - idx[:,:,None]
sep = sep.abs() + torch.eye(L, device=device).unsqueeze(0)*999.9
D = D + sep*eps
# get top_k neighbors
D_neigh, E_idx = torch.topk(D, min(top_k, L), largest=False) # shape of E_idx: (B, L, top_k)
topk_matrix = torch.zeros((B, L, L), device=device)
topk_matrix.scatter_(2, E_idx, 1.0)
# put an edge if either of the 2 conditions is met:
# 1) |i-j| < kmin (connect sequentially close residues)
# 2) j is among the top_k nearest neighbors of i
cond = torch.logical_or(topk_matrix > 0.0, sep < kmin)
b,i,j = torch.where(cond)
src = b*L+i
tgt = b*L+j
G = dgl.graph((src, tgt), num_nodes=B*L).to(device)
G.edata['rel_pos'] = (xyz[b,j,:] - xyz[b,i,:]).detach() # no gradient through basis function
return G, pair[b,i,j][...,None]
def make_rotX(angs, eps=1e-6):
B,L = angs.shape[:2]
NORM = torch.linalg.norm(angs, dim=-1) + eps
RTs = torch.eye(4, device=angs.device).repeat(B,L,1,1)
RTs[:,:,1,1] = angs[:,:,0]/NORM
RTs[:,:,1,2] = -angs[:,:,1]/NORM
RTs[:,:,2,1] = angs[:,:,1]/NORM
RTs[:,:,2,2] = angs[:,:,0]/NORM
return RTs
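# Sanity sketch: make_rotX consumes unnormalized (cos, sin) pairs per residue;
# a (1, 0) pair encodes a zero rotation and yields (approximately) the
# identity homogeneous transform.
def _example_make_rotX():
    angs = torch.tensor([[[1.0, 0.0]]])            # (B=1, L=1, 2)
    RT = make_rotX(angs)
    assert RT.shape == (1, 1, 4, 4)
    assert torch.allclose(RT[0, 0], torch.eye(4), atol = 1e-5)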
# rotate about the z axis
def make_rotZ(angs, eps=1e-6):
B,L = angs.shape[:2]
NORM = torch.linalg.norm(angs, dim=-1) + eps
RTs = torch.eye(4, device=angs.device).repeat(B,L,1,1)
RTs[:,:,0,0] = angs[:,:,0]/NORM
RTs[:,:,0,1] = -angs[:,:,1]/NORM
RTs[:,:,1,0] = angs[:,:,1]/NORM
RTs[:,:,1,1] = angs[:,:,0]/NORM
return RTs
# rotate about an arbitrary axis
def make_rot_axis(angs, u, eps=1e-6):
B,L = angs.shape[:2]
NORM = torch.linalg.norm(angs, dim=-1) + eps
RTs = torch.eye(4, device=angs.device).repeat(B,L,1,1)
ct = angs[:,:,0]/NORM
st = angs[:,:,1]/NORM
u0 = u[:,:,0]
u1 = u[:,:,1]
u2 = u[:,:,2]
RTs[:,:,0,0] = ct+u0*u0*(1-ct)
RTs[:,:,0,1] = u0*u1*(1-ct)-u2*st
RTs[:,:,0,2] = u0*u2*(1-ct)+u1*st
RTs[:,:,1,0] = u0*u1*(1-ct)+u2*st
RTs[:,:,1,1] = ct+u1*u1*(1-ct)
RTs[:,:,1,2] = u1*u2*(1-ct)-u0*st
RTs[:,:,2,0] = u0*u2*(1-ct)-u1*st
RTs[:,:,2,1] = u1*u2*(1-ct)+u0*st
RTs[:,:,2,2] = ct+u2*u2*(1-ct)
return RTs
class ComputeAllAtomCoords(nn.Module):
def __init__(self):
super(ComputeAllAtomCoords, self).__init__()
self.base_indices = nn.Parameter(base_indices, requires_grad=False)
self.RTs_in_base_frame = nn.Parameter(RTs_by_torsion, requires_grad=False)
self.xyzs_in_base_frame = nn.Parameter(xyzs_in_base_frame, requires_grad=False)
def forward(self, seq, xyz, alphas, non_ideal=False, use_H=True):
B,L = xyz.shape[:2]
Rs, Ts = rigid_from_3_points(xyz[...,0,:],xyz[...,1,:],xyz[...,2,:], non_ideal=non_ideal)
RTF0 = torch.eye(4).repeat(B,L,1,1).to(device=Rs.device)
# bb
RTF0[:,:,:3,:3] = Rs
RTF0[:,:,:3,3] = Ts
# omega
RTF1 = torch.einsum(
'brij,brjk,brkl->bril',
RTF0, self.RTs_in_base_frame[seq,0,:], make_rotX(alphas[:,:,0,:]))
# phi
RTF2 = torch.einsum(
'brij,brjk,brkl->bril',
RTF0, self.RTs_in_base_frame[seq,1,:], make_rotX(alphas[:,:,1,:]))
# psi
RTF3 = torch.einsum(
'brij,brjk,brkl->bril',
RTF0, self.RTs_in_base_frame[seq,2,:], make_rotX(alphas[:,:,2,:]))
# CB bend
basexyzs = self.xyzs_in_base_frame[seq]
NCr = 0.5*(basexyzs[:,:,2,:3]+basexyzs[:,:,0,:3])
CAr = (basexyzs[:,:,1,:3])
CBr = (basexyzs[:,:,4,:3])
CBrotaxis1 = (CBr-CAr).cross(NCr-CAr)
CBrotaxis1 /= torch.linalg.norm(CBrotaxis1, dim=-1, keepdim=True)+1e-8
# CB twist
NCp = basexyzs[:,:,2,:3] - basexyzs[:,:,0,:3]
NCpp = NCp - torch.sum(NCp*NCr, dim=-1, keepdim=True)/ torch.sum(NCr*NCr, dim=-1, keepdim=True) * NCr
CBrotaxis2 = (CBr-CAr).cross(NCpp)
CBrotaxis2 /= torch.linalg.norm(CBrotaxis2, dim=-1, keepdim=True)+1e-8
CBrot1 = make_rot_axis(alphas[:,:,7,:], CBrotaxis1 )
CBrot2 = make_rot_axis(alphas[:,:,8,:], CBrotaxis2 )
RTF8 = torch.einsum(
'brij,brjk,brkl->bril',
RTF0, CBrot1,CBrot2)
# chi1 + CG bend
RTF4 = torch.einsum(
'brij,brjk,brkl,brlm->brim',
RTF8,
self.RTs_in_base_frame[seq,3,:],
make_rotX(alphas[:,:,3,:]),
make_rotZ(alphas[:,:,9,:]))
# chi2
RTF5 = torch.einsum(
'brij,brjk,brkl->bril',
RTF4, self.RTs_in_base_frame[seq,4,:],make_rotX(alphas[:,:,4,:]))
# chi3
RTF6 = torch.einsum(
'brij,brjk,brkl->bril',
RTF5,self.RTs_in_base_frame[seq,5,:],make_rotX(alphas[:,:,5,:]))
# chi4
RTF7 = torch.einsum(
'brij,brjk,brkl->bril',
RTF6,self.RTs_in_base_frame[seq,6,:],make_rotX(alphas[:,:,6,:]))
RTframes = torch.stack((
RTF0,RTF1,RTF2,RTF3,RTF4,RTF5,RTF6,RTF7,RTF8
),dim=2)
xyzs = torch.einsum(
'brtij,brtj->brti',
RTframes.gather(2,self.base_indices[seq][...,None,None].repeat(1,1,1,4,4)), basexyzs
)
if use_H:
return RTframes, xyzs[...,:3]
else:
return RTframes, xyzs[...,:14,:3]
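# Usage sketch (hypothetical shapes; assumes the ideal-geometry tensors from
# util are available): given a sequence, backbone N/CA/C coordinates and the
# ten predicted (cos, sin) torsion pairs per residue, recover the per-torsion
# frames and full-atom coordinates.
def _example_compute_allatom():
    B, L = 1, 16
    seq = torch.zeros(B, L, dtype = torch.long)    # e.g. poly-ALA
    xyz = torch.randn(B, L, 3, 3)                  # N, CA, C
    alphas = torch.randn(B, L, 10, 2)
    frames, xyzs = ComputeAllAtomCoords()(seq, xyz, alphas)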
| RFdiffusion-main | util_module.py |
import torch
import torch.nn as nn
from Embeddings import MSA_emb, Extra_emb, Templ_emb, Recycling, Timestep_emb
from Track_module import IterativeSimulator
from AuxiliaryPredictor import DistanceNetwork, MaskedTokenNetwork, ExpResolvedNetwork, LDDTNetwork
from util import INIT_CRDS
from opt_einsum import contract as einsum
from icecream import ic
class RoseTTAFoldModule(nn.Module):
def __init__(self,
n_extra_block,
n_main_block,
n_ref_block,
d_msa,
d_msa_full,
d_pair,
d_templ,
n_head_msa,
n_head_pair,
n_head_templ,
d_hidden,
d_hidden_templ,
p_drop,
d_t1d,
d_t2d,
d_time_emb, # total dims for input timestep emb
d_time_emb_proj, # size of projected timestep emb
T, # total timesteps (used in timestep emb)
use_motif_timestep, # Whether to have a distinct emb for motif
freeze_track_motif, # Whether to freeze updates to motif in track
SE3_param_full={'l0_in_features':32, 'l0_out_features':16, 'num_edge_features':32},
SE3_param_topk={'l0_in_features':32, 'l0_out_features':16, 'num_edge_features':32},
input_seq_onehot=False, # For continuous vs. discrete sequence
):
super(RoseTTAFoldModule, self).__init__()
self.freeze_track_motif = freeze_track_motif
# Input Embeddings
d_state = SE3_param_topk['l0_out_features']
self.latent_emb = MSA_emb(d_msa=d_msa, d_pair=d_pair, d_state=d_state,
p_drop=p_drop, input_seq_onehot=input_seq_onehot) # Allowed to take onehotseq
self.full_emb = Extra_emb(d_msa=d_msa_full, d_init=25,
p_drop=p_drop, input_seq_onehot=input_seq_onehot) # Allowed to take onehotseq
self.templ_emb = Templ_emb(d_pair=d_pair, d_templ=d_templ, d_state=d_state,
n_head=n_head_templ,
d_hidden=d_hidden_templ, p_drop=0.25, d_t1d=d_t1d, d_t2d=d_t2d)
# timestep embedder
if d_time_emb:
print('NOTE: Using sinusoidal timestep embeddings of dim ',d_time_emb, ' projected to dim ',d_time_emb_proj)
assert d_t1d >= 22 + d_time_emb_proj, 'timestep projection size doesn\'t fit into RF t1d projection layers'
self.timestep_embedder = Timestep_emb(input_size=d_time_emb,
output_size=d_time_emb_proj,
T=T,
use_motif_timestep=use_motif_timestep)
# Update inputs with outputs from previous round
self.recycle = Recycling(d_msa=d_msa, d_pair=d_pair, d_state=d_state)
#
self.simulator = IterativeSimulator(n_extra_block=n_extra_block,
n_main_block=n_main_block,
n_ref_block=n_ref_block,
d_msa=d_msa, d_msa_full=d_msa_full,
d_pair=d_pair, d_hidden=d_hidden,
n_head_msa=n_head_msa,
n_head_pair=n_head_pair,
SE3_param_full=SE3_param_full,
SE3_param_topk=SE3_param_topk,
p_drop=p_drop)
##
self.c6d_pred = DistanceNetwork(d_pair, p_drop=p_drop)
self.aa_pred = MaskedTokenNetwork(d_msa, p_drop=p_drop)
self.lddt_pred = LDDTNetwork(d_state)
self.exp_pred = ExpResolvedNetwork(d_msa, d_state)
def forward(self, msa_latent, msa_full, seq, xyz, idx, t,
t1d=None, t2d=None, xyz_t=None, alpha_t=None,
msa_prev=None, pair_prev=None, state_prev=None,
return_raw=False, return_full=False, return_infer=False,
use_checkpoint=False, motif_mask=None, i_cycle=None, n_cycle=None):
B, N, L = msa_latent.shape[:3]
# Get embeddings
msa_latent, pair, state = self.latent_emb(msa_latent, seq, idx)
msa_full = self.full_emb(msa_full, seq, idx)
# Do recycling
if msa_prev is None:
msa_prev = torch.zeros_like(msa_latent[:,0])
pair_prev = torch.zeros_like(pair)
state_prev = torch.zeros_like(state)
msa_recycle, pair_recycle, state_recycle = self.recycle(seq, msa_prev, pair_prev, xyz, state_prev)
msa_latent[:,0] = msa_latent[:,0] + msa_recycle.reshape(B,L,-1)
pair = pair + pair_recycle
state = state + state_recycle
# Get timestep embedding (if using)
if hasattr(self, 'timestep_embedder'):
assert t is not None
time_emb = self.timestep_embedder(L,t,motif_mask)
n_tmpl = t1d.shape[1]
t1d = torch.cat([t1d, time_emb[None,None,...].repeat(1,n_tmpl,1,1)], dim=-1)
# add template embedding
pair, state = self.templ_emb(t1d, t2d, alpha_t, xyz_t, pair, state, use_checkpoint=use_checkpoint)
# Predict coordinates from given inputs
is_frozen_residue = motif_mask if self.freeze_track_motif else torch.zeros_like(motif_mask).bool()
msa, pair, R, T, alpha_s, state = self.simulator(seq, msa_latent, msa_full, pair, xyz[:,:,:3],
state, idx, use_checkpoint=use_checkpoint,
motif_mask=is_frozen_residue)
if return_raw:
# get last structure
xyz = einsum('bnij,bnaj->bnai', R[-1], xyz[:,:,:3]-xyz[:,:,1].unsqueeze(-2)) + T[-1].unsqueeze(-2)
return msa[:,0], pair, xyz, state, alpha_s[-1]
# predict masked amino acids
logits_aa = self.aa_pred(msa)
# Predict LDDT
lddt = self.lddt_pred(state)
if return_infer:
# get last structure
xyz = einsum('bnij,bnaj->bnai', R[-1], xyz[:,:,:3]-xyz[:,:,1].unsqueeze(-2)) + T[-1].unsqueeze(-2)
# get scalar plddt
nbin = lddt.shape[1]
bin_step = 1.0 / nbin
lddt_bins = torch.linspace(bin_step, 1.0, nbin, dtype=lddt.dtype, device=lddt.device)
pred_lddt = nn.Softmax(dim=1)(lddt)
pred_lddt = torch.sum(lddt_bins[None,:,None]*pred_lddt, dim=1)
return msa[:,0], pair, xyz, state, alpha_s[-1], logits_aa.permute(0,2,1), pred_lddt
#
# predict distogram & orientograms
logits = self.c6d_pred(pair)
# predict experimentally resolved or not
logits_exp = self.exp_pred(msa[:,0], state)
# get all intermediate bb structures
xyz = einsum('rbnij,bnaj->rbnai', R, xyz[:,:,:3]-xyz[:,:,1].unsqueeze(-2)) + T.unsqueeze(-2)
return logits, logits_aa, logits_exp, xyz, alpha_s, lddt
| RFdiffusion-main | RoseTTAFoldModel.py |
# script for diffusion protocols
import torch
import pickle
import numpy as np
import os
import logging
from typing import List
from scipy.spatial.transform import Rotation as scipy_R
from util import rigid_from_3_points
from util import torsion_indices as TOR_INDICES
from util import torsion_can_flip as TOR_CAN_FLIP
from util import reference_angles as REF_ANGLES
from util_module import ComputeAllAtomCoords
from diff_util import th_min_angle, th_interpolate_angles
from chemical import INIT_CRDS
import igso3
import time
torch.set_printoptions(sci_mode=False)
def get_beta_schedule(T, b0, bT, schedule_type, schedule_params={}, inference=False):
"""
Given a noise schedule type, create the beta schedule
"""
assert schedule_type in ["linear"]
# Adjust b0 and bT if T is not 200
# This is a good approximation, with the beta correction below, unless T is very small
assert T >= 15, "With discrete time and T < 15, the schedule is badly approximated"
b0 *= 200 / T
bT *= 200 / T
# linear noise schedule
if schedule_type == "linear":
schedule = torch.linspace(b0, bT, T)
else:
raise NotImplementedError(f"Schedule of type {schedule_type} not implemented.")
# get alphabar_t for convenience
alpha_schedule = 1 - schedule
alphabar_t_schedule = torch.cumprod(alpha_schedule, dim=0)
if inference:
print(
f"With this beta schedule ({schedule_type} schedule, beta_0 = {round(b0, 3)}, beta_T = {round(bT,3)}), alpha_bar_T = {alphabar_t_schedule[-1]}"
)
return schedule, alpha_schedule, alphabar_t_schedule
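# Schedule sketch: with the linear schedule, alpha_bar_t = prod_s (1 - beta_s)
# decays monotonically toward 0 as t approaches T.
def _example_beta_schedule():
    betas, alphas, alphabars = get_beta_schedule(50, 1e-2, 7e-2, "linear")
    assert betas.shape == (50,)
    assert torch.all(alphabars[1:] <= alphabars[:-1])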
class EuclideanDiffuser:
# class for diffusing points in 3D
def __init__(
self,
T,
b_0,
b_T,
schedule_type="linear",
schedule_kwargs={},
):
self.T = T
# make noise/beta schedule
(
self.beta_schedule,
self.alpha_schedule,
self.alphabar_schedule,
) = get_beta_schedule(T, b_0, b_T, schedule_type, **schedule_kwargs)
def diffuse_translations(self, xyz, diffusion_mask=None, var_scale=1):
return self.apply_kernel_recursive(xyz, diffusion_mask, var_scale)
def apply_kernel(self, x, t, diffusion_mask=None, var_scale=1):
"""
Applies a noising kernel to the points in x
Parameters:
x (torch.tensor, required): (N,3,3) set of backbone coordinates
t (int, required): Which timestep
noise_scale (float, required): scale for noise
"""
t_idx = t - 1 # bring from 1-indexed to 0-indexed
assert len(x.shape) == 3
L, _, _ = x.shape
# c-alpha crds
ca_xyz = x[:, 1, :]
b_t = self.beta_schedule[t_idx]
# get the noise at timestep t
mean = torch.sqrt(1 - b_t) * ca_xyz
var = torch.ones(L, 3) * (b_t) * var_scale
sampled_crds = torch.normal(mean, torch.sqrt(var))
delta = sampled_crds - ca_xyz
if diffusion_mask is not None:
delta[diffusion_mask, ...] = 0
out_crds = x + delta[:, None, :]
return out_crds, delta
def apply_kernel_recursive(self, xyz, diffusion_mask=None, var_scale=1):
"""
Repeatedly apply self.apply_kernel T times and return all crds
"""
bb_stack = []
T_stack = []
cur_xyz = torch.clone(xyz)
for t in range(1, self.T + 1):
cur_xyz, cur_T = self.apply_kernel(
cur_xyz, t, var_scale=var_scale, diffusion_mask=diffusion_mask
)
bb_stack.append(cur_xyz)
T_stack.append(cur_T)
return torch.stack(bb_stack).transpose(0, 1), torch.stack(T_stack).transpose(
0, 1
)
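# Usage sketch (hypothetical sizes): diffusing the CA translations of a
# 10-residue backbone for all T steps at once.
def _example_euclidean_diffuser():
    diffuser = EuclideanDiffuser(T = 20, b_0 = 1e-2, b_T = 7e-2)
    xyz = torch.randn(10, 3, 3)                    # (L, N/CA/C, 3)
    crds, deltas = diffuser.diffuse_translations(xyz)
    assert crds.shape == (10, 20, 3, 3)            # (L, T, atoms, 3)
    assert deltas.shape == (10, 20, 3)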
def write_pkl(save_path: str, pkl_data):
"""Serialize data into a pickle file."""
with open(save_path, "wb") as handle:
pickle.dump(pkl_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
def read_pkl(read_path: str, verbose=False):
"""Read data from a pickle file."""
with open(read_path, "rb") as handle:
try:
return pickle.load(handle)
except Exception as e:
if verbose:
print(f"Failed to read {read_path}")
raise (e)
class IGSO3:
"""
Class for taking in a set of backbone crds and performing IGSO3 diffusion
on all of them.
Unlike the diffusion on translations, much of this class is written for a
scaling between an initial time t=0 and final time t=1.
"""
def __init__(
self,
*,
T,
min_sigma,
max_sigma,
min_b,
max_b,
cache_dir,
num_omega=1000,
schedule="linear",
L=2000,
):
"""
Args:
T: total number of time steps
min_sigma: smallest allowed scale parameter, should be at least 0.01 to maintain numerical stability. Recommended value is 0.05.
max_sigma: for exponential schedule, the largest scale parameter. Ignored for the recommended linear schedule
min_b: lower value of beta in Ho schedule analogue
max_b: upper value of beta in Ho schedule analogue
num_omega: discretization level in the angles across [0, pi]
schedule: currently only linear and exponential are supported. The exponential schedule may be noising too slowly.
L: truncation level
"""
self._log = logging.getLogger(__name__)
self.T = T
self.schedule = schedule
self.cache_dir = cache_dir
self.min_sigma = min_sigma
self.max_sigma = max_sigma
if self.schedule == "linear":
self.min_b = min_b
self.max_b = max_b
self.max_sigma = self.sigma(1.0)
self.num_omega = num_omega
self.num_sigma = 500
# Calculate igso3 values.
self.L = L # truncation level
self.igso3_vals = self._calc_igso3_vals(L=L)
self.step_size = 1 / self.T
def _calc_igso3_vals(self, L=2000):
"""_calc_igso3_vals computes numerical approximations to the
relevant analytically intractable functionals of the igso3
distribution.
The calculated values are cached, or loaded from cache if they already
exist.
Args:
L: truncation level for power series expansion of the pdf.
"""
replace_period = lambda x: str(x).replace(".", "_")
if self.schedule == "linear":
cache_fname = os.path.join(
self.cache_dir,
f"T_{self.T}_omega_{self.num_omega}_min_sigma_{replace_period(self.min_sigma)}"
+ f"_min_b_{replace_period(self.min_b)}_max_b_{replace_period(self.max_b)}_schedule_{self.schedule}.pkl",
)
elif self.schedule == "exponential":
cache_fname = os.path.join(
self.cache_dir,
f"T_{self.T}_omega_{self.num_omega}_min_sigma_{replace_period(self.min_sigma)}"
f"_max_sigma_{replace_period(self.max_sigma)}_schedule_{self.schedule}",
)
else:
raise ValueError(f"Unrecognize schedule {self.schedule}")
if not os.path.isdir(self.cache_dir):
os.makedirs(self.cache_dir)
if os.path.exists(cache_fname):
self._log.info("Using cached IGSO3.")
igso3_vals = read_pkl(cache_fname)
else:
self._log.info("Calculating IGSO3.")
igso3_vals = igso3.calculate_igso3(
num_sigma=self.num_sigma,
min_sigma=self.min_sigma,
max_sigma=self.max_sigma,
num_omega=self.num_omega,
L=L,
)
write_pkl(cache_fname, igso3_vals)
return igso3_vals
@property
def discrete_sigma(self):
return self.igso3_vals["discrete_sigma"]
def sigma_idx(self, sigma: np.ndarray):
"""
Calculates the index for discretized sigma during IGSO(3) initialization."""
return np.digitize(sigma, self.discrete_sigma) - 1
def t_to_idx(self, t: np.ndarray):
"""
Helper function to go from discrete time index t to corresponding sigma_idx.
Args:
t: time index (integer between 1 and T)
"""
continuous_t = t / self.T
return self.sigma_idx(self.sigma(continuous_t))
def sigma(self, t: torch.tensor):
"""
Extract \sigma(t) corresponding to chosen sigma schedule.
Args:
t: torch tensor with time between 0 and 1
"""
if not torch.is_tensor(t):
t = torch.tensor(t)
if torch.any(t < 0) or torch.any(t > 1):
raise ValueError(f"Invalid t={t}")
if self.schedule == "exponential":
sigma = t * np.log10(self.max_sigma) + (1 - t) * np.log10(self.min_sigma)
return 10**sigma
elif self.schedule == "linear": # Variance exploding analogue of Ho schedule
# add self.min_sigma for stability
return (
self.min_sigma
+ t * self.min_b
+ (1 / 2) * (t**2) * (self.max_b - self.min_b)
)
else:
raise ValueError(f"Unrecognize schedule {self.schedule}")
def g(self, t):
"""
g returns the diffusion coefficient at time t
since
sigma(t)^2 := \int_0^t g(s)^2 ds,
for arbitrary sigma(t) we invert this relationship to compute
g(t) = sqrt(d/dt sigma(t)^2).
Args:
t: scalar time between 0 and 1
Returns:
diffusion coefficient as a scalar.
"""
t = torch.tensor(t, requires_grad=True)
sigma_sqr = self.sigma(t) ** 2
grads = torch.autograd.grad(sigma_sqr.sum(), t)[0]
return torch.sqrt(grads)
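# For the linear schedule above, sigma(t) = min_sigma + t*min_b + t^2*(max_b - min_b)/2,
# so d/dt sigma(t)^2 = 2*sigma(t)*sigma'(t) with sigma'(t) = min_b + t*(max_b - min_b);
# the autograd call in g() evaluates exactly this derivative.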
def sample(self, ts, n_samples=1):
"""
sample uses the inverse cdf to sample an angle of rotation from
IGSO(3)
Args:
ts: array of integer time steps to sample from.
n_samples: number of samples to draw.
Returns:
sampled angles of rotation. [len(ts), N]
"""
assert sum(ts == 0) == 0, "assumes one-indexed, not zero indexed"
all_samples = []
for t in ts:
sigma_idx = self.t_to_idx(t)
sample_i = np.interp(
np.random.rand(n_samples),
self.igso3_vals["cdf"][sigma_idx],
self.igso3_vals["discrete_omega"],
) # [N, 1]
all_samples.append(sample_i)
return np.stack(all_samples, axis=0)
def sample_vec(self, ts, n_samples=1):
"""sample_vec generates a rotation vector(s) from IGSO(3) at time steps
ts.
Return:
Sampled vector of shape [len(ts), N, 3]
"""
x = np.random.randn(len(ts), n_samples, 3)
x /= np.linalg.norm(x, axis=-1, keepdims=True)
return x * self.sample(ts, n_samples=n_samples)[..., None]
def score_norm(self, t, omega):
"""
score_norm computes the score norm based on the time step and angle
Args:
t: integer time step
omega: angles (scalar or shape [N])
Return:
score_norm with same shape as omega
"""
sigma_idx = self.t_to_idx(t)
score_norm_t = np.interp(
omega,
self.igso3_vals["discrete_omega"],
self.igso3_vals["score_norm"][sigma_idx],
)
return score_norm_t
def score_vec(self, ts, vec):
"""score_vec computes the score of the IGSO(3) density as a rotation
vector. This score vector is in the direction of the sampled vector,
and has magnitude given by score_norms.
In particular, Rt @ hat(score_vec(ts, vec)) is what is referred to as
the score approximation in Algorithm 1
Args:
ts: times of shape [T]
vec: where to compute the score of shape [T, N, 3]
Returns:
score vectors of shape [T, N, 3]
"""
omega = np.linalg.norm(vec, axis=-1)
all_score_norm = []
for i, t in enumerate(ts):
omega_t = omega[i]
t_idx = t - 1
sigma_idx = self.t_to_idx(t)
score_norm_t = np.interp(
omega_t,
self.igso3_vals["discrete_omega"],
self.igso3_vals["score_norm"][sigma_idx],
)[:, None]
all_score_norm.append(score_norm_t)
score_norm = np.stack(all_score_norm, axis=0)
return score_norm * vec / omega[..., None]
def exp_score_norm(self, ts):
"""exp_score_norm returns the expected value of norm of the score for
IGSO(3) with time parameter ts of shape [T].
"""
sigma_idcs = [self.t_to_idx(t) for t in ts]
return self.igso3_vals["exp_score_norms"][sigma_idcs]
def diffuse_frames(self, xyz, t_list, diffusion_mask=None):
"""diffuse_frames samples from the IGSO(3) distribution to noise frames
Parameters:
xyz (np.array or torch.tensor, required): (L,3,3) set of backbone coordinates
mask (np.array or torch.tensor, required): (L,) set of bools. True/1 is NOT diffused, False/0 IS diffused
Returns:
np.array : N/CA/C coordinates for each residue
(T,L,3,3), where T is num timesteps
"""
if torch.is_tensor(xyz):
xyz = xyz.numpy()
t = np.arange(self.T) + 1 # 1-indexed!!
num_res = len(xyz)
N = torch.from_numpy(xyz[None, :, 0, :])
Ca = torch.from_numpy(xyz[None, :, 1, :]) # [1, num_res, 3, 3]
C = torch.from_numpy(xyz[None, :, 2, :])
# rigid frames (rotation matrices + CA positions) for the true coordinates
R_true, Ca = rigid_from_3_points(N, Ca, C)
R_true = R_true[0]
Ca = Ca[0]
# Sample rotations and scores from IGSO3
sampled_rots = self.sample_vec(t, n_samples=num_res) # [T, N, 3]
if diffusion_mask is not None:
non_diffusion_mask = 1 - diffusion_mask[None, :, None]
sampled_rots = sampled_rots * non_diffusion_mask
# Apply sampled rot.
R_sampled = (
scipy_R.from_rotvec(sampled_rots.reshape(-1, 3))
.as_matrix()
.reshape(self.T, num_res, 3, 3)
)
R_perturbed = np.einsum("tnij,njk->tnik", R_sampled, R_true)
perturbed_crds = (
np.einsum(
"tnij,naj->tnai", R_sampled, xyz[:, :3, :] - Ca[:, None, ...].numpy()
)
+ Ca[None, :, None].numpy()
)
if t_list is not None:
idx = [i - 1 for i in t_list]
perturbed_crds = perturbed_crds[idx]
R_perturbed = R_perturbed[idx]
return (
perturbed_crds.transpose(1, 0, 2, 3), # [L, T, 3, 3]
R_perturbed.transpose(1, 0, 2, 3),
)
def reverse_sample_vectorized(
self, R_t, R_0, t, noise_level, mask=None, return_perturb=False
):
"""reverse_sample uses an approximation to the IGSO3 score to sample
a rotation at the previous time step.
Roughly - this update follows the reverse time SDE for Riemannian
manifolds proposed by De Bortoli et al. (Theorem 1 of [1]), but with an
approximation to the score based on the prediction of R0.
Unlike in reference [1], this diffusion on SO(3) relies on a geometric
variance schedule. Specifically we follow [2] (appendix C) and assume
sigma_t = sigma_min * (sigma_max / sigma_min)^{t/T},
for time step t. When we view this as a discretization of the SDE
from time 0 to 1 with step size (1/T). Following Eq. 5 and Eq. 6,
this maps on to the forward time SDEs
dx = g(t) dBt [FORWARD]
and
dx = g(t)^2 score(xt, t)dt + g(t) B't, [REVERSE]
where g(t) = sigma_t * sqrt(2 * log(sigma_max/ sigma_min)), and Bt and
B't are Brownian motions. The formula for g(t) obtains from equation 9
of [2], from which this sampling function may be generalized to
alternative noising schedules.
Args:
R_t: noisy rotation of shape [N, 3, 3]
R_0: prediction of un-noised rotation
t: integer time step
noise_level: scaling on the noise added when obtaining sample
(preliminary performance seems empirically better with noise
level=0.5)
mask: whether the residue is to be updated. A value of 1 means the
rotation is not updated from r_t. A value of 0 means the
rotation is updated.
Return:
sampled rotation matrix for time t-1 of shape [3, 3]
Reference:
[1] De Bortoli, V., Mathieu, E., Hutchinson, M., Thornton, J., Teh, Y.
W., & Doucet, A. (2022). Riemannian score-based generative modeling.
arXiv preprint arXiv:2202.02763.
[2] Song, Y., Sohl-Dickstein, J., Kingma, D. P., Kumar, A., Ermon, S.,
& Poole, B. (2020). Score-based generative modeling through stochastic
differential equations. arXiv preprint arXiv:2011.13456.
"""
# compute rotation vector corresponding to prediction of how r_t goes to r_0
R_0, R_t = torch.tensor(R_0), torch.tensor(R_t)
R_0t = torch.einsum("...ij,...kj->...ik", R_t, R_0)
R_0t_rotvec = torch.tensor(
scipy_R.from_matrix(R_0t.cpu().numpy()).as_rotvec()
).to(R_0.device)
# Approximate the score based on the prediction of R0.
# R_t @ hat(Score_approx) is the score approximation in the Lie algebra
# SO(3) (i.e. the output of Algorithm 1)
Omega = torch.linalg.norm(R_0t_rotvec, axis=-1).numpy()
Score_approx = R_0t_rotvec * (self.score_norm(t, Omega) / Omega)[:, None]
# Compute scaling for score and sampled noise (following Eq 6 of [2])
continuous_t = t / self.T
rot_g = self.g(continuous_t).to(Score_approx.device)
# Sample and scale noise to add to the rotation perturbation in the
# SO(3) tangent space. Since IG-SO(3) is the Brownian motion on SO(3)
# (up to a deceleration of time by a factor of two), for small enough
# time-steps, this is equivalent to perturbing r_t with IG-SO(3) noise.
# See e.g. Algorithm 1 of De Bortoli et al.
Z = np.random.normal(size=(R_0.shape[0], 3))
Z = torch.from_numpy(Z).to(Score_approx.device)
Z *= noise_level
Delta_r = (rot_g**2) * self.step_size * Score_approx
# Sample perturbation from discretized SDE (following eq. 6 of [2]),
# This approximates sampling from IGSO3(* ; Delta_r, rot_g^2 *
# self.step_size) with a tangent Gaussian.
Perturb_tangent = Delta_r + rot_g * np.sqrt(self.step_size) * Z
if mask is not None:
Perturb_tangent *= (1 - mask.long())[:, None, None]
Perturb = igso3.Exp(Perturb_tangent)
if return_perturb:
return Perturb
Interp_rot = torch.einsum("...ij,...jk->...ik", Perturb, R_t)
return Interp_rot
class Diffuser:
# wrapper for yielding diffused coordinates
def __init__(
self,
T,
b_0,
b_T,
min_sigma,
max_sigma,
min_b,
max_b,
schedule_type,
so3_schedule_type,
so3_type,
crd_scale,
schedule_kwargs={},
var_scale=1.0,
cache_dir=".",
partial_T=None,
truncation_level=2000,
):
"""
Parameters:
T (int, required): Number of steps in the schedule
b_0 (float, required): Starting variance for Euclidean schedule
b_T (float, required): Ending variance for Euclidean schedule
"""
self.T = T
self.b_0 = b_0
self.b_T = b_T
self.min_sigma = min_sigma
self.max_sigma = max_sigma
self.crd_scale = crd_scale
self.var_scale = var_scale
self.cache_dir = cache_dir
# get backbone frame diffuser
self.so3_diffuser = IGSO3(
T=self.T,
min_sigma=self.min_sigma,
max_sigma=self.max_sigma,
schedule=so3_schedule_type,
min_b=min_b,
max_b=max_b,
cache_dir=self.cache_dir,
L=truncation_level,
)
# get backbone translation diffuser
self.eucl_diffuser = EuclideanDiffuser(
self.T, b_0, b_T, schedule_type=schedule_type, **schedule_kwargs
)
print("Successful diffuser __init__")
def diffuse_pose(
self,
xyz,
seq,
atom_mask,
include_motif_sidechains=True,
diffusion_mask=None,
t_list=None,
):
"""
Given full atom xyz, sequence and atom mask, diffuse the protein frame
translations and rotations
Parameters:
xyz (L,14/27,3) set of coordinates
seq (L,) integer sequence
atom_mask: mask describing presence/absence of an atom in pdb
diffusion_mask (torch.tensor, optional): Tensor of bools, True means NOT diffused at this residue, False means diffused
t_list (list, optional): If present, only return the diffused coordinates at timesteps t within the list
"""
if diffusion_mask is None:
diffusion_mask = torch.zeros(len(xyz.squeeze())).to(dtype=bool)
get_allatom = ComputeAllAtomCoords().to(device=xyz.device)
L = len(xyz)
# bring to origin and scale
# check if any BB atoms are nan before centering
nan_mask = ~torch.isnan(xyz.squeeze()[:, :3]).any(dim=-1).any(dim=-1)
assert torch.sum(~nan_mask) == 0
# Centre unmasked structure at origin, as in training (to prevent information leak)
if torch.sum(diffusion_mask) != 0:
self.motif_com = xyz[diffusion_mask, 1, :].mean(
dim=0
) # This is needed for one of the potentials
xyz = xyz - self.motif_com
elif torch.sum(diffusion_mask) == 0:
xyz = xyz - xyz[:, 1, :].mean(dim=0)
xyz_true = torch.clone(xyz)
xyz = xyz * self.crd_scale
# 1 get translations
tick = time.time()
diffused_T, deltas = self.eucl_diffuser.diffuse_translations(
xyz[:, :3, :].clone(), diffusion_mask=diffusion_mask
)
# print('Time to diffuse coordinates: ',time.time()-tick)
diffused_T /= self.crd_scale
deltas /= self.crd_scale
# 2 get frames
tick = time.time()
diffused_frame_crds, diffused_frames = self.so3_diffuser.diffuse_frames(
xyz[:, :3, :].clone(), diffusion_mask=diffusion_mask.numpy(), t_list=None
)
diffused_frame_crds /= self.crd_scale
# print('Time to diffuse frames: ',time.time()-tick)
##### Now combine all the diffused quantities to make full atom diffused poses
tick = time.time()
cum_delta = deltas.cumsum(dim=1)
# The coordinates of the translated AND rotated frames
diffused_BB = (
torch.from_numpy(diffused_frame_crds) + cum_delta[:, :, None, :]
).transpose(
0, 1
) # [n,L,3,3]
# diffused_BB = torch.from_numpy(diffused_frame_crds).transpose(0,1)
# diffused_BB is [t_steps,L,3,3]
t_steps, L = diffused_BB.shape[:2]
diffused_fa = torch.zeros(t_steps, L, 27, 3)
diffused_fa[:, :, :3, :] = diffused_BB
# Add in sidechains from motif
if include_motif_sidechains:
diffused_fa[:, diffusion_mask, :14, :] = xyz_true[None, diffusion_mask, :14]
if t_list is None:
fa_stack = diffused_fa
else:
t_idx_list = [t - 1 for t in t_list]
fa_stack = diffused_fa[t_idx_list]
return fa_stack, xyz_true
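# Usage sketch (hypothetical hyperparameters; assumes the IGSO3 cache can be
# built or loaded from cache_dir):
#
#   diffuser = Diffuser(T = 50, b_0 = 1e-2, b_T = 7e-2,
#                       min_sigma = 0.02, max_sigma = 1.5,
#                       min_b = 1.5, max_b = 2.5,
#                       schedule_type = "linear", so3_schedule_type = "linear",
#                       so3_type = "igso3", crd_scale = 0.25)
#   fa_stack, xyz_true = diffuser.diffuse_pose(xyz, seq, atom_mask)
#
# fa_stack holds the noised full-atom pose at every timestep; xyz_true is the
# centered, unscaled input structure.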
| RFdiffusion-main | diffusion.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from opt_einsum import contract as einsum
import torch.utils.checkpoint as checkpoint
from util import cross_product_matrix
from util_module import *
from Attention_module import *
from SE3_network import SE3TransformerWrapper
# Components for three-track blocks
# 1. MSA -> MSA update (biased attention. bias from pair & structure)
# 2. Pair -> Pair update (biased attention. bias from structure)
# 3. MSA -> Pair update (extract coevolution signal)
# 4. Str -> Str update (node from MSA, edge from Pair)
# Update MSA with biased self-attention. bias from Pair & Str
class MSAPairStr2MSA(nn.Module):
def __init__(self, d_msa=256, d_pair=128, n_head=8, d_state=16,
d_hidden=32, p_drop=0.15, use_global_attn=False):
super(MSAPairStr2MSA, self).__init__()
self.norm_pair = nn.LayerNorm(d_pair)
self.proj_pair = nn.Linear(d_pair+36, d_pair)
self.norm_state = nn.LayerNorm(d_state)
self.proj_state = nn.Linear(d_state, d_msa)
self.drop_row = Dropout(broadcast_dim=1, p_drop=p_drop)
self.row_attn = MSARowAttentionWithBias(d_msa=d_msa, d_pair=d_pair,
n_head=n_head, d_hidden=d_hidden)
if use_global_attn:
self.col_attn = MSAColGlobalAttention(d_msa=d_msa, n_head=n_head, d_hidden=d_hidden)
else:
self.col_attn = MSAColAttention(d_msa=d_msa, n_head=n_head, d_hidden=d_hidden)
self.ff = FeedForwardLayer(d_msa, 4, p_drop=p_drop)
# Do proper initialization
self.reset_parameter()
def reset_parameter(self):
# initialize weights to normal distrib
self.proj_pair = init_lecun_normal(self.proj_pair)
self.proj_state = init_lecun_normal(self.proj_state)
# initialize bias to zeros
nn.init.zeros_(self.proj_pair.bias)
nn.init.zeros_(self.proj_state.bias)
def forward(self, msa, pair, rbf_feat, state):
'''
Inputs:
- msa: MSA feature (B, N, L, d_msa)
- pair: Pair feature (B, L, L, d_pair)
- rbf_feat: Ca-Ca distance feature calculated from xyz coordinates (B, L, L, 36)
- xyz: xyz coordinates (B, L, n_atom, 3)
- state: updated node features after SE(3)-Transformer layer (B, L, d_state)
Output:
- msa: Updated MSA feature (B, N, L, d_msa)
'''
B, N, L = msa.shape[:3]
# prepare input bias feature by combining pair & coordinate info
pair = self.norm_pair(pair)
pair = torch.cat((pair, rbf_feat), dim=-1)
pair = self.proj_pair(pair) # (B, L, L, d_pair)
#
# update query sequence feature (first sequence in the MSA) with feedbacks (state) from SE3
state = self.norm_state(state)
state = self.proj_state(state).reshape(B, 1, L, -1)
msa = msa.index_add(1, torch.tensor([0,], device=state.device), state)
#
# Apply row/column attention to msa & transform
msa = msa + self.drop_row(self.row_attn(msa, pair))
msa = msa + self.col_attn(msa)
msa = msa + self.ff(msa)
return msa
class PairStr2Pair(nn.Module):
def __init__(self, d_pair=128, n_head=4, d_hidden=32, d_rbf=36, p_drop=0.15):
super(PairStr2Pair, self).__init__()
self.emb_rbf = nn.Linear(d_rbf, d_hidden)
self.proj_rbf = nn.Linear(d_hidden, d_pair)
self.drop_row = Dropout(broadcast_dim=1, p_drop=p_drop)
self.drop_col = Dropout(broadcast_dim=2, p_drop=p_drop)
self.row_attn = BiasedAxialAttention(d_pair, d_pair, n_head, d_hidden, p_drop=p_drop, is_row=True)
self.col_attn = BiasedAxialAttention(d_pair, d_pair, n_head, d_hidden, p_drop=p_drop, is_row=False)
self.ff = FeedForwardLayer(d_pair, 2)
self.reset_parameter()
def reset_parameter(self):
nn.init.kaiming_normal_(self.emb_rbf.weight, nonlinearity='relu')
nn.init.zeros_(self.emb_rbf.bias)
self.proj_rbf = init_lecun_normal(self.proj_rbf)
nn.init.zeros_(self.proj_rbf.bias)
def forward(self, pair, rbf_feat):
B, L = pair.shape[:2]
rbf_feat = self.proj_rbf(F.relu_(self.emb_rbf(rbf_feat)))
pair = pair + self.drop_row(self.row_attn(pair, rbf_feat))
pair = pair + self.drop_col(self.col_attn(pair, rbf_feat))
pair = pair + self.ff(pair)
return pair
class MSA2Pair(nn.Module):
def __init__(self, d_msa=256, d_pair=128, d_hidden=32, p_drop=0.15):
super(MSA2Pair, self).__init__()
self.norm = nn.LayerNorm(d_msa)
self.proj_left = nn.Linear(d_msa, d_hidden)
self.proj_right = nn.Linear(d_msa, d_hidden)
self.proj_out = nn.Linear(d_hidden*d_hidden, d_pair)
self.reset_parameter()
def reset_parameter(self):
# normal initialization
self.proj_left = init_lecun_normal(self.proj_left)
self.proj_right = init_lecun_normal(self.proj_right)
nn.init.zeros_(self.proj_left.bias)
nn.init.zeros_(self.proj_right.bias)
# zero initialize output
nn.init.zeros_(self.proj_out.weight)
nn.init.zeros_(self.proj_out.bias)
def forward(self, msa, pair):
B, N, L = msa.shape[:3]
msa = self.norm(msa)
left = self.proj_left(msa)
right = self.proj_right(msa)
right = right / float(N)
out = einsum('bsli,bsmj->blmij', left, right).reshape(B, L, L, -1)
out = self.proj_out(out)
pair = pair + out
return pair
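# NOTE: MSA2Pair above is the outer-product-mean update - per-position left and
# right projections of the MSA are contracted over the sequence axis (with a
# 1/N normalization) and projected into the pair representation.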
class SCPred(nn.Module):
def __init__(self, d_msa=256, d_state=32, d_hidden=128, p_drop=0.15):
super(SCPred, self).__init__()
self.norm_s0 = nn.LayerNorm(d_msa)
self.norm_si = nn.LayerNorm(d_state)
self.linear_s0 = nn.Linear(d_msa, d_hidden)
self.linear_si = nn.Linear(d_state, d_hidden)
# ResNet layers
self.linear_1 = nn.Linear(d_hidden, d_hidden)
self.linear_2 = nn.Linear(d_hidden, d_hidden)
self.linear_3 = nn.Linear(d_hidden, d_hidden)
self.linear_4 = nn.Linear(d_hidden, d_hidden)
# Final outputs
self.linear_out = nn.Linear(d_hidden, 20)
self.reset_parameter()
def reset_parameter(self):
# normal initialization
self.linear_s0 = init_lecun_normal(self.linear_s0)
self.linear_si = init_lecun_normal(self.linear_si)
self.linear_out = init_lecun_normal(self.linear_out)
nn.init.zeros_(self.linear_s0.bias)
nn.init.zeros_(self.linear_si.bias)
nn.init.zeros_(self.linear_out.bias)
# right before relu activation: He initializer (kaiming normal)
nn.init.kaiming_normal_(self.linear_1.weight, nonlinearity='relu')
nn.init.zeros_(self.linear_1.bias)
nn.init.kaiming_normal_(self.linear_3.weight, nonlinearity='relu')
nn.init.zeros_(self.linear_3.bias)
# right before residual connection: zero initialize
nn.init.zeros_(self.linear_2.weight)
nn.init.zeros_(self.linear_2.bias)
nn.init.zeros_(self.linear_4.weight)
nn.init.zeros_(self.linear_4.bias)
def forward(self, seq, state):
'''
Predict side-chain torsion angles along with backbone torsions
Inputs:
- seq: hidden embeddings corresponding to query sequence (B, L, d_msa)
- state: state feature (output l0 feature) from previous SE3 layer (B, L, d_state)
Outputs:
- si: predicted torsion angles (phi, psi, omega, chi1~4 with cos/sin, Cb bend, Cb twist, CG) (B, L, 10, 2)
'''
B, L = seq.shape[:2]
seq = self.norm_s0(seq)
state = self.norm_si(state)
si = self.linear_s0(seq) + self.linear_si(state)
si = si + self.linear_2(F.relu_(self.linear_1(F.relu_(si))))
si = si + self.linear_4(F.relu_(self.linear_3(F.relu_(si))))
si = self.linear_out(F.relu_(si))
return si.view(B, L, 10, 2)
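# Note (illustration, not in the original file): each of the 10 torsions comes
# out as an unnormalized (cos, sin) pair; angles in radians can be recovered by
# projecting onto the unit circle and applying atan2.
def _demo_si_to_radians(si, eps=1e-8):
    import torch
    si = si / (torch.norm(si, dim=-1, keepdim=True) + eps)   # (B, L, 10, 2)
    return torch.atan2(si[..., 1], si[..., 0])               # (B, L, 10), radians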
class Str2Str(nn.Module):
def __init__(self, d_msa=256, d_pair=128, d_state=16,
SE3_param={'l0_in_features':32, 'l0_out_features':16, 'num_edge_features':32}, p_drop=0.1):
super(Str2Str, self).__init__()
# initial node & pair feature process
self.norm_msa = nn.LayerNorm(d_msa)
self.norm_pair = nn.LayerNorm(d_pair)
self.norm_state = nn.LayerNorm(d_state)
self.embed_x = nn.Linear(d_msa+d_state, SE3_param['l0_in_features'])
self.embed_e1 = nn.Linear(d_pair, SE3_param['num_edge_features'])
self.embed_e2 = nn.Linear(SE3_param['num_edge_features']+36+1, SE3_param['num_edge_features'])
self.norm_node = nn.LayerNorm(SE3_param['l0_in_features'])
self.norm_edge1 = nn.LayerNorm(SE3_param['num_edge_features'])
self.norm_edge2 = nn.LayerNorm(SE3_param['num_edge_features'])
self.se3 = SE3TransformerWrapper(**SE3_param)
self.sc_predictor = SCPred(d_msa=d_msa, d_state=SE3_param['l0_out_features'],
p_drop=p_drop)
self.reset_parameter()
def reset_parameter(self):
# initialize weights to normal distribution
self.embed_x = init_lecun_normal(self.embed_x)
self.embed_e1 = init_lecun_normal(self.embed_e1)
self.embed_e2 = init_lecun_normal(self.embed_e2)
# initialize bias to zeros
nn.init.zeros_(self.embed_x.bias)
nn.init.zeros_(self.embed_e1.bias)
nn.init.zeros_(self.embed_e2.bias)
@torch.cuda.amp.autocast(enabled=False)
def forward(self, msa, pair, R_in, T_in, xyz, state, idx, motif_mask, top_k=64, eps=1e-5):
B, N, L = msa.shape[:3]
if motif_mask is None:
motif_mask = torch.zeros(L).bool()
# process msa & pair features
node = self.norm_msa(msa[:,0])
pair = self.norm_pair(pair)
state = self.norm_state(state)
node = torch.cat((node, state), dim=-1)
node = self.norm_node(self.embed_x(node))
pair = self.norm_edge1(self.embed_e1(pair))
neighbor = get_seqsep(idx)
rbf_feat = rbf(torch.cdist(xyz[:,:,1], xyz[:,:,1]))
pair = torch.cat((pair, rbf_feat, neighbor), dim=-1)
pair = self.norm_edge2(self.embed_e2(pair))
# define graph
if top_k != 0:
G, edge_feats = make_topk_graph(xyz[:,:,1,:], pair, idx, top_k=top_k)
else:
G, edge_feats = make_full_graph(xyz[:,:,1,:], pair, idx, top_k=top_k)
l1_feats = xyz - xyz[:,:,1,:].unsqueeze(2)
l1_feats = l1_feats.reshape(B*L, -1, 3)
# apply SE(3) Transformer & update coordinates
shift = self.se3(G, node.reshape(B*L, -1, 1), l1_feats, edge_feats)
state = shift['0'].reshape(B, L, -1) # (B, L, C)
offset = shift['1'].reshape(B, L, 2, 3)
        offset[:,motif_mask,...] = 0 # NOTE: motif mask is all zeros if not freezing the motif
delTi = offset[:,:,0,:] / 10.0 # translation
R = offset[:,:,1,:] / 100.0 # rotation
Qnorm = torch.sqrt( 1 + torch.sum(R*R, dim=-1) )
qA, qB, qC, qD = 1/Qnorm, R[:,:,0]/Qnorm, R[:,:,1]/Qnorm, R[:,:,2]/Qnorm
delRi = torch.zeros((B,L,3,3), device=xyz.device)
delRi[:,:,0,0] = qA*qA+qB*qB-qC*qC-qD*qD
delRi[:,:,0,1] = 2*qB*qC - 2*qA*qD
delRi[:,:,0,2] = 2*qB*qD + 2*qA*qC
delRi[:,:,1,0] = 2*qB*qC + 2*qA*qD
delRi[:,:,1,1] = qA*qA-qB*qB+qC*qC-qD*qD
delRi[:,:,1,2] = 2*qC*qD - 2*qA*qB
delRi[:,:,2,0] = 2*qB*qD - 2*qA*qC
delRi[:,:,2,1] = 2*qC*qD + 2*qA*qB
delRi[:,:,2,2] = qA*qA-qB*qB-qC*qC+qD*qD
Ri = einsum('bnij,bnjk->bnik', delRi, R_in)
Ti = delTi + T_in #einsum('bnij,bnj->bni', delRi, T_in) + delTi
alpha = self.sc_predictor(msa[:,0], state)
return Ri, Ti, state, alpha
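# Sanity-check sketch (not part of the original file): the quaternion map used
# above, q = (1, Rx, Ry, Rz) / |q|, always yields a proper rotation matrix, so
# the per-residue frame update stays on SO(3) regardless of the SE(3) layer output.
def _demo_vector_to_rotation():
    import torch
    R = torch.randn(2, 5, 3) / 100.0                      # same scaling as forward()
    Qnorm = torch.sqrt(1 + torch.sum(R * R, dim=-1))
    qA, qB, qC, qD = 1 / Qnorm, R[..., 0] / Qnorm, R[..., 1] / Qnorm, R[..., 2] / Qnorm
    delRi = torch.stack([
        torch.stack([qA*qA+qB*qB-qC*qC-qD*qD, 2*qB*qC-2*qA*qD, 2*qB*qD+2*qA*qC], dim=-1),
        torch.stack([2*qB*qC+2*qA*qD, qA*qA-qB*qB+qC*qC-qD*qD, 2*qC*qD-2*qA*qB], dim=-1),
        torch.stack([2*qB*qD-2*qA*qC, 2*qC*qD+2*qA*qB, qA*qA-qB*qB-qC*qC+qD*qD], dim=-1),
    ], dim=-2)                                            # (2, 5, 3, 3)
    eye = torch.eye(3).expand_as(delRi)
    assert torch.allclose(delRi @ delRi.transpose(-1, -2), eye, atol=1e-5)
    assert torch.allclose(torch.linalg.det(delRi), torch.ones(2, 5), atol=1e-5)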
class IterBlock(nn.Module):
def __init__(self, d_msa=256, d_pair=128,
n_head_msa=8, n_head_pair=4,
use_global_attn=False,
d_hidden=32, d_hidden_msa=None, p_drop=0.15,
SE3_param={'l0_in_features':32, 'l0_out_features':16, 'num_edge_features':32}):
super(IterBlock, self).__init__()
        if d_hidden_msa is None:
d_hidden_msa = d_hidden
self.msa2msa = MSAPairStr2MSA(d_msa=d_msa, d_pair=d_pair,
n_head=n_head_msa,
d_state=SE3_param['l0_out_features'],
use_global_attn=use_global_attn,
d_hidden=d_hidden_msa, p_drop=p_drop)
self.msa2pair = MSA2Pair(d_msa=d_msa, d_pair=d_pair,
d_hidden=d_hidden//2, p_drop=p_drop)
#d_hidden=d_hidden, p_drop=p_drop)
self.pair2pair = PairStr2Pair(d_pair=d_pair, n_head=n_head_pair,
d_hidden=d_hidden, p_drop=p_drop)
self.str2str = Str2Str(d_msa=d_msa, d_pair=d_pair,
d_state=SE3_param['l0_out_features'],
SE3_param=SE3_param,
p_drop=p_drop)
def forward(self, msa, pair, R_in, T_in, xyz, state, idx, motif_mask, use_checkpoint=False):
rbf_feat = rbf(torch.cdist(xyz[:,:,1,:], xyz[:,:,1,:]))
if use_checkpoint:
msa = checkpoint.checkpoint(create_custom_forward(self.msa2msa), msa, pair, rbf_feat, state)
pair = checkpoint.checkpoint(create_custom_forward(self.msa2pair), msa, pair)
pair = checkpoint.checkpoint(create_custom_forward(self.pair2pair), pair, rbf_feat)
R, T, state, alpha = checkpoint.checkpoint(create_custom_forward(self.str2str, top_k=0), msa, pair, R_in, T_in, xyz, state, idx, motif_mask)
else:
msa = self.msa2msa(msa, pair, rbf_feat, state)
pair = self.msa2pair(msa, pair)
pair = self.pair2pair(pair, rbf_feat)
R, T, state, alpha = self.str2str(msa, pair, R_in, T_in, xyz, state, idx, motif_mask=motif_mask, top_k=0)
return msa, pair, R, T, state, alpha
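# Hedged sketch: create_custom_forward is defined elsewhere in the repo (not
# shown here). The usual pattern, assuming fixed keyword arguments such as
# top_k are bound at wrap time, is:
def _create_custom_forward_sketch(module, **kwargs):
    # torch.utils.checkpoint only forwards positional tensors, so any fixed
    # keywords must be closed over in the wrapper.
    def custom_forward(*inputs):
        return module(*inputs, **kwargs)
    return custom_forward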
class IterativeSimulator(nn.Module):
def __init__(self, n_extra_block=4, n_main_block=12, n_ref_block=4,
d_msa=256, d_msa_full=64, d_pair=128, d_hidden=32,
n_head_msa=8, n_head_pair=4,
SE3_param_full={'l0_in_features':32, 'l0_out_features':16, 'num_edge_features':32},
SE3_param_topk={'l0_in_features':32, 'l0_out_features':16, 'num_edge_features':32},
p_drop=0.15):
super(IterativeSimulator, self).__init__()
self.n_extra_block = n_extra_block
self.n_main_block = n_main_block
self.n_ref_block = n_ref_block
self.proj_state = nn.Linear(SE3_param_topk['l0_out_features'], SE3_param_full['l0_out_features'])
# Update with extra sequences
if n_extra_block > 0:
self.extra_block = nn.ModuleList([IterBlock(d_msa=d_msa_full, d_pair=d_pair,
n_head_msa=n_head_msa,
n_head_pair=n_head_pair,
d_hidden_msa=8,
d_hidden=d_hidden,
p_drop=p_drop,
use_global_attn=True,
SE3_param=SE3_param_full)
for i in range(n_extra_block)])
# Update with seed sequences
if n_main_block > 0:
self.main_block = nn.ModuleList([IterBlock(d_msa=d_msa, d_pair=d_pair,
n_head_msa=n_head_msa,
n_head_pair=n_head_pair,
d_hidden=d_hidden,
p_drop=p_drop,
use_global_attn=False,
SE3_param=SE3_param_full)
for i in range(n_main_block)])
self.proj_state2 = nn.Linear(SE3_param_full['l0_out_features'], SE3_param_topk['l0_out_features'])
# Final SE(3) refinement
if n_ref_block > 0:
self.str_refiner = Str2Str(d_msa=d_msa, d_pair=d_pair,
d_state=SE3_param_topk['l0_out_features'],
SE3_param=SE3_param_topk,
p_drop=p_drop)
self.reset_parameter()
def reset_parameter(self):
self.proj_state = init_lecun_normal(self.proj_state)
nn.init.zeros_(self.proj_state.bias)
self.proj_state2 = init_lecun_normal(self.proj_state2)
nn.init.zeros_(self.proj_state2.bias)
def forward(self, seq, msa, msa_full, pair, xyz_in, state, idx, use_checkpoint=False, motif_mask=None):
"""
input:
seq: query sequence (B, L)
msa: seed MSA embeddings (B, N, L, d_msa)
msa_full: extra MSA embeddings (B, N, L, d_msa_full)
pair: initial residue pair embeddings (B, L, L, d_pair)
xyz_in: initial BB coordinates (B, L, n_atom, 3)
state: initial state features containing mixture of query seq, sidechain, accuracy info (B, L, d_state)
idx: residue index
            motif_mask: (L,) bool tensor; True at frozen motif positions, else False
"""
B, L = pair.shape[:2]
if motif_mask is None:
motif_mask = torch.zeros(L).bool()
R_in = torch.eye(3, device=xyz_in.device).reshape(1,1,3,3).expand(B, L, -1, -1)
T_in = xyz_in[:,:,1].clone()
xyz_in = xyz_in - T_in.unsqueeze(-2)
state = self.proj_state(state)
R_s = list()
T_s = list()
alpha_s = list()
for i_m in range(self.n_extra_block):
R_in = R_in.detach() # detach rotation (for stability)
T_in = T_in.detach()
# Get current BB structure
xyz = einsum('bnij,bnaj->bnai', R_in, xyz_in) + T_in.unsqueeze(-2)
msa_full, pair, R_in, T_in, state, alpha = self.extra_block[i_m](msa_full,
pair,
R_in,
T_in,
xyz,
state,
idx,
motif_mask=motif_mask,
use_checkpoint=use_checkpoint)
R_s.append(R_in)
T_s.append(T_in)
alpha_s.append(alpha)
for i_m in range(self.n_main_block):
R_in = R_in.detach()
T_in = T_in.detach()
# Get current BB structure
xyz = einsum('bnij,bnaj->bnai', R_in, xyz_in) + T_in.unsqueeze(-2)
msa, pair, R_in, T_in, state, alpha = self.main_block[i_m](msa,
pair,
R_in,
T_in,
xyz,
state,
idx,
motif_mask=motif_mask,
use_checkpoint=use_checkpoint)
R_s.append(R_in)
T_s.append(T_in)
alpha_s.append(alpha)
state = self.proj_state2(state)
for i_m in range(self.n_ref_block):
R_in = R_in.detach()
T_in = T_in.detach()
xyz = einsum('bnij,bnaj->bnai', R_in, xyz_in) + T_in.unsqueeze(-2)
R_in, T_in, state, alpha = self.str_refiner(msa,
pair,
R_in,
T_in,
xyz,
state,
idx,
top_k=64,
motif_mask=motif_mask)
R_s.append(R_in)
T_s.append(T_in)
alpha_s.append(alpha)
R_s = torch.stack(R_s, dim=0)
T_s = torch.stack(T_s, dim=0)
alpha_s = torch.stack(alpha_s, dim=0)
return msa, pair, R_s, T_s, alpha_s, state
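# Illustration (not part of the original file): each block applies the rigid
# update xyz = R_in @ xyz_local + T_in. The sketch below checks that composing
# two rotation updates via einsum('bnij,bnjk->bnik') agrees with applying them
# one after the other. (Note that Str2Str above deliberately updates the
# translation as delTi + T_in without rotating T_in; see the commented-out
# einsum there.)
def _demo_rotation_composition():
    import torch
    from torch import einsum
    B, L, A = 1, 6, 3
    xyz = torch.randn(B, L, A, 3)
    q1, _ = torch.linalg.qr(torch.randn(B, L, 3, 3))   # random orthogonal frames
    q2, _ = torch.linalg.qr(torch.randn(B, L, 3, 3))
    two_steps = einsum('bnij,bnaj->bnai', q2, einsum('bnij,bnaj->bnai', q1, xyz))
    composed = einsum('bnij,bnaj->bnai', einsum('bnij,bnjk->bnik', q2, q1), xyz)
    assert torch.allclose(two_steps, composed, atol=1e-5)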
| RFdiffusion-main | Track_module.py |
import numpy as np
import torch
from chemical import INIT_CRDS
PARAMS = {
"DMIN" : 2.0,
"DMAX" : 20.0,
"DBINS" : 36,
"ABINS" : 36,
}
# ============================================================
def get_pair_dist(a, b):
"""calculate pair distances between two sets of points
Parameters
----------
a,b : pytorch tensors of shape [batch,nres,3]
store Cartesian coordinates of two sets of atoms
Returns
-------
dist : pytorch tensor of shape [batch,nres,nres]
        stores pairwise distances between atoms in a and b
"""
dist = torch.cdist(a, b, p=2)
return dist
# ============================================================
def get_ang(a, b, c):
"""calculate planar angles for all consecutive triples (a[i],b[i],c[i])
from Cartesian coordinates of three sets of atoms a,b,c
Parameters
----------
a,b,c : pytorch tensors of shape [batch,nres,3]
store Cartesian coordinates of three sets of atoms
Returns
-------
ang : pytorch tensor of shape [batch,nres]
stores resulting planar angles
"""
v = a - b
w = c - b
v /= torch.norm(v, dim=-1, keepdim=True)
w /= torch.norm(w, dim=-1, keepdim=True)
vw = torch.sum(v*w, dim=-1)
return torch.acos(vw)
# ============================================================
def get_dih(a, b, c, d):
"""calculate dihedral angles for all consecutive quadruples (a[i],b[i],c[i],d[i])
given Cartesian coordinates of four sets of atoms a,b,c,d
Parameters
----------
a,b,c,d : pytorch tensors of shape [batch,nres,3]
store Cartesian coordinates of four sets of atoms
Returns
-------
dih : pytorch tensor of shape [batch,nres]
stores resulting dihedrals
"""
b0 = a - b
b1 = c - b
b2 = d - c
b1 /= torch.norm(b1, dim=-1, keepdim=True)
v = b0 - torch.sum(b0*b1, dim=-1, keepdim=True)*b1
w = b2 - torch.sum(b2*b1, dim=-1, keepdim=True)*b1
x = torch.sum(v*w, dim=-1)
y = torch.sum(torch.cross(b1,v,dim=-1)*w, dim=-1)
return torch.atan2(y, x)
def get_Cb(xyz):
'''recreate Cb given N,Ca,C'''
N = xyz[...,0,:]
Ca = xyz[...,1,:]
C = xyz[...,2,:]
b = Ca - N
c = C - Ca
a = torch.cross(b, c, dim=-1)
return -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
# ============================================================
def xyz_to_c6d(xyz, params=PARAMS):
"""convert cartesian coordinates into 2d distance
and orientation maps
Parameters
----------
xyz : pytorch tensor of shape [batch,nres,3,3]
stores Cartesian coordinates of backbone N,Ca,C atoms
Returns
-------
c6d : pytorch tensor of shape [batch,nres,nres,4]
stores stacked dist,omega,theta,phi 2D maps
"""
batch = xyz.shape[0]
nres = xyz.shape[1]
# three anchor atoms
N = xyz[:,:,0]
Ca = xyz[:,:,1]
C = xyz[:,:,2]
Cb = get_Cb(xyz)
# 6d coordinates order: (dist,omega,theta,phi)
c6d = torch.zeros([batch,nres,nres,4],dtype=xyz.dtype,device=xyz.device)
dist = get_pair_dist(Cb,Cb)
dist[torch.isnan(dist)] = 999.9
c6d[...,0] = dist + 999.9*torch.eye(nres,device=xyz.device)[None,...]
b,i,j = torch.where(c6d[...,0]<params['DMAX'])
c6d[b,i,j,torch.full_like(b,1)] = get_dih(Ca[b,i], Cb[b,i], Cb[b,j], Ca[b,j])
c6d[b,i,j,torch.full_like(b,2)] = get_dih(N[b,i], Ca[b,i], Cb[b,i], Cb[b,j])
c6d[b,i,j,torch.full_like(b,3)] = get_ang(Ca[b,i], Cb[b,i], Cb[b,j])
# fix long-range distances
c6d[...,0][c6d[...,0]>=params['DMAX']] = 999.9
mask = torch.zeros((batch, nres,nres), dtype=xyz.dtype, device=xyz.device)
mask[b,i,j] = 1.0
return c6d, mask
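# Usage sketch (illustrative, random coordinates; not part of the original file):
def _demo_xyz_to_c6d():
    B, L = 1, 10
    xyz = torch.randn(B, L, 3, 3) * 5.0        # fake N, Ca, C backbone coordinates
    c6d, mask = xyz_to_c6d(xyz)
    assert c6d.shape == (B, L, L, 4) and mask.shape == (B, L, L)
    # the diagonal is pushed past DMAX, so a residue never "contacts" itself
    diag = torch.arange(L)
    assert (c6d[0, diag, diag, 0] > PARAMS['DMAX']).all()
    assert (mask[0, diag, diag] == 0).all()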
def xyz_to_t2d(xyz_t, params=PARAMS):
"""convert template cartesian coordinates into 2d distance
and orientation maps
Parameters
----------
xyz_t : pytorch tensor of shape [batch,templ,nres,3,3]
stores Cartesian coordinates of template backbone N,Ca,C atoms
Returns
-------
    t2d : pytorch tensor of shape [batch,templ,nres,nres,37+6+1]
        stores stacked dist,omega,theta,phi 2D maps plus a template validity mask
"""
B, T, L = xyz_t.shape[:3]
c6d, mask = xyz_to_c6d(xyz_t[:,:,:,:3].view(B*T,L,3,3), params=params)
c6d = c6d.view(B, T, L, L, 4)
mask = mask.view(B, T, L, L, 1)
#
# dist to one-hot encoded
dist = dist_to_onehot(c6d[...,0], params)
orien = torch.cat((torch.sin(c6d[...,1:]), torch.cos(c6d[...,1:])), dim=-1)*mask # (B, T, L, L, 6)
#
mask = ~torch.isnan(c6d[:,:,:,:,0]) # (B, T, L, L)
t2d = torch.cat((dist, orien, mask.unsqueeze(-1)), dim=-1)
t2d[torch.isnan(t2d)] = 0.0
return t2d
def xyz_to_chi1(xyz_t):
'''convert template cartesian coordinates into chi1 angles
Parameters
----------
xyz_t: pytorch tensor of shape [batch, templ, nres, 14, 3]
stores Cartesian coordinates of template atoms. For missing atoms, it should be NaN
Returns
-------
    chi1 : pytorch tensor of shape [batch, templ, nres, 3]
        stores cos and sin of the chi1 angle plus a validity mask
'''
B, T, L = xyz_t.shape[:3]
xyz_t = xyz_t.reshape(B*T, L, 14, 3)
# chi1 angle: N, CA, CB, CG
chi1 = get_dih(xyz_t[:,:,0], xyz_t[:,:,1], xyz_t[:,:,4], xyz_t[:,:,5]) # (B*T, L)
cos_chi1 = torch.cos(chi1)
sin_chi1 = torch.sin(chi1)
mask_chi1 = ~torch.isnan(chi1)
chi1 = torch.stack((cos_chi1, sin_chi1, mask_chi1), dim=-1) # (B*T, L, 3)
chi1[torch.isnan(chi1)] = 0.0
chi1 = chi1.reshape(B, T, L, 3)
return chi1
def xyz_to_bbtor(xyz, params=PARAMS):
batch = xyz.shape[0]
nres = xyz.shape[1]
# three anchor atoms
N = xyz[:,:,0]
Ca = xyz[:,:,1]
C = xyz[:,:,2]
    # neighboring backbone atoms needed for the phi/psi dihedrals
next_N = torch.roll(N, -1, dims=1)
prev_C = torch.roll(C, 1, dims=1)
phi = get_dih(prev_C, N, Ca, C)
psi = get_dih(N, Ca, C, next_N)
#
phi[:,0] = 0.0
psi[:,-1] = 0.0
#
astep = 2.0*np.pi / params['ABINS']
phi_bin = torch.round((phi+np.pi-astep/2)/astep)
psi_bin = torch.round((psi+np.pi-astep/2)/astep)
return torch.stack([phi_bin, psi_bin], axis=-1).long()
# ============================================================
def dist_to_onehot(dist, params=PARAMS):
dist[torch.isnan(dist)] = 999.9
dstep = (params['DMAX'] - params['DMIN']) / params['DBINS']
dbins = torch.linspace(params['DMIN']+dstep, params['DMAX'], params['DBINS'],dtype=dist.dtype,device=dist.device)
db = torch.bucketize(dist.contiguous(),dbins).long()
dist = torch.nn.functional.one_hot(db, num_classes=params['DBINS']+1).float()
return dist
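# Bin-layout sketch (illustrative): with the default PARAMS this produces
# 37 channels -- 36 distance bins of width 0.5 on [DMIN, DMAX) plus one
# overflow bin for d >= DMAX (NaNs are mapped to 999.9 above and land there too).
def _demo_dist_to_onehot():
    d = torch.tensor([[1.0, 2.3, 10.0, 19.9, 25.0, float('nan')]])
    oh = dist_to_onehot(d)
    assert oh.shape == (1, 6, PARAMS['DBINS'] + 1)
    assert oh[0, 0].argmax() == 0                    # below DMIN -> first bin
    assert oh[0, -2].argmax() == PARAMS['DBINS']     # >= DMAX -> overflow bin
    assert oh[0, -1].argmax() == PARAMS['DBINS']     # NaN -> overflow bin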
def c6d_to_bins(c6d,params=PARAMS):
"""bin 2d distance and orientation maps
"""
dstep = (params['DMAX'] - params['DMIN']) / params['DBINS']
astep = 2.0*np.pi / params['ABINS']
dbins = torch.linspace(params['DMIN']+dstep, params['DMAX'], params['DBINS'],dtype=c6d.dtype,device=c6d.device)
ab360 = torch.linspace(-np.pi+astep, np.pi, params['ABINS'],dtype=c6d.dtype,device=c6d.device)
ab180 = torch.linspace(astep, np.pi, params['ABINS']//2,dtype=c6d.dtype,device=c6d.device)
db = torch.bucketize(c6d[...,0].contiguous(),dbins)
ob = torch.bucketize(c6d[...,1].contiguous(),ab360)
tb = torch.bucketize(c6d[...,2].contiguous(),ab360)
pb = torch.bucketize(c6d[...,3].contiguous(),ab180)
ob[db==params['DBINS']] = params['ABINS']
tb[db==params['DBINS']] = params['ABINS']
pb[db==params['DBINS']] = params['ABINS']//2
return torch.stack([db,ob,tb,pb],axis=-1).to(torch.uint8)
# ============================================================
def dist_to_bins(dist,params=PARAMS):
"""bin 2d distance maps
"""
dstep = (params['DMAX'] - params['DMIN']) / params['DBINS']
db = torch.round((dist-params['DMIN']-dstep/2)/dstep)
db[db<0] = 0
db[db>params['DBINS']] = params['DBINS']
return db.long()
# ============================================================
def c6d_to_bins2(c6d, same_chain, negative=False, params=PARAMS):
"""bin 2d distance and orientation maps
"""
dstep = (params['DMAX'] - params['DMIN']) / params['DBINS']
astep = 2.0*np.pi / params['ABINS']
db = torch.round((c6d[...,0]-params['DMIN']-dstep/2)/dstep)
ob = torch.round((c6d[...,1]+np.pi-astep/2)/astep)
tb = torch.round((c6d[...,2]+np.pi-astep/2)/astep)
pb = torch.round((c6d[...,3]-astep/2)/astep)
# put all d<dmin into one bin
db[db<0] = 0
# synchronize no-contact bins
db[db>params['DBINS']] = params['DBINS']
ob[db==params['DBINS']] = params['ABINS']
tb[db==params['DBINS']] = params['ABINS']
pb[db==params['DBINS']] = params['ABINS']//2
if negative:
db = torch.where(same_chain.bool(), db.long(), params['DBINS'])
ob = torch.where(same_chain.bool(), ob.long(), params['ABINS'])
tb = torch.where(same_chain.bool(), tb.long(), params['ABINS'])
pb = torch.where(same_chain.bool(), pb.long(), params['ABINS']//2)
return torch.stack([db,ob,tb,pb],axis=-1).long()
def get_init_xyz(xyz_t):
    # input: xyz_t (B, T, L, 27, 3)
    # output: xyz (B, T, L, 27, 3)
B, T, L = xyz_t.shape[:3]
init = INIT_CRDS.to(xyz_t.device).reshape(1,1,1,27,3).repeat(B,T,L,1,1)
if torch.isnan(xyz_t).all():
return init
mask = torch.isnan(xyz_t[:,:,:,:3]).any(dim=-1).any(dim=-1) # (B, T, L)
#
center_CA = ((~mask[:,:,:,None]) * torch.nan_to_num(xyz_t[:,:,:,1,:])).sum(dim=2) / ((~mask[:,:,:,None]).sum(dim=2)+1e-4) # (B, T, 3)
xyz_t = xyz_t - center_CA.view(B,T,1,1,3)
#
idx_s = list()
for i_b in range(B):
for i_T in range(T):
if mask[i_b, i_T].all():
continue
exist_in_templ = torch.where(~mask[i_b, i_T])[0] # (L_sub)
seqmap = (torch.arange(L, device=xyz_t.device)[:,None] - exist_in_templ[None,:]).abs() # (L, L_sub)
seqmap = torch.argmin(seqmap, dim=-1) # (L)
idx = torch.gather(exist_in_templ, -1, seqmap) # (L)
offset_CA = torch.gather(xyz_t[i_b, i_T, :, 1, :], 0, idx.reshape(L,1).expand(-1,3))
init[i_b,i_T] += offset_CA.reshape(L,1,3)
#
xyz = torch.where(mask.view(B, T, L, 1, 1), init, xyz_t)
return xyz
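# Shape sketch (illustrative; assumes INIT_CRDS from chemical provides the
# 27-atom ideal residue template): an all-NaN input falls back to idealized
# coordinates immediately.
def _demo_get_init_xyz():
    xyz_t = torch.full((1, 1, 5, 27, 3), float('nan'))
    xyz = get_init_xyz(xyz_t)
    assert xyz.shape == (1, 1, 5, 27, 3)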
| RFdiffusion-main | kinematics.py |
import torch
import torch.nn as nn
#from equivariant_attention.modules import get_basis_and_r, GSE3Res, GNormBias
#from equivariant_attention.modules import GConvSE3, GNormSE3
#from equivariant_attention.fibers import Fiber
from util_module import init_lecun_normal_param
from se3_transformer.model import SE3Transformer
from se3_transformer.model.fiber import Fiber
class SE3TransformerWrapper(nn.Module):
"""SE(3) equivariant GCN with attention"""
def __init__(self, num_layers=2, num_channels=32, num_degrees=3, n_heads=4, div=4,
l0_in_features=32, l0_out_features=32,
l1_in_features=3, l1_out_features=2,
num_edge_features=32):
super().__init__()
# Build the network
self.l1_in = l1_in_features
#
fiber_edge = Fiber({0: num_edge_features})
        if l1_in_features > 0:
            fiber_in = Fiber({0: l0_in_features, 1: l1_in_features})
        else:
            fiber_in = Fiber({0: l0_in_features})
        fiber_hidden = Fiber.create(num_degrees, num_channels)
        if l1_out_features > 0:
            fiber_out = Fiber({0: l0_out_features, 1: l1_out_features})
        else:
            fiber_out = Fiber({0: l0_out_features})
self.se3 = SE3Transformer(num_layers=num_layers,
fiber_in=fiber_in,
fiber_hidden=fiber_hidden,
fiber_out = fiber_out,
num_heads=n_heads,
channels_div=div,
fiber_edge=fiber_edge,
use_layer_norm=True)
#use_layer_norm=False)
self.reset_parameter()
def reset_parameter(self):
        # make sure linear layers before ReLU are initialized with kaiming_normal_
for n, p in self.se3.named_parameters():
if "bias" in n:
nn.init.zeros_(p)
elif len(p.shape) == 1:
continue
else:
if "radial_func" not in n:
p = init_lecun_normal_param(p)
else:
if "net.6" in n:
nn.init.zeros_(p)
else:
nn.init.kaiming_normal_(p, nonlinearity='relu')
# make last layers to be zero-initialized
#self.se3.graph_modules[-1].to_kernel_self['0'] = init_lecun_normal_param(self.se3.graph_modules[-1].to_kernel_self['0'])
#self.se3.graph_modules[-1].to_kernel_self['1'] = init_lecun_normal_param(self.se3.graph_modules[-1].to_kernel_self['1'])
nn.init.zeros_(self.se3.graph_modules[-1].to_kernel_self['0'])
nn.init.zeros_(self.se3.graph_modules[-1].to_kernel_self['1'])
def forward(self, G, type_0_features, type_1_features=None, edge_features=None):
if self.l1_in > 0:
node_features = {'0': type_0_features, '1': type_1_features}
else:
node_features = {'0': type_0_features}
edge_features = {'0': edge_features}
return self.se3(G, node_features, edge_features)
| RFdiffusion-main | SE3_network.py |
import sys
import numpy as np
import torch
import scipy.sparse
from chemical import *
from scoring import *
def generate_Cbeta(N, Ca, C):
# recreate Cb given N,Ca,C
b = Ca - N
c = C - Ca
a = torch.cross(b, c, dim=-1)
# Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
# fd: below matches sidechain generator (=Rosetta params)
Cb = -0.57910144 * a + 0.5689693 * b - 0.5441217 * c + Ca
return Cb
def th_ang_v(ab, bc, eps: float = 1e-8):
def th_norm(x, eps: float = 1e-8):
return x.square().sum(-1, keepdim=True).add(eps).sqrt()
def th_N(x, alpha: float = 0):
return x / th_norm(x).add(alpha)
ab, bc = th_N(ab), th_N(bc)
cos_angle = torch.clamp((ab * bc).sum(-1), -1, 1)
sin_angle = torch.sqrt(1 - cos_angle.square() + eps)
dih = torch.stack((cos_angle, sin_angle), -1)
return dih
def th_dih_v(ab, bc, cd):
def th_cross(a, b):
a, b = torch.broadcast_tensors(a, b)
return torch.cross(a, b, dim=-1)
def th_norm(x, eps: float = 1e-8):
return x.square().sum(-1, keepdim=True).add(eps).sqrt()
def th_N(x, alpha: float = 0):
return x / th_norm(x).add(alpha)
ab, bc, cd = th_N(ab), th_N(bc), th_N(cd)
n1 = th_N(th_cross(ab, bc))
n2 = th_N(th_cross(bc, cd))
sin_angle = (th_cross(n1, bc) * n2).sum(-1)
cos_angle = (n1 * n2).sum(-1)
dih = torch.stack((cos_angle, sin_angle), -1)
return dih
def th_dih(a, b, c, d):
return th_dih_v(a - b, b - c, c - d)
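# Worked check (illustration): four points with a known +90-degree dihedral.
# th_dih returns (cos, sin) pairs rather than radians, matching get_torsions.
def _demo_th_dih():
    a = torch.tensor([[1.0, 0.0, 0.0]])
    b = torch.tensor([[0.0, 0.0, 0.0]])
    c = torch.tensor([[0.0, 1.0, 0.0]])
    d = torch.tensor([[0.0, 1.0, -1.0]])
    cos_sin = th_dih(a, b, c, d)          # (1, 2)
    assert torch.allclose(cos_sin, torch.tensor([[0.0, 1.0]]), atol=1e-6)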
# A more elaborate version of the standard frame construction: it splits the
# error between the CA-N and CA-C directions (giving a more accurate CB position).
# It returns the rigid transformation from the local frame to the global frame.
def rigid_from_3_points(N, Ca, C, non_ideal=False, eps=1e-8):
# N, Ca, C - [B,L, 3]
# R - [B,L, 3, 3], det(R)=1, inv(R) = R.T, R is a rotation matrix
B, L = N.shape[:2]
v1 = C - Ca
v2 = N - Ca
e1 = v1 / (torch.norm(v1, dim=-1, keepdim=True) + eps)
u2 = v2 - (torch.einsum("bli, bli -> bl", e1, v2)[..., None] * e1)
e2 = u2 / (torch.norm(u2, dim=-1, keepdim=True) + eps)
e3 = torch.cross(e1, e2, dim=-1)
R = torch.cat(
[e1[..., None], e2[..., None], e3[..., None]], axis=-1
) # [B,L,3,3] - rotation matrix
if non_ideal:
v2 = v2 / (torch.norm(v2, dim=-1, keepdim=True) + eps)
cosref = torch.sum(e1 * v2, dim=-1) # cosine of current N-CA-C bond angle
costgt = cos_ideal_NCAC.item()
cos2del = torch.clamp(
cosref * costgt
+ torch.sqrt((1 - cosref * cosref) * (1 - costgt * costgt) + eps),
min=-1.0,
max=1.0,
)
cosdel = torch.sqrt(0.5 * (1 + cos2del) + eps)
sindel = torch.sign(costgt - cosref) * torch.sqrt(1 - 0.5 * (1 + cos2del) + eps)
Rp = torch.eye(3, device=N.device).repeat(B, L, 1, 1)
Rp[:, :, 0, 0] = cosdel
Rp[:, :, 0, 1] = -sindel
Rp[:, :, 1, 0] = sindel
Rp[:, :, 1, 1] = cosdel
R = torch.einsum("blij,bljk->blik", R, Rp)
return R, Ca
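# Sanity sketch (illustration): on arbitrary backbone coordinates the returned
# R is a proper rotation (orthonormal, det = +1) with Ca as the frame origin.
def _demo_rigid_from_3_points():
    B, L = 2, 5
    N, Ca, C = torch.randn(B, L, 3), torch.randn(B, L, 3), torch.randn(B, L, 3)
    R, T = rigid_from_3_points(N, Ca, C)
    eye = torch.eye(3).expand(B, L, 3, 3)
    assert torch.allclose(R @ R.transpose(-1, -2), eye, atol=1e-4)
    assert torch.allclose(torch.linalg.det(R), torch.ones(B, L), atol=1e-4)
    assert torch.equal(T, Ca)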
def get_tor_mask(seq, torsion_indices, mask_in=None):
B, L = seq.shape[:2]
tors_mask = torch.ones((B, L, 10), dtype=torch.bool, device=seq.device)
tors_mask[..., 3:7] = torsion_indices[seq, :, -1] > 0
tors_mask[:, 0, 1] = False
tors_mask[:, -1, 0] = False
# mask for additional angles
tors_mask[:, :, 7] = seq != aa2num["GLY"]
tors_mask[:, :, 8] = seq != aa2num["GLY"]
tors_mask[:, :, 9] = torch.logical_and(seq != aa2num["GLY"], seq != aa2num["ALA"])
tors_mask[:, :, 9] = torch.logical_and(tors_mask[:, :, 9], seq != aa2num["UNK"])
tors_mask[:, :, 9] = torch.logical_and(tors_mask[:, :, 9], seq != aa2num["MAS"])
    if mask_in is not None:
# mask for missing atoms
# chis
ti0 = torch.gather(mask_in, 2, torsion_indices[seq, :, 0])
ti1 = torch.gather(mask_in, 2, torsion_indices[seq, :, 1])
ti2 = torch.gather(mask_in, 2, torsion_indices[seq, :, 2])
ti3 = torch.gather(mask_in, 2, torsion_indices[seq, :, 3])
is_valid = torch.stack((ti0, ti1, ti2, ti3), dim=-2).all(dim=-1)
tors_mask[..., 3:7] = torch.logical_and(tors_mask[..., 3:7], is_valid)
tors_mask[:, :, 7] = torch.logical_and(
tors_mask[:, :, 7], mask_in[:, :, 4]
) # CB exist?
tors_mask[:, :, 8] = torch.logical_and(
tors_mask[:, :, 8], mask_in[:, :, 4]
) # CB exist?
tors_mask[:, :, 9] = torch.logical_and(
tors_mask[:, :, 9], mask_in[:, :, 5]
) # XG exist?
return tors_mask
def get_torsions(
xyz_in, seq, torsion_indices, torsion_can_flip, ref_angles, mask_in=None
):
B, L = xyz_in.shape[:2]
tors_mask = get_tor_mask(seq, torsion_indices, mask_in)
# torsions to restrain to 0 or 180degree
tors_planar = torch.zeros((B, L, 10), dtype=torch.bool, device=xyz_in.device)
tors_planar[:, :, 5] = seq == aa2num["TYR"] # TYR chi 3 should be planar
# idealize given xyz coordinates before computing torsion angles
xyz = xyz_in.clone()
Rs, Ts = rigid_from_3_points(xyz[..., 0, :], xyz[..., 1, :], xyz[..., 2, :])
Nideal = torch.tensor([-0.5272, 1.3593, 0.000], device=xyz_in.device)
Cideal = torch.tensor([1.5233, 0.000, 0.000], device=xyz_in.device)
xyz[..., 0, :] = torch.einsum("brij,j->bri", Rs, Nideal) + Ts
xyz[..., 2, :] = torch.einsum("brij,j->bri", Rs, Cideal) + Ts
torsions = torch.zeros((B, L, 10, 2), device=xyz.device)
# avoid undefined angles for H generation
torsions[:, 0, 1, 0] = 1.0
torsions[:, -1, 0, 0] = 1.0
# omega
torsions[:, :-1, 0, :] = th_dih(
xyz[:, :-1, 1, :], xyz[:, :-1, 2, :], xyz[:, 1:, 0, :], xyz[:, 1:, 1, :]
)
# phi
torsions[:, 1:, 1, :] = th_dih(
xyz[:, :-1, 2, :], xyz[:, 1:, 0, :], xyz[:, 1:, 1, :], xyz[:, 1:, 2, :]
)
# psi
torsions[:, :, 2, :] = -1 * th_dih(
xyz[:, :, 0, :], xyz[:, :, 1, :], xyz[:, :, 2, :], xyz[:, :, 3, :]
)
# chis
ti0 = torch.gather(xyz, 2, torsion_indices[seq, :, 0, None].repeat(1, 1, 1, 3))
ti1 = torch.gather(xyz, 2, torsion_indices[seq, :, 1, None].repeat(1, 1, 1, 3))
ti2 = torch.gather(xyz, 2, torsion_indices[seq, :, 2, None].repeat(1, 1, 1, 3))
ti3 = torch.gather(xyz, 2, torsion_indices[seq, :, 3, None].repeat(1, 1, 1, 3))
torsions[:, :, 3:7, :] = th_dih(ti0, ti1, ti2, ti3)
# CB bend
NC = 0.5 * (xyz[:, :, 0, :3] + xyz[:, :, 2, :3])
CA = xyz[:, :, 1, :3]
CB = xyz[:, :, 4, :3]
t = th_ang_v(CB - CA, NC - CA)
t0 = ref_angles[seq][..., 0, :]
torsions[:, :, 7, :] = torch.stack(
(torch.sum(t * t0, dim=-1), t[..., 0] * t0[..., 1] - t[..., 1] * t0[..., 0]),
dim=-1,
)
# CB twist
NCCA = NC - CA
NCp = xyz[:, :, 2, :3] - xyz[:, :, 0, :3]
NCpp = (
NCp
- torch.sum(NCp * NCCA, dim=-1, keepdim=True)
/ torch.sum(NCCA * NCCA, dim=-1, keepdim=True)
* NCCA
)
t = th_ang_v(CB - CA, NCpp)
t0 = ref_angles[seq][..., 1, :]
torsions[:, :, 8, :] = torch.stack(
(torch.sum(t * t0, dim=-1), t[..., 0] * t0[..., 1] - t[..., 1] * t0[..., 0]),
dim=-1,
)
# CG bend
CG = xyz[:, :, 5, :3]
t = th_ang_v(CG - CB, CA - CB)
t0 = ref_angles[seq][..., 2, :]
torsions[:, :, 9, :] = torch.stack(
(torch.sum(t * t0, dim=-1), t[..., 0] * t0[..., 1] - t[..., 1] * t0[..., 0]),
dim=-1,
)
mask0 = torch.isnan(torsions[..., 0]).nonzero()
mask1 = torch.isnan(torsions[..., 1]).nonzero()
torsions[mask0[:, 0], mask0[:, 1], mask0[:, 2], 0] = 1.0
torsions[mask1[:, 0], mask1[:, 1], mask1[:, 2], 1] = 0.0
# alt chis
torsions_alt = torsions.clone()
torsions_alt[torsion_can_flip[seq, :]] *= -1
return torsions, torsions_alt, tors_mask, tors_planar
def get_tips(xyz, seq):
B, L = xyz.shape[:2]
xyz_tips = torch.gather(
xyz, 2, tip_indices.to(xyz.device)[seq][:, :, None, None].expand(-1, -1, -1, 3)
).reshape(B, L, 3)
mask = ~(torch.isnan(xyz_tips[:, :, 0]))
if torch.isnan(xyz_tips).any(): # replace NaN tip atom with virtual Cb atom
# three anchor atoms
N = xyz[:, :, 0]
Ca = xyz[:, :, 1]
C = xyz[:, :, 2]
# recreate Cb given N,Ca,C
b = Ca - N
c = C - Ca
a = torch.cross(b, c, dim=-1)
Cb = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + Ca
xyz_tips = torch.where(torch.isnan(xyz_tips), Cb, xyz_tips)
return xyz_tips, mask
# process ideal frames
def make_frame(X, Y):
Xn = X / torch.linalg.norm(X)
Y = Y - torch.dot(Y, Xn) * Xn
Yn = Y / torch.linalg.norm(Y)
    Z = torch.cross(Xn, Yn, dim=-1)
Zn = Z / torch.linalg.norm(Z)
return torch.stack((Xn, Yn, Zn), dim=-1)
def cross_product_matrix(u):
B, L = u.shape[:2]
matrix = torch.zeros((B, L, 3, 3), device=u.device)
matrix[:, :, 0, 1] = -u[..., 2]
matrix[:, :, 0, 2] = u[..., 1]
matrix[:, :, 1, 0] = u[..., 2]
matrix[:, :, 1, 2] = -u[..., 0]
matrix[:, :, 2, 0] = -u[..., 1]
matrix[:, :, 2, 1] = u[..., 0]
return matrix
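# Quick identity check (illustration): cross_product_matrix(u) @ v == u x v,
# i.e. it is the skew-symmetric "hat" operator for batched 3-vectors.
def _demo_cross_product_matrix():
    B, L = 2, 4
    u, v = torch.randn(B, L, 3), torch.randn(B, L, 3)
    M = cross_product_matrix(u)
    lhs = torch.einsum('blij,blj->bli', M, v)
    assert torch.allclose(lhs, torch.cross(u, v, dim=-1), atol=1e-6)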
# writepdb
def writepdb(
filename, atoms, seq, binderlen=None, idx_pdb=None, bfacts=None, chain_idx=None
):
f = open(filename, "w")
ctr = 1
scpu = seq.cpu().squeeze()
atomscpu = atoms.cpu().squeeze()
if bfacts is None:
bfacts = torch.zeros(atomscpu.shape[0])
if idx_pdb is None:
idx_pdb = 1 + torch.arange(atomscpu.shape[0])
Bfacts = torch.clamp(bfacts.cpu(), 0, 1)
for i, s in enumerate(scpu):
if chain_idx is None:
if binderlen is not None:
if i < binderlen:
chain = "A"
else:
chain = "B"
elif binderlen is None:
chain = "A"
else:
chain = chain_idx[i]
if len(atomscpu.shape) == 2:
f.write(
"%-6s%5s %4s %3s %s%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n"
% (
"ATOM",
ctr,
" CA ",
num2aa[s],
chain,
idx_pdb[i],
atomscpu[i, 0],
atomscpu[i, 1],
atomscpu[i, 2],
1.0,
Bfacts[i],
)
)
ctr += 1
elif atomscpu.shape[1] == 3:
for j, atm_j in enumerate([" N ", " CA ", " C "]):
f.write(
"%-6s%5s %4s %3s %s%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n"
% (
"ATOM",
ctr,
atm_j,
num2aa[s],
chain,
idx_pdb[i],
atomscpu[i, j, 0],
atomscpu[i, j, 1],
atomscpu[i, j, 2],
1.0,
Bfacts[i],
)
)
ctr += 1
elif atomscpu.shape[1] == 4:
for j, atm_j in enumerate([" N ", " CA ", " C ", " O "]):
f.write(
"%-6s%5s %4s %3s %s%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n"
% (
"ATOM",
ctr,
atm_j,
num2aa[s],
chain,
idx_pdb[i],
atomscpu[i, j, 0],
atomscpu[i, j, 1],
atomscpu[i, j, 2],
1.0,
Bfacts[i],
)
)
ctr += 1
else:
natoms = atomscpu.shape[1]
            assert natoms in (14, 27), f"bad size! {atoms.shape}"
atms = aa2long[s]
            # HIS protonation hack: switch to the HIS_D atom naming when detected
if (
s == 8
and torch.linalg.norm(atomscpu[i, 9, :] - atomscpu[i, 5, :]) < 1.7
):
atms = (
" N ",
" CA ",
" C ",
" O ",
" CB ",
" CG ",
" NE2",
" CD2",
" CE1",
" ND1",
None,
None,
None,
None,
" H ",
" HA ",
"1HB ",
"2HB ",
" HD2",
" HE1",
" HD1",
None,
None,
None,
None,
None,
None,
) # his_d
for j, atm_j in enumerate(atms):
if (
j < natoms and atm_j is not None
): # and not torch.isnan(atomscpu[i,j,:]).any()):
f.write(
"%-6s%5s %4s %3s %s%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n"
% (
"ATOM",
ctr,
atm_j,
num2aa[s],
chain,
idx_pdb[i],
atomscpu[i, j, 0],
atomscpu[i, j, 1],
atomscpu[i, j, 2],
1.0,
Bfacts[i],
)
)
ctr += 1
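# Usage sketch (illustrative): writing a CA-only trace. Assumes `seq` holds
# integer residue types and `atoms` is (L, 3), which selects the first branch:
#   writepdb("ca_trace.pdb", torch.randn(10, 3), torch.zeros(10, dtype=torch.long))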
# resolve tip atom indices
tip_indices = torch.full((22,), 0)
for i in range(22):
tip_atm = aa2tip[i]
atm_long = aa2long[i]
tip_indices[i] = atm_long.index(tip_atm)
# resolve torsion indices
torsion_indices = torch.full((22, 4, 4), 0)
torsion_can_flip = torch.full((22, 10), False, dtype=torch.bool)
for i in range(22):
i_l, i_a = aa2long[i], aa2longalt[i]
for j in range(4):
if torsions[i][j] is None:
continue
for k in range(4):
a = torsions[i][j][k]
torsion_indices[i, j, k] = i_l.index(a)
if i_l.index(a) != i_a.index(a):
torsion_can_flip[i, 3 + j] = True ##bb tors never flip
# HIS is a special case
torsion_can_flip[8, 4] = False
# build the mapping from atoms in the full rep (Nx27) to the "alternate" rep
allatom_mask = torch.zeros((22, 27), dtype=torch.bool)
long2alt = torch.zeros((22, 27), dtype=torch.long)
for i in range(22):
i_l, i_lalt = aa2long[i], aa2longalt[i]
for j, a in enumerate(i_l):
if a is None:
long2alt[i, j] = j
else:
long2alt[i, j] = i_lalt.index(a)
allatom_mask[i, j] = True
# bond graph traversal
num_bonds = torch.zeros((22, 27, 27), dtype=torch.long)
for i in range(22):
num_bonds_i = np.zeros((27, 27))
for bnamei, bnamej in aabonds[i]:
bi, bj = aa2long[i].index(bnamei), aa2long[i].index(bnamej)
num_bonds_i[bi, bj] = 1
num_bonds_i = scipy.sparse.csgraph.shortest_path(num_bonds_i, directed=False)
num_bonds_i[num_bonds_i >= 4] = 4
num_bonds[i, ...] = torch.tensor(num_bonds_i)
# LJ/LK scoring parameters
ljlk_parameters = torch.zeros((22, 27, 5), dtype=torch.float)
lj_correction_parameters = torch.zeros(
(22, 27, 4), dtype=bool
) # donor/acceptor/hpol/disulf
for i in range(22):
for j, a in enumerate(aa2type[i]):
if a is not None:
ljlk_parameters[i, j, :] = torch.tensor(type2ljlk[a])
lj_correction_parameters[i, j, 0] = (type2hb[a] == HbAtom.DO) + (
type2hb[a] == HbAtom.DA
)
lj_correction_parameters[i, j, 1] = (type2hb[a] == HbAtom.AC) + (
type2hb[a] == HbAtom.DA
)
lj_correction_parameters[i, j, 2] = type2hb[a] == HbAtom.HP
lj_correction_parameters[i, j, 3] = a == "SH1" or a == "HS"
# hbond scoring parameters
def donorHs(D, bonds, atoms):
dHs = []
for i, j in bonds:
if i == D:
idx_j = atoms.index(j)
if idx_j >= 14: # if atom j is a hydrogen
dHs.append(idx_j)
if j == D:
idx_i = atoms.index(i)
            if idx_i >= 14:  # if atom i is a hydrogen
dHs.append(idx_i)
assert len(dHs) > 0
return dHs
def acceptorBB0(A, hyb, bonds, atoms):
if hyb == HbHybType.SP2:
for i, j in bonds:
if i == A:
B = atoms.index(j)
if B < 14:
break
if j == A:
B = atoms.index(i)
if B < 14:
break
for i, j in bonds:
if i == atoms[B]:
B0 = atoms.index(j)
if B0 < 14:
break
if j == atoms[B]:
B0 = atoms.index(i)
if B0 < 14:
break
elif hyb == HbHybType.SP3 or hyb == HbHybType.RING:
for i, j in bonds:
if i == A:
B = atoms.index(j)
if B < 14:
break
if j == A:
B = atoms.index(i)
if B < 14:
break
for i, j in bonds:
if i == A and j != atoms[B]:
B0 = atoms.index(j)
break
if j == A and i != atoms[B]:
B0 = atoms.index(i)
break
return B, B0
hbtypes = torch.full(
(22, 27, 3), -1, dtype=torch.long
) # (donortype, acceptortype, acchybtype)
hbbaseatoms = torch.full(
(22, 27, 2), -1, dtype=torch.long
) # (B,B0) for acc; (D,-1) for don
hbpolys = torch.zeros(
(HbDonType.NTYPES, HbAccType.NTYPES, 3, 15)
) # weight,xmin,xmax,ymin,ymax,c9,...,c0
for i in range(22):
for j, a in enumerate(aa2type[i]):
if a in type2dontype:
j_hs = donorHs(aa2long[i][j], aabonds[i], aa2long[i])
for j_h in j_hs:
hbtypes[i, j_h, 0] = type2dontype[a]
hbbaseatoms[i, j_h, 0] = j
if a in type2acctype:
j_b, j_b0 = acceptorBB0(
aa2long[i][j], type2hybtype[a], aabonds[i], aa2long[i]
)
hbtypes[i, j, 1] = type2acctype[a]
hbtypes[i, j, 2] = type2hybtype[a]
hbbaseatoms[i, j, 0] = j_b
hbbaseatoms[i, j, 1] = j_b0
for i in range(HbDonType.NTYPES):
for j in range(HbAccType.NTYPES):
weight = dontype2wt[i] * acctype2wt[j]
pdist, pbah, pahd = hbtypepair2poly[(i, j)]
xrange, yrange, coeffs = hbpolytype2coeffs[pdist]
hbpolys[i, j, 0, 0] = weight
hbpolys[i, j, 0, 1:3] = torch.tensor(xrange)
hbpolys[i, j, 0, 3:5] = torch.tensor(yrange)
hbpolys[i, j, 0, 5:] = torch.tensor(coeffs)
xrange, yrange, coeffs = hbpolytype2coeffs[pahd]
hbpolys[i, j, 1, 0] = weight
hbpolys[i, j, 1, 1:3] = torch.tensor(xrange)
hbpolys[i, j, 1, 3:5] = torch.tensor(yrange)
hbpolys[i, j, 1, 5:] = torch.tensor(coeffs)
xrange, yrange, coeffs = hbpolytype2coeffs[pbah]
hbpolys[i, j, 2, 0] = weight
hbpolys[i, j, 2, 1:3] = torch.tensor(xrange)
hbpolys[i, j, 2, 3:5] = torch.tensor(yrange)
hbpolys[i, j, 2, 5:] = torch.tensor(coeffs)
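# Evaluation sketch (illustration; the actual evaluation is presumably handled
# by the repo's scoring code). Each 15-float row packs weight, (xmin, xmax),
# (ymin, ymax) and ten coefficients c9..c0; outside [xmin, xmax] the polynomial
# is clamped to the stored boundary values, matching the triplet convention
# documented next to hbpolytype2coeffs.
def _demo_eval_hbpoly(row, x):
    weight, xmin, xmax = row[0], row[1], row[2]
    y_below, y_above = row[3], row[4]
    y = torch.zeros_like(x)
    for c in row[5:]:                      # Horner evaluation, highest order first
        y = y * x + c
    y = torch.where(x < xmin, y_below, y)
    y = torch.where(x > xmax, y_above, y)
    return weight * y
# e.g. _demo_eval_hbpoly(hbpolys[HbDonType.PBA, HbAccType.PBA, 0], torch.linspace(1.5, 3.2, 8))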
# kinematic parameters
base_indices = torch.full((22, 27), 0, dtype=torch.long)
xyzs_in_base_frame = torch.ones((22, 27, 4))
RTs_by_torsion = torch.eye(4).repeat(22, 7, 1, 1)
reference_angles = torch.ones((22, 3, 2))
for i in range(22):
i_l = aa2long[i]
for name, base, coords in ideal_coords[i]:
idx = i_l.index(name)
base_indices[i, idx] = base
xyzs_in_base_frame[i, idx, :3] = torch.tensor(coords)
# omega frame
RTs_by_torsion[i, 0, :3, :3] = torch.eye(3)
RTs_by_torsion[i, 0, :3, 3] = torch.zeros(3)
# phi frame
RTs_by_torsion[i, 1, :3, :3] = make_frame(
xyzs_in_base_frame[i, 0, :3] - xyzs_in_base_frame[i, 1, :3],
torch.tensor([1.0, 0.0, 0.0]),
)
RTs_by_torsion[i, 1, :3, 3] = xyzs_in_base_frame[i, 0, :3]
# psi frame
RTs_by_torsion[i, 2, :3, :3] = make_frame(
xyzs_in_base_frame[i, 2, :3] - xyzs_in_base_frame[i, 1, :3],
xyzs_in_base_frame[i, 1, :3] - xyzs_in_base_frame[i, 0, :3],
)
RTs_by_torsion[i, 2, :3, 3] = xyzs_in_base_frame[i, 2, :3]
# chi1 frame
if torsions[i][0] is not None:
a0, a1, a2 = torsion_indices[i, 0, 0:3]
RTs_by_torsion[i, 3, :3, :3] = make_frame(
xyzs_in_base_frame[i, a2, :3] - xyzs_in_base_frame[i, a1, :3],
xyzs_in_base_frame[i, a0, :3] - xyzs_in_base_frame[i, a1, :3],
)
RTs_by_torsion[i, 3, :3, 3] = xyzs_in_base_frame[i, a2, :3]
# chi2~4 frame
for j in range(1, 4):
if torsions[i][j] is not None:
a2 = torsion_indices[i, j, 2]
if (i == 18 and j == 2) or (
i == 8 and j == 2
): # TYR CZ-OH & HIS CE1-HE1 a special case
a0, a1 = torsion_indices[i, j, 0:2]
RTs_by_torsion[i, 3 + j, :3, :3] = make_frame(
xyzs_in_base_frame[i, a2, :3] - xyzs_in_base_frame[i, a1, :3],
xyzs_in_base_frame[i, a0, :3] - xyzs_in_base_frame[i, a1, :3],
)
else:
RTs_by_torsion[i, 3 + j, :3, :3] = make_frame(
xyzs_in_base_frame[i, a2, :3],
torch.tensor([-1.0, 0.0, 0.0]),
)
RTs_by_torsion[i, 3 + j, :3, 3] = xyzs_in_base_frame[i, a2, :3]
# CB/CG angles
NCr = 0.5 * (xyzs_in_base_frame[i, 0, :3] + xyzs_in_base_frame[i, 2, :3])
CAr = xyzs_in_base_frame[i, 1, :3]
CBr = xyzs_in_base_frame[i, 4, :3]
CGr = xyzs_in_base_frame[i, 5, :3]
reference_angles[i, 0, :] = th_ang_v(CBr - CAr, NCr - CAr)
NCp = xyzs_in_base_frame[i, 2, :3] - xyzs_in_base_frame[i, 0, :3]
NCpp = NCp - torch.dot(NCp, NCr) / torch.dot(NCr, NCr) * NCr
reference_angles[i, 1, :] = th_ang_v(CBr - CAr, NCpp)
reference_angles[i, 2, :] = th_ang_v(CGr, torch.tensor([-1.0, 0.0, 0.0]))
N_BACKBONE_ATOMS = 3
N_HEAVY = 14
def writepdb_multi(
filename,
atoms_stack,
bfacts,
seq_stack,
backbone_only=False,
chain_ids=None,
use_hydrogens=True,
):
"""
Function for writing multiple structural states of the same sequence into a single
pdb file.
"""
f = open(filename, "w")
if seq_stack.ndim != 2:
T = atoms_stack.shape[0]
seq_stack = torch.tile(seq_stack, (T, 1))
seq_stack = seq_stack.cpu()
for atoms, scpu in zip(atoms_stack, seq_stack):
ctr = 1
atomscpu = atoms.cpu()
Bfacts = torch.clamp(bfacts.cpu(), 0, 1)
for i, s in enumerate(scpu):
atms = aa2long[s]
for j, atm_j in enumerate(atms):
if backbone_only and j >= N_BACKBONE_ATOMS:
break
if not use_hydrogens and j >= N_HEAVY:
break
if (atm_j is None) or (torch.all(torch.isnan(atomscpu[i, j]))):
continue
chain_id = "A"
if chain_ids is not None:
chain_id = chain_ids[i]
f.write(
"%-6s%5s %4s %3s %s%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n"
% (
"ATOM",
ctr,
atm_j,
num2aa[s],
chain_id,
i + 1,
atomscpu[i, j, 0],
atomscpu[i, j, 1],
atomscpu[i, j, 2],
1.0,
Bfacts[i],
)
)
ctr += 1
f.write("ENDMDL\n")
| RFdiffusion-main | util.py |
##
## lk and lk term
#(LJ_RADIUS LJ_WDEPTH LK_DGFREE LK_LAMBDA LK_VOLUME)
type2ljlk = {
"CNH2":(1.968297,0.094638,3.077030,3.5000,13.500000),
"COO":(1.916661,0.141799,-3.332648,3.5000,14.653000),
"CH0":(2.011760,0.062642,1.409284,3.5000,8.998000),
"CH1":(2.011760,0.062642,-3.538387,3.5000,10.686000),
"CH2":(2.011760,0.062642,-1.854658,3.5000,18.331000),
"CH3":(2.011760,0.062642,7.292929,3.5000,25.855000),
"aroC":(2.016441,0.068775,1.797950,3.5000,16.704000),
"Ntrp":(1.802452,0.161725,-8.413116,3.5000,9.522100),
"Nhis":(1.802452,0.161725,-9.739606,3.5000,9.317700),
"NtrR":(1.802452,0.161725,-5.158080,3.5000,9.779200),
"NH2O":(1.802452,0.161725,-8.101638,3.5000,15.689000),
"Nlys":(1.802452,0.161725,-20.864641,3.5000,16.514000),
"Narg":(1.802452,0.161725,-8.968351,3.5000,15.717000),
"Npro":(1.802452,0.161725,-0.984585,3.5000,3.718100),
"OH":(1.542743,0.161947,-8.133520,3.5000,10.722000),
"OHY":(1.542743,0.161947,-8.133520,3.5000,10.722000),
"ONH2":(1.548662,0.182924,-6.591644,3.5000,10.102000),
"OOC":(1.492871,0.099873,-9.239832,3.5000,9.995600),
"S":(1.975967,0.455970,-1.707229,3.5000,17.640000),
"SH1":(1.975967,0.455970,3.291643,3.5000,23.240000),
"Nbb":(1.802452,0.161725,-9.969494,3.5000,15.992000),
"CAbb":(2.011760,0.062642,2.533791,3.5000,12.137000),
"CObb":(1.916661,0.141799,3.104248,3.5000,13.221000),
"OCbb":(1.540580,0.142417,-8.006829,3.5000,12.196000),
"HNbb":(0.901681,0.005000,0.0000,3.5000,0.0000),
"Hapo":(1.421272,0.021808,0.0000,3.5000,0.0000),
"Haro":(1.374914,0.015909,0.0000,3.5000,0.0000),
"Hpol":(0.901681,0.005000,0.0000,3.5000,0.0000),
"HS":(0.363887,0.050836,0.0000,3.5000,0.0000),
}
# hbond donor/acceptors
class HbAtom:
NO = 0
DO = 1 # donor
AC = 2 # acceptor
DA = 3 # donor & acceptor
HP = 4 # polar H
type2hb = {
"CNH2":HbAtom.NO, "COO":HbAtom.NO, "CH0":HbAtom.NO, "CH1":HbAtom.NO,
"CH2":HbAtom.NO, "CH3":HbAtom.NO, "aroC":HbAtom.NO, "Ntrp":HbAtom.DO,
"Nhis":HbAtom.AC, "NtrR":HbAtom.DO, "NH2O":HbAtom.DO, "Nlys":HbAtom.DO,
"Narg":HbAtom.DO, "Npro":HbAtom.NO, "OH":HbAtom.DA, "OHY":HbAtom.DA,
"ONH2":HbAtom.AC, "OOC":HbAtom.AC, "S":HbAtom.NO, "SH1":HbAtom.NO,
"Nbb":HbAtom.DO, "CAbb":HbAtom.NO, "CObb":HbAtom.NO, "OCbb":HbAtom.AC,
"HNbb":HbAtom.HP, "Hapo":HbAtom.NO, "Haro":HbAtom.NO, "Hpol":HbAtom.HP,
"HS":HbAtom.HP, # HP in rosetta(?)
}
##
## hbond term
class HbDonType:
PBA = 0
IND = 1
IME = 2
GDE = 3
CXA = 4
AMO = 5
HXL = 6
AHX = 7
NTYPES = 8
class HbAccType:
PBA = 0
CXA = 1
CXL = 2
HXL = 3
AHX = 4
IME = 5
NTYPES = 6
class HbHybType:
SP2 = 0
SP3 = 1
RING = 2
NTYPES = 3
type2dontype = {
"Nbb": HbDonType.PBA,
"Ntrp": HbDonType.IND,
"NtrR": HbDonType.GDE,
"Narg": HbDonType.GDE,
"NH2O": HbDonType.CXA,
"Nlys": HbDonType.AMO,
"OH": HbDonType.HXL,
"OHY": HbDonType.AHX,
}
type2acctype = {
"OCbb": HbAccType.PBA,
"ONH2": HbAccType.CXA,
"OOC": HbAccType.CXL,
"OH": HbAccType.HXL,
"OHY": HbAccType.AHX,
"Nhis": HbAccType.IME,
}
type2hybtype = {
"OCbb": HbHybType.SP2,
"ONH2": HbHybType.SP2,
"OOC": HbHybType.SP2,
"OHY": HbHybType.SP3,
"OH": HbHybType.SP3,
"Nhis": HbHybType.RING,
}
dontype2wt = {
HbDonType.PBA: 1.45,
HbDonType.IND: 1.15,
HbDonType.IME: 1.42,
HbDonType.GDE: 1.11,
HbDonType.CXA: 1.29,
HbDonType.AMO: 1.17,
HbDonType.HXL: 0.99,
HbDonType.AHX: 1.00,
}
acctype2wt = {
HbAccType.PBA: 1.19,
HbAccType.CXA: 1.21,
HbAccType.CXL: 1.10,
HbAccType.HXL: 1.15,
HbAccType.AHX: 1.15,
HbAccType.IME: 1.17,
}
class HbPolyType:
ahdist_aASN_dARG = 0
ahdist_aASN_dASN = 1
ahdist_aASN_dGLY = 2
ahdist_aASN_dHIS = 3
ahdist_aASN_dLYS = 4
ahdist_aASN_dSER = 5
ahdist_aASN_dTRP = 6
ahdist_aASN_dTYR = 7
ahdist_aASP_dARG = 8
ahdist_aASP_dASN = 9
ahdist_aASP_dGLY = 10
ahdist_aASP_dHIS = 11
ahdist_aASP_dLYS = 12
ahdist_aASP_dSER = 13
ahdist_aASP_dTRP = 14
ahdist_aASP_dTYR = 15
ahdist_aGLY_dARG = 16
ahdist_aGLY_dASN = 17
ahdist_aGLY_dGLY = 18
ahdist_aGLY_dHIS = 19
ahdist_aGLY_dLYS = 20
ahdist_aGLY_dSER = 21
ahdist_aGLY_dTRP = 22
ahdist_aGLY_dTYR = 23
ahdist_aHIS_dARG = 24
ahdist_aHIS_dASN = 25
ahdist_aHIS_dGLY = 26
ahdist_aHIS_dHIS = 27
ahdist_aHIS_dLYS = 28
ahdist_aHIS_dSER = 29
ahdist_aHIS_dTRP = 30
ahdist_aHIS_dTYR = 31
ahdist_aSER_dARG = 32
ahdist_aSER_dASN = 33
ahdist_aSER_dGLY = 34
ahdist_aSER_dHIS = 35
ahdist_aSER_dLYS = 36
ahdist_aSER_dSER = 37
ahdist_aSER_dTRP = 38
ahdist_aSER_dTYR = 39
ahdist_aTYR_dARG = 40
ahdist_aTYR_dASN = 41
ahdist_aTYR_dGLY = 42
ahdist_aTYR_dHIS = 43
ahdist_aTYR_dLYS = 44
ahdist_aTYR_dSER = 45
ahdist_aTYR_dTRP = 46
ahdist_aTYR_dTYR = 47
cosBAH_off = 48
cosBAH_7 = 49
cosBAH_6i = 50
AHD_1h = 51
AHD_1i = 52
AHD_1j = 53
AHD_1k = 54
# map donor:acceptor pairs to polynomials
hbtypepair2poly = {
(HbDonType.PBA,HbAccType.PBA): (HbPolyType.ahdist_aGLY_dGLY,HbPolyType.cosBAH_off,HbPolyType.AHD_1j),
(HbDonType.CXA,HbAccType.PBA): (HbPolyType.ahdist_aGLY_dASN,HbPolyType.cosBAH_off,HbPolyType.AHD_1j),
(HbDonType.IME,HbAccType.PBA): (HbPolyType.ahdist_aGLY_dHIS,HbPolyType.cosBAH_off,HbPolyType.AHD_1j),
(HbDonType.IND,HbAccType.PBA): (HbPolyType.ahdist_aGLY_dTRP,HbPolyType.cosBAH_off,HbPolyType.AHD_1j),
(HbDonType.AMO,HbAccType.PBA): (HbPolyType.ahdist_aGLY_dLYS,HbPolyType.cosBAH_off,HbPolyType.AHD_1h),
(HbDonType.GDE,HbAccType.PBA): (HbPolyType.ahdist_aGLY_dARG,HbPolyType.cosBAH_off,HbPolyType.AHD_1j),
(HbDonType.AHX,HbAccType.PBA): (HbPolyType.ahdist_aGLY_dTYR,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.HXL,HbAccType.PBA): (HbPolyType.ahdist_aGLY_dSER,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.PBA,HbAccType.CXA): (HbPolyType.ahdist_aASN_dGLY,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.CXA,HbAccType.CXA): (HbPolyType.ahdist_aASN_dASN,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.IME,HbAccType.CXA): (HbPolyType.ahdist_aASN_dHIS,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.IND,HbAccType.CXA): (HbPolyType.ahdist_aASN_dTRP,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.AMO,HbAccType.CXA): (HbPolyType.ahdist_aASN_dLYS,HbPolyType.cosBAH_off,HbPolyType.AHD_1h),
(HbDonType.GDE,HbAccType.CXA): (HbPolyType.ahdist_aASN_dARG,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.AHX,HbAccType.CXA): (HbPolyType.ahdist_aASN_dTYR,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.HXL,HbAccType.CXA): (HbPolyType.ahdist_aASN_dSER,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.PBA,HbAccType.CXL): (HbPolyType.ahdist_aASP_dGLY,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.CXA,HbAccType.CXL): (HbPolyType.ahdist_aASP_dASN,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.IME,HbAccType.CXL): (HbPolyType.ahdist_aASP_dHIS,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.IND,HbAccType.CXL): (HbPolyType.ahdist_aASP_dTRP,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.AMO,HbAccType.CXL): (HbPolyType.ahdist_aASP_dLYS,HbPolyType.cosBAH_off,HbPolyType.AHD_1h),
(HbDonType.GDE,HbAccType.CXL): (HbPolyType.ahdist_aASP_dARG,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.AHX,HbAccType.CXL): (HbPolyType.ahdist_aASP_dTYR,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.HXL,HbAccType.CXL): (HbPolyType.ahdist_aASP_dSER,HbPolyType.cosBAH_off,HbPolyType.AHD_1k),
(HbDonType.PBA,HbAccType.IME): (HbPolyType.ahdist_aHIS_dGLY,HbPolyType.cosBAH_7,HbPolyType.AHD_1i),
(HbDonType.CXA,HbAccType.IME): (HbPolyType.ahdist_aHIS_dASN,HbPolyType.cosBAH_7,HbPolyType.AHD_1i),
(HbDonType.IME,HbAccType.IME): (HbPolyType.ahdist_aHIS_dHIS,HbPolyType.cosBAH_7,HbPolyType.AHD_1h),
(HbDonType.IND,HbAccType.IME): (HbPolyType.ahdist_aHIS_dTRP,HbPolyType.cosBAH_7,HbPolyType.AHD_1h),
(HbDonType.AMO,HbAccType.IME): (HbPolyType.ahdist_aHIS_dLYS,HbPolyType.cosBAH_7,HbPolyType.AHD_1i),
(HbDonType.GDE,HbAccType.IME): (HbPolyType.ahdist_aHIS_dARG,HbPolyType.cosBAH_7,HbPolyType.AHD_1h),
(HbDonType.AHX,HbAccType.IME): (HbPolyType.ahdist_aHIS_dTYR,HbPolyType.cosBAH_7,HbPolyType.AHD_1i),
(HbDonType.HXL,HbAccType.IME): (HbPolyType.ahdist_aHIS_dSER,HbPolyType.cosBAH_7,HbPolyType.AHD_1i),
(HbDonType.PBA,HbAccType.AHX): (HbPolyType.ahdist_aTYR_dGLY,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.CXA,HbAccType.AHX): (HbPolyType.ahdist_aTYR_dASN,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.IME,HbAccType.AHX): (HbPolyType.ahdist_aTYR_dHIS,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.IND,HbAccType.AHX): (HbPolyType.ahdist_aTYR_dTRP,HbPolyType.cosBAH_6i,HbPolyType.AHD_1h),
(HbDonType.AMO,HbAccType.AHX): (HbPolyType.ahdist_aTYR_dLYS,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.GDE,HbAccType.AHX): (HbPolyType.ahdist_aTYR_dARG,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.AHX,HbAccType.AHX): (HbPolyType.ahdist_aTYR_dTYR,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.HXL,HbAccType.AHX): (HbPolyType.ahdist_aTYR_dSER,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.PBA,HbAccType.HXL): (HbPolyType.ahdist_aSER_dGLY,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.CXA,HbAccType.HXL): (HbPolyType.ahdist_aSER_dASN,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.IME,HbAccType.HXL): (HbPolyType.ahdist_aSER_dHIS,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.IND,HbAccType.HXL): (HbPolyType.ahdist_aSER_dTRP,HbPolyType.cosBAH_6i,HbPolyType.AHD_1h),
(HbDonType.AMO,HbAccType.HXL): (HbPolyType.ahdist_aSER_dLYS,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.GDE,HbAccType.HXL): (HbPolyType.ahdist_aSER_dARG,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.AHX,HbAccType.HXL): (HbPolyType.ahdist_aSER_dTYR,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
(HbDonType.HXL,HbAccType.HXL): (HbPolyType.ahdist_aSER_dSER,HbPolyType.cosBAH_6i,HbPolyType.AHD_1i),
}
# polynomials are triplets, (x_min, x_max), (y[x<x_min],y[x>x_max]), (c_9,...,c_0)
hbpolytype2coeffs = { # Parameters imported from rosetta sp2_elec_params @v2017.48-dev59886
HbPolyType.ahdist_aASN_dARG: ((0.7019094761929999, 2.86820307153,),(1.1, 1.1,),( 0.58376113, -9.29345473, 64.86270904, -260.3946711, 661.43138077, -1098.01378958, 1183.58371466, -790.82929582, 291.33125475, -43.01629727,)),
HbPolyType.ahdist_aASN_dASN: ((0.625841094801, 2.75107708444,),(1.1, 1.1,),( -1.31243015, 18.6745072, -112.63858313, 373.32878091, -734.99145504, 861.38324861, -556.21026097, 143.5626977, 20.03238394, -11.52167705,)),
HbPolyType.ahdist_aASN_dGLY: ((0.7477341047139999, 2.6796350782799996,),(1.1, 1.1,),( -1.61294554, 23.3150793, -144.11313069, 496.13575, -1037.83809166, 1348.76826073, -1065.14368678, 473.89008925, -100.41142701, 7.44453515,)),
HbPolyType.ahdist_aASN_dHIS: ((0.344789524346, 2.8303582266000005,),(1.1, 1.1,),( -0.2657122, 4.1073775, -26.9099632, 97.10486507, -209.96002602, 277.33057268, -218.74766996, 97.42852213, -24.07382402, 3.73962807,)),
HbPolyType.ahdist_aASN_dLYS: ((0.542905671869, 2.45259389314,),(1.1, 1.1,),( 1.38531754, -18.48733797, 106.14444613, -344.70585054, 698.91577956, -917.0879402, 775.32787908, -403.09588787, 113.65054778, -11.66516403,)),
HbPolyType.ahdist_aASN_dSER: ((1.0812774602500002, 2.6832123582599996,),(1.1, 1.1,),( -3.51524353, 47.54032873, -254.40168577, 617.84606386, -255.49935027, -2361.56230539, 6426.85797934, -7760.4403891, 4694.08106855, -1149.83549068,)),
HbPolyType.ahdist_aASN_dTRP: ((0.6689984999999999, 3.0704254,),(1.1, 1.1,),( -0.5284840422, 8.3510150838, -56.4100479414, 212.4884326254, -488.3178610608, 703.7762350506, -628.9936994633999, 331.4294356146, -93.265817571, 11.9691623698,)),
HbPolyType.ahdist_aASN_dTYR: ((1.08950268805, 2.6887046709400004,),(1.1, 1.1,),( -4.4488705, 63.27696281, -371.44187037, 1121.71921621, -1638.11394306, 142.99988401, 3436.65879147, -5496.07011787, 3709.30505237, -962.79669688,)),
HbPolyType.ahdist_aASP_dARG: ((0.8100404642229999, 2.9851230124799994,),(1.1, 1.1,),( -0.66430344, 10.41343145, -70.12656205, 265.12578414, -617.05849171, 911.39378582, -847.25013928, 472.09090981, -141.71513167, 18.57721132,)),
HbPolyType.ahdist_aASP_dASN: ((1.05401125073, 3.11129675908,),(1.1, 1.1,),( 0.02090728, -0.24144928, -0.19578075, 16.80904547, -117.70216251, 407.18551288, -809.95195924, 939.83137947, -593.94527692, 159.57610528,)),
HbPolyType.ahdist_aASP_dGLY: ((0.886260952629, 2.66843608743,),(1.1, 1.1,),( -7.00699267, 107.33021779, -713.45752385, 2694.43092298, -6353.05100287, 9667.94098394, -9461.9261027, 5721.0086877, -1933.97818198, 279.47763789,)),
HbPolyType.ahdist_aASP_dHIS: ((1.03597611139, 2.78208509117,),(1.1, 1.1,),( -1.34823406, 17.08925926, -78.75087193, 106.32795459, 400.18459698, -2041.04320193, 4033.83557387, -4239.60530204, 2324.00877252, -519.38410941,)),
HbPolyType.ahdist_aASP_dLYS: ((0.97789485082, 2.50496946108,),(1.1, 1.1,),( -0.41300315, 6.59243438, -44.44525308, 163.11796012, -351.2307798, 443.2463146, -297.84582856, 62.38600547, 33.77496227, -14.11652182,)),
HbPolyType.ahdist_aASP_dSER: ((0.542905671869, 2.45259389314,),(1.1, 1.1,),( 1.38531754, -18.48733797, 106.14444613, -344.70585054, 698.91577956, -917.0879402, 775.32787908, -403.09588787, 113.65054778, -11.66516403,)),
HbPolyType.ahdist_aASP_dTRP: ((0.419155746414, 3.0486938610500003,),(1.1, 1.1,),( -0.24563471, 3.85598551, -25.75176874, 95.36525025, -214.13175785, 299.76133553, -259.0691378, 132.06975835, -37.15612683, 5.60445773,)),
HbPolyType.ahdist_aASP_dTYR: ((1.01057521468, 2.7207545786900003,),(1.1, 1.1,),( -0.15808672, -10.21398871, 178.80080949, -1238.0583801, 4736.25248274, -11071.96777725, 16239.07550047, -14593.21092621, 7335.66765017, -1575.08145078,)),
HbPolyType.ahdist_aGLY_dARG: ((0.499016667857, 2.9377031027599996,),(1.1, 1.1,),( -0.15923533, 2.5526639, -17.38788803, 65.71046957, -151.13491186, 218.78048387, -199.15882919, 110.56568974, -35.95143745, 6.47580213,)),
HbPolyType.ahdist_aGLY_dASN: ((0.7194388032060001, 2.9303772333599998,),(1.1, 1.1,),( -1.40718342, 23.65929694, -172.97144348, 720.64417348, -1882.85420815, 3194.87197776, -3515.52467458, 2415.75238278, -941.47705161, 159.84784277,)),
HbPolyType.ahdist_aGLY_dGLY: ((1.38403812683, 2.9981039433,),(1.1, 1.1,),( -0.5307601, 6.47949946, -22.39522814, -55.14303544, 708.30945242, -2619.49318162, 5227.8805795, -6043.31211632, 3806.04676175, -1007.66024144,)),
HbPolyType.ahdist_aGLY_dHIS: ((0.47406840932899996, 2.9234200830400003,),(1.1, 1.1,),( -0.12881679, 1.933838, -12.03134888, 39.92691227, -75.41519959, 78.87968016, -37.82769801, -0.13178679, 4.50193019, 0.45408359,)),
HbPolyType.ahdist_aGLY_dLYS: ((0.545347533475, 2.42624380351,),(1.1, 1.1,),( -0.22921901, 2.07015714, -6.2947417, 0.66645697, 45.21805416, -130.26668981, 176.32401031, -126.68226346, 43.96744431, -4.40105281,)),
HbPolyType.ahdist_aGLY_dSER: ((1.2803349239700001, 2.2465996077400003,),(1.1, 1.1,),( 6.72508613, -86.98495585, 454.18518444, -1119.89141452, 715.624663, 3172.36852982, -9455.49113097, 11797.38766934, -7363.28302948, 1885.50119665,)),
HbPolyType.ahdist_aGLY_dTRP: ((0.686512740494, 3.02901351815,),(1.1, 1.1,),( -0.1051487, 1.41597708, -7.42149173, 17.31830704, -6.98293652, -54.76605063, 130.95272289, -132.77575305, 62.75460448, -9.89110842,)),
HbPolyType.ahdist_aGLY_dTYR: ((1.28894687639, 2.26335316892,),(1.1, 1.1,),( 13.84536925, -169.40579865, 893.79467505, -2670.60617561, 5016.46234701, -6293.79378818, 5585.1049063, -3683.50722701, 1709.48661405, -399.5712153,)),
HbPolyType.ahdist_aHIS_dARG: ((0.8967400957230001, 2.96809434226,),(1.1, 1.1,),( 0.43460495, -10.52727665, 103.16979807, -551.42887412, 1793.25378923, -3701.08304991, 4861.05155388, -3922.4285529, 1763.82137881, -335.43441944,)),
HbPolyType.ahdist_aHIS_dASN: ((0.887120931718, 2.59166903153,),(1.1, 1.1,),( -3.50289894, 54.42813924, -368.14395507, 1418.90186454, -3425.60485859, 5360.92334837, -5428.54462336, 3424.68800187, -1221.49631986, 189.27122436,)),
HbPolyType.ahdist_aHIS_dGLY: ((1.01629363411, 2.58523052904,),(1.1, 1.1,),( -1.68095217, 21.31894078, -107.72203494, 251.81021758, -134.07465831, -707.64527046, 1894.6282743, -2156.85951846, 1216.83585872, -275.48078944,)),
HbPolyType.ahdist_aHIS_dHIS: ((0.9773010778919999, 2.72533796329,),(1.1, 1.1,),( -2.33350626, 35.66072412, -233.98966111, 859.13714961, -1925.30958567, 2685.35293578, -2257.48067507, 1021.49796136, -169.36082523, -12.1348055,)),
HbPolyType.ahdist_aHIS_dLYS: ((0.7080936539849999, 2.47191718632,),(1.1, 1.1,),( -1.88479369, 28.38084382, -185.74039957, 690.81875917, -1605.11404391, 2414.83545623, -2355.9723201, 1442.24496229, -506.45880637, 79.47512505,)),
HbPolyType.ahdist_aHIS_dSER: ((0.90846809159, 2.5477956147,),(1.1, 1.1,),( -0.92004641, 15.91841533, -117.83979251, 488.22211296, -1244.13047376, 2017.43704053, -2076.04468019, 1302.42621488, -451.29138643, 67.15812575,)),
HbPolyType.ahdist_aHIS_dTRP: ((0.991999676806, 2.81296584506,),(1.1, 1.1,),( -1.29358587, 19.97152857, -131.89796017, 485.29199356, -1084.0466445, 1497.3352889, -1234.58042682, 535.8048197, -75.58951691, -9.91148332,)),
HbPolyType.ahdist_aHIS_dTYR: ((0.882661836357, 2.5469016429900004,),(1.1, 1.1,),( -6.94700143, 109.07997256, -747.64035726, 2929.83959536, -7220.15788571, 11583.34170519, -12078.443492, 7881.85479715, -2918.19482068, 468.23988622,)),
HbPolyType.ahdist_aSER_dARG: ((1.0204658147399999, 2.8899566041900004,),(1.1, 1.1,),( 0.33887327, -7.54511361, 70.87316645, -371.88263665, 1206.67454443, -2516.82084076, 3379.45432693, -2819.73384601, 1325.33307517, -265.54533008,)),
HbPolyType.ahdist_aSER_dASN: ((1.01393052233, 3.0024434159299997,),(1.1, 1.1,),( 0.37012361, -7.46486204, 64.85775924, -318.6047209, 974.66322243, -1924.37334018, 2451.63840629, -1943.1915675, 867.07870559, -163.83771761,)),
HbPolyType.ahdist_aSER_dGLY: ((1.3856562156299999, 2.74160605537,),(1.1, 1.1,),( -1.32847415, 22.67528654, -172.53450064, 770.79034865, -2233.48829652, 4354.38807288, -5697.35144236, 4803.38686157, -2361.48028857, 518.28202382,)),
HbPolyType.ahdist_aSER_dHIS: ((0.550992321207, 2.68549261999,),(1.1, 1.1,),( -1.98041793, 29.59668639, -190.36751773, 688.43324385, -1534.68894765, 2175.66568976, -1952.07622113, 1066.28943929, -324.23381388, 43.41006168,)),
HbPolyType.ahdist_aSER_dLYS: ((0.8603189393170001, 2.77729502744,),(1.1, 1.1,),( 0.90884741, -17.24690746, 141.78469099, -661.85989315, 1929.7674992, -3636.43392779, 4419.00727923, -3332.43482061, 1410.78913266, -253.53829424,)),
HbPolyType.ahdist_aSER_dSER: ((1.10866545921, 2.61727781204,),(1.1, 1.1,),( -0.38264308, 4.41779675, -10.7016645, -81.91314845, 668.91174735, -2187.50684758, 3983.56103269, -4213.32320546, 2418.41531442, -580.28918569,)),
HbPolyType.ahdist_aSER_dTRP: ((1.4092077245899999, 2.8066121197099996,),(1.1, 1.1,),( 0.73762477, -11.70741276, 73.05154232, -205.00144794, 89.58794368, 1082.94541375, -3343.98293188, 4601.70815729, -3178.53568678, 896.59487831,)),
HbPolyType.ahdist_aSER_dTYR: ((1.10773547919, 2.60403567341,),(1.1, 1.1,),( -1.13249925, 14.66643161, -69.01708791, 93.96846742, 380.56063898, -1984.56675689, 4074.08891127, -4492.76927139, 2613.13168054, -627.71933508,)),
HbPolyType.ahdist_aTYR_dARG: ((1.05581400627, 2.85499888099,),(1.1, 1.1,),( -0.30396592, 5.30288548, -39.75788579, 167.5416547, -435.15958911, 716.52357586, -735.95195083, 439.76284677, -130.00400085, 13.23827556,)),
HbPolyType.ahdist_aTYR_dASN: ((1.0994919065200002, 2.8400869077900004,),(1.1, 1.1,),( 0.33548259, -3.5890451, 8.97769025, 48.1492734, -400.5983616, 1269.89613211, -2238.03101675, 2298.33009115, -1290.42961162, 308.43185147,)),
HbPolyType.ahdist_aTYR_dGLY: ((1.36546155066, 2.7303075916400004,),(1.1, 1.1,),( -1.55312915, 18.62092487, -70.91365499, -41.83066505, 1248.88835245, -4719.81948329, 9186.09528168, -10266.11434548, 6266.21959533, -1622.19652457,)),
HbPolyType.ahdist_aTYR_dHIS: ((0.5955982461899999, 2.6643551317500003,),(1.1, 1.1,),( -0.47442788, 7.16629863, -46.71287553, 171.46128947, -388.17484011, 558.45202337, -506.35587481, 276.46237273, -83.52554392, 12.05709329,)),
HbPolyType.ahdist_aTYR_dLYS: ((0.7978598238760001, 2.7620933782,),(1.1, 1.1,),( -0.20201464, 1.69684984, 0.27677515, -55.05786347, 286.29918332, -725.92372531, 1054.771746, -889.33602341, 401.11342256, -73.02221189,)),
HbPolyType.ahdist_aTYR_dSER: ((0.7083554962559999, 2.7032011990599996,),(1.1, 1.1,),( -0.70764192, 11.67978065, -82.80447482, 329.83401367, -810.58976486, 1269.57613941, -1261.04047117, 761.72890446, -254.37526011, 37.24301861,)),
HbPolyType.ahdist_aTYR_dTRP: ((1.10934023051, 2.8819112108,),(1.1, 1.1,),( -11.58453967, 204.88308091, -1589.77384548, 7100.84791905, -20113.61354433, 37457.83646055, -45850.02969172, 35559.8805122, -15854.78726237, 3098.04931146,)),
HbPolyType.ahdist_aTYR_dTYR: ((1.1105954899400001, 2.60081798685,),(1.1, 1.1,),( -1.63120628, 19.48493187, -81.0332905, 56.80517706, 687.42717782, -2842.77799908, 5385.52231471, -5656.74159307, 3178.83470588, -744.70042777,)),
HbPolyType.AHD_1h: ((1.76555274367, 3.1416,),(1.1, 1.1,),( 0.62725838, -9.98558225, 59.39060071, -120.82930213, -333.26536028, 2603.13082592, -6895.51207142, 9651.25238056, -7127.13394872, 2194.77244026,)),
HbPolyType.AHD_1i: ((1.59914724347, 3.1416,),(1.1, 1.1,),( -0.18888801, 3.48241679, -25.65508662, 89.57085435, -95.91708218, -367.93452341, 1589.6904702, -2662.3582135, 2184.40194483, -723.28383545,)),
HbPolyType.AHD_1j: ((1.1435646388, 3.1416,),(1.1, 1.1,),( 0.47683259, -9.54524724, 83.62557693, -420.55867774, 1337.19354878, -2786.26265686, 3803.178227, -3278.62879901, 1619.04116204, -347.50157909,)),
HbPolyType.AHD_1k: ((1.15651981164, 3.1416,),(1.1, 1.1,),( -0.10757999, 2.0276542, -16.51949978, 75.83866839, -214.18025678, 380.55117567, -415.47847283, 255.66998474, -69.94662165, 3.21313428,)),
HbPolyType.cosBAH_off: ((-1234.0, 1.1,),(1.1, 1.1,),( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,)),
HbPolyType.cosBAH_6i: ((-0.23538144897100002, 1.1,),(1.1, 1.1,),( -0.822093, -3.75364636, 46.88852157, -129.5440564, 146.69151428, -67.60598792, 2.91683129, 9.26673173, -3.84488178, 0.05706659,)),
HbPolyType.cosBAH_7: ((-0.019373850666900002, 1.1,),(1.1, 1.1,),( 0.0, -27.942923450028, 136.039920253368, -268.06959056747, 275.400462507919, -153.502076215949, 39.741591385461, 0.693861510121, -3.885952320499, 1.024765090788892)),
} | RFdiffusion-main | scoring.py |
import torch
import numpy as np
import random
from chemical import INIT_CRDS
from util import generate_Cbeta  # assumed location; needed by construct_block_adj_matrix below
from icecream import ic
def th_min_angle(start, end, radians=False):
"""
Finds the angle you would add to <start> in order to get to <end>
on the shortest path.
"""
a,b,c = (np.pi, 2*np.pi, 3*np.pi) if radians else (180, 360, 540)
shortest_angle = ((((end - start) % b) + c) % b) - a
return shortest_angle
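# Example (illustrative, not part of the original module): the signed
# shortest-path rotation wraps correctly around the circle,
#   th_min_angle(torch.tensor([350., 10.]), torch.tensor([10., 350.]))
#   # -> tensor([ 20., -20.])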
def th_interpolate_angles(start, end, T, n_diffuse, mindiff=None, radians=True):
    """
    Linearly interpolates each angle from <start> to <end> along the shortest
    path over n_diffuse steps, then holds the final value out to T steps.
    """
# find the minimum angle to add to get from start to end
angle_diffs = th_min_angle(start, end, radians=radians)
if mindiff is not None:
assert torch.sum(mindiff.flatten()-angle_diffs) == 0.
if n_diffuse is None:
# default is to diffuse for max steps
        n_diffuse = torch.full((len(angle_diffs),), T)
interps = []
for i,diff in enumerate(angle_diffs):
N = int(n_diffuse[i])
actual_interp = torch.linspace(start[i], start[i]+diff, N)
whole_interp = torch.full((T,), float(start[i]+diff))
        whole_interp[:N] = actual_interp
interps.append(whole_interp)
return torch.stack(interps, dim=0)
def th_interpolate_angle_single(start, end, step, T, mindiff=None, radians=True):
    """
    Evaluates the linear interpolation between <start> and <end> (along the
    shortest angular path) at a single <step> out of T total steps.
    """
# find the minimum angle to add to get from start to end
angle_diffs = th_min_angle(start, end, radians=radians)
if mindiff is not None:
assert torch.sum(mindiff.flatten()-angle_diffs) == 0.
# linearly interpolate between x = [0, T-1], y = [start, start + diff]
x_range = T-1
interps = step / x_range * angle_diffs + start
return interps
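# Example (illustrative): halfway-ish through, step 2 of T=5 (x = 2/4) from 0 to 90 degrees,
#   th_interpolate_angle_single(torch.tensor([0.]), torch.tensor([90.]), step=2, T=5, radians=False)
#   # -> tensor([45.])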
def get_aa_schedule(T, L, nsteps=100):
"""
Returns the steps t when each amino acid should be decoded,
    as well as how many steps each amino acid's chi angles will be diffused for
Parameters:
T (int, required): Total number of steps we are decoding the sequence over
L (int, required): Length of protein sequence
nsteps (int, optional): Number of steps over the course of which to decode the amino acids
    Returns: four items
        decode_times (list): List of times t when the positions in <decode_order> should be decoded
        decode_order (list): List of lists, each element containing which positions are going to be decoded at
                             the corresponding time in <decode_times>
        idx2diffusion_steps (np.array): Array mapping the index of the residue to how many diffusion steps it will require
        aa_masks (np.array): (200, L) boolean array which is True where a position is still masked at step t
"""
# nsteps can't be more than T or more than length of protein
if (nsteps > T) or (nsteps > L):
nsteps = min([T,L])
decode_order = [[a] for a in range(L)]
random.shuffle(decode_order)
while len(decode_order) > nsteps:
# pop an element and then add those positions randomly to some other step
tmp_seqpos = decode_order.pop()
decode_order[random.randint(0,len(decode_order)-1)] += tmp_seqpos
random.shuffle(decode_order)
decode_times = np.arange(nsteps)+1
# now given decode times, calculate number of diffusion steps each position gets
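    # NOTE: the hardcoded 200 rows below implicitly cap the number of decoding steps (t < 200)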
aa_masks = np.full((200,L), False)
idx2diffusion_steps = np.full((L,),float(np.nan))
for i,t in enumerate(decode_times):
decode_pos = decode_order[i] # positions to be decoded at this step
for j,pos in enumerate(decode_pos):
# calculate number of diffusion steps this residue gets
idx2diffusion_steps[pos] = int(t)
aa_masks[t,pos] = True
aa_masks = np.cumsum(aa_masks, axis=0)
return decode_times, decode_order, idx2diffusion_steps, ~(aa_masks.astype(bool))
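# Usage sketch (illustrative values, not from the original file): schedule 50
# residues over 200 diffusion steps.
#   decode_times, decode_order, idx2steps, aa_masks = get_aa_schedule(T=200, L=50)
#   len(decode_times) == len(decode_order)   # one group of positions per decode time
#   aa_masks.shape == (200, 50)              # True where a residue is still masked at step t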
####################
### for SecStruc ###
####################
def ss_to_tensor(ss_dict):
    """
    Converts a parsed secondary structure string to an integer-encoded array:
    0 = Helix
    1 = Strand
    2 = Loop
    3 = Mask/unknown
    4 = idx for pdb
    """
ss_conv = {'H':0,'E':1,'L':2}
ss_int = np.array([int(ss_conv[i]) for i in ss_dict['ss']])
return ss_int
def mask_ss(ss, min_mask = 0, max_mask = 0.75):
    """
    Takes an ss array, finds the junctions between elements, and randomly masks
    them until a random proportion (up to 75%) of the sequence is masked.
    Input: numpy array of ss (H=0, E=1, L=2, mask=3)
    Output: one-hot ss tensor with some proportion of junctions masked, plus the boolean mask
    """
mask_prop = random.uniform(min_mask, max_mask)
transitions = np.where(ss[:-1] - ss[1:] != 0)[0] #gets last index of each block of ss
counter = 0
#TODO think about masking whole ss elements
while len(ss[ss == 3])/len(ss) < mask_prop and counter < 100: #very hacky - do better
try:
width = random.randint(1,9)
start = random.choice(transitions)
offset = random.randint(-8,1)
ss[start+offset:start+offset+width] = 3
counter += 1
        except Exception:
            # e.g. no junction left to pick from
            counter += 1
ss = torch.tensor(ss)
mask = torch.where(ss == 3, True, False)
ss = torch.nn.functional.one_hot(ss, num_classes=4)
return ss, mask
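# Usage sketch (illustrative): mask junctions in a toy helix/loop/strand string.
# Note that mask_ss modifies its input array in place.
#   ss_int = ss_to_tensor({'ss': 'HHHHHLLLEEEEE'})
#   ss_onehot, mask = mask_ss(ss_int)   # (L, 4) one-hot tensor and (L,) boolean mask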
def construct_block_adj_matrix( sstruct, xyz, nan_mask, cutoff=6, include_loops=False ):
    '''
    Given a sstruct specification and backbone coordinates, build a block adjacency matrix.
    Input:
        sstruct (torch.FloatTensor): (L) length tensor with numeric encoding of sstruct at each position
        xyz (torch.FloatTensor): (L,3,3) tensor of Cartesian coordinates of backbone N,Ca,C atoms
        nan_mask (torch.BoolTensor): (L) tensor which is True at positions with valid (non-nan) coordinates
        cutoff (float): The Cb distance cutoff under which residue pairs are considered adjacent
            By eye, Nate thinks 6A is a good Cb distance cutoff
    Output:
        block_adj (torch.FloatTensor): (L,L) matrix which is 1 where two secondary structure blocks are adjacent
    '''
# Remove nans at this stage, as ss doesn't consider nans
xyz_nonan = xyz[nan_mask]
L = xyz_nonan.shape[0]
assert L == sstruct.shape[0]
# three anchor atoms
N = xyz_nonan[:,0]
Ca = xyz_nonan[:,1]
C = xyz_nonan[:,2]
# recreate Cb given N,Ca,C
Cb = generate_Cbeta(N,Ca,C)
dist = get_pair_dist(Cb,Cb) # [L,L]
dist[torch.isnan(dist)] = 999.9
assert torch.sum(torch.isnan(dist)) == 0
dist += 999.9*torch.eye(L,device=xyz.device)
# Now we have dist matrix and sstruct specification, turn this into a block adjacency matrix
# First: Construct a list of segments and the index at which they begin and end
in_segment = True
segments = []
begin = -1
end = -1
# need to expand ss out to size L
for i in range(sstruct.shape[0]):
# Starting edge case
if i == 0:
begin = 0
continue
if not sstruct[i] == sstruct[i-1]:
end = i
segments.append( (sstruct[i-1], begin, end) )
begin = i
    # Ending edge case: append the trailing segment
if not end == sstruct.shape[0]:
segments.append( (sstruct[-1], begin, sstruct.shape[0]) )
# Second: Using segments and dgram, determine adjacent blocks
block_adj = torch.zeros_like(dist)
for i in range(len(segments)):
curr_segment = segments[i]
if curr_segment[0] == 2 and not include_loops: continue
begin_i = curr_segment[1]
end_i = curr_segment[2]
for j in range(i+1, len(segments)):
j_segment = segments[j]
if j_segment[0] == 2 and not include_loops: continue
begin_j = j_segment[1]
end_j = j_segment[2]
if torch.any( dist[begin_i:end_i, begin_j:end_j] < cutoff ):
                # Matrix is symmetric
block_adj[begin_i:end_i, begin_j:end_j] = torch.ones(end_i - begin_i, end_j - begin_j)
block_adj[begin_j:end_j, begin_i:end_i] = torch.ones(end_j - begin_j, end_i - begin_i)
return block_adj
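# Usage sketch (illustrative; random coordinates, so the result is arbitrary,
# and generate_Cbeta is assumed to come from util):
#   ss   = torch.tensor([0]*8 + [2]*3 + [1]*8)        # helix, loop, strand
#   xyz  = torch.randn(19, 3, 3)                      # backbone N, Ca, C coordinates
#   mask = torch.ones(19, dtype=torch.bool)
#   adj  = construct_block_adj_matrix(ss, xyz, mask)  # (19, 19) block adjacency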
def get_pair_dist(a, b):
"""calculate pair distances between two sets of points
Parameters
----------
a,b : pytorch tensors of shape [batch,nres,3]
store Cartesian coordinates of two sets of atoms
Returns
-------
dist : pytorch tensor of shape [batch,nres,nres]
          stores pairwise distances between atoms in a and b
"""
dist = torch.cdist(a, b, p=2)
return dist
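# Quick self-check (illustrative): points (0,0,0) and (3,4,0) are 5.0 apart.
#   a = torch.tensor([[[0., 0., 0.], [3., 4., 0.]]])   # (batch=1, nres=2, 3)
#   get_pair_dist(a, a)   # off-diagonal entries are 5.0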
| RFdiffusion-main | diff_util.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from opt_einsum import contract as einsum
from util_module import init_lecun_normal
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, r_ff, p_drop=0.1):
super(FeedForwardLayer, self).__init__()
self.norm = nn.LayerNorm(d_model)
self.linear1 = nn.Linear(d_model, d_model*r_ff)
self.dropout = nn.Dropout(p_drop)
self.linear2 = nn.Linear(d_model*r_ff, d_model)
self.reset_parameter()
def reset_parameter(self):
        # initialize linear layer right before ReLU: He initializer (kaiming normal)
nn.init.kaiming_normal_(self.linear1.weight, nonlinearity='relu')
nn.init.zeros_(self.linear1.bias)
# initialize linear layer right before residual connection: zero initialize
nn.init.zeros_(self.linear2.weight)
nn.init.zeros_(self.linear2.bias)
def forward(self, src):
src = self.norm(src)
src = self.linear2(self.dropout(F.relu_(self.linear1(src))))
return src
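# Usage sketch (illustrative): the layer is shape-preserving; forward pre-norms
# and returns only the branch output, so the caller adds the residual.
#   ff = FeedForwardLayer(d_model=256, r_ff=4)
#   x = torch.randn(1, 128, 256)
#   x = x + ff(x)   # -> (1, 128, 256)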
class Attention(nn.Module):
# calculate multi-head attention
def __init__(self, d_query, d_key, n_head, d_hidden, d_out, p_drop=0.1):
super(Attention, self).__init__()
self.h = n_head
self.dim = d_hidden
#
self.to_q = nn.Linear(d_query, n_head*d_hidden, bias=False)
self.to_k = nn.Linear(d_key, n_head*d_hidden, bias=False)
self.to_v = nn.Linear(d_key, n_head*d_hidden, bias=False)
#
self.to_out = nn.Linear(n_head*d_hidden, d_out)
self.scaling = 1/math.sqrt(d_hidden)
#
# initialize all parameters properly
self.reset_parameter()
def reset_parameter(self):
# query/key/value projection: Glorot uniform / Xavier uniform
nn.init.xavier_uniform_(self.to_q.weight)
nn.init.xavier_uniform_(self.to_k.weight)
nn.init.xavier_uniform_(self.to_v.weight)
        # to_out: right before residual connection: zero initialize -- so the residual block acts as the identity at the beginning
nn.init.zeros_(self.to_out.weight)
nn.init.zeros_(self.to_out.bias)
def forward(self, query, key, value):
B, Q = query.shape[:2]
B, K = key.shape[:2]
#
query = self.to_q(query).reshape(B, Q, self.h, self.dim)
key = self.to_k(key).reshape(B, K, self.h, self.dim)
value = self.to_v(value).reshape(B, K, self.h, self.dim)
#
query = query * self.scaling
attn = einsum('bqhd,bkhd->bhqk', query, key)
attn = F.softmax(attn, dim=-1)
#
out = einsum('bhqk,bkhd->bqhd', attn, value)
out = out.reshape(B, Q, self.h*self.dim)
#
out = self.to_out(out)
return out
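# Usage sketch (illustrative): cross-attention from 16 queries over 32 keys/values.
# Since to_out is zero-initialized, a freshly constructed module returns zeros.
#   attn = Attention(d_query=128, d_key=64, n_head=8, d_hidden=16, d_out=128)
#   q, kv = torch.randn(2, 16, 128), torch.randn(2, 32, 64)
#   out = attn(q, kv, kv)   # -> (2, 16, 128)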
class AttentionWithBias(nn.Module):
def __init__(self, d_in=256, d_bias=128, n_head=8, d_hidden=32):
super(AttentionWithBias, self).__init__()
self.norm_in = nn.LayerNorm(d_in)
self.norm_bias = nn.LayerNorm(d_bias)
#
self.to_q = nn.Linear(d_in, n_head*d_hidden, bias=False)
self.to_k = nn.Linear(d_in, n_head*d_hidden, bias=False)
self.to_v = nn.Linear(d_in, n_head*d_hidden, bias=False)
self.to_b = nn.Linear(d_bias, n_head, bias=False)
self.to_g = nn.Linear(d_in, n_head*d_hidden)
self.to_out = nn.Linear(n_head*d_hidden, d_in)
self.scaling = 1/math.sqrt(d_hidden)
self.h = n_head
self.dim = d_hidden
self.reset_parameter()
def reset_parameter(self):
# query/key/value projection: Glorot uniform / Xavier uniform
nn.init.xavier_uniform_(self.to_q.weight)
nn.init.xavier_uniform_(self.to_k.weight)
nn.init.xavier_uniform_(self.to_v.weight)
# bias: normal distribution
self.to_b = init_lecun_normal(self.to_b)
        # gating: zero weights, one biases (mostly open gate at the beginning)
nn.init.zeros_(self.to_g.weight)
nn.init.ones_(self.to_g.bias)
        # to_out: right before residual connection: zero initialize -- so the residual block acts as the identity at the beginning
nn.init.zeros_(self.to_out.weight)
nn.init.zeros_(self.to_out.bias)
def forward(self, x, bias):
B, L = x.shape[:2]
#
x = self.norm_in(x)
bias = self.norm_bias(bias)
#
query = self.to_q(x).reshape(B, L, self.h, self.dim)
key = self.to_k(x).reshape(B, L, self.h, self.dim)
value = self.to_v(x).reshape(B, L, self.h, self.dim)
bias = self.to_b(bias) # (B, L, L, h)
gate = torch.sigmoid(self.to_g(x))
#
key = key * self.scaling
attn = einsum('bqhd,bkhd->bqkh', query, key)
attn = attn + bias
attn = F.softmax(attn, dim=-2)
#
out = einsum('bqkh,bkhd->bqhd', attn, value).reshape(B, L, -1)
out = gate * out
#
out = self.to_out(out)
return out
# MSA Attention (row/column) from the AlphaFold architecture
class SequenceWeight(nn.Module):
def __init__(self, d_msa, n_head, d_hidden, p_drop=0.1):
super(SequenceWeight, self).__init__()
self.h = n_head
self.dim = d_hidden
self.scale = 1.0 / math.sqrt(self.dim)
self.to_query = nn.Linear(d_msa, n_head*d_hidden)
self.to_key = nn.Linear(d_msa, n_head*d_hidden)
self.dropout = nn.Dropout(p_drop)
self.reset_parameter()
def reset_parameter(self):
# query/key/value projection: Glorot uniform / Xavier uniform
nn.init.xavier_uniform_(self.to_query.weight)
nn.init.xavier_uniform_(self.to_key.weight)
def forward(self, msa):
B, N, L = msa.shape[:3]
tar_seq = msa[:,0]
q = self.to_query(tar_seq).view(B, 1, L, self.h, self.dim)
k = self.to_key(msa).view(B, N, L, self.h, self.dim)
q = q * self.scale
attn = einsum('bqihd,bkihd->bkihq', q, k)
attn = F.softmax(attn, dim=1)
return self.dropout(attn)
class MSARowAttentionWithBias(nn.Module):
def __init__(self, d_msa=256, d_pair=128, n_head=8, d_hidden=32):
super(MSARowAttentionWithBias, self).__init__()
self.norm_msa = nn.LayerNorm(d_msa)
self.norm_pair = nn.LayerNorm(d_pair)
#
self.seq_weight = SequenceWeight(d_msa, n_head, d_hidden, p_drop=0.1)
self.to_q = nn.Linear(d_msa, n_head*d_hidden, bias=False)
self.to_k = nn.Linear(d_msa, n_head*d_hidden, bias=False)
self.to_v = nn.Linear(d_msa, n_head*d_hidden, bias=False)
self.to_b = nn.Linear(d_pair, n_head, bias=False)
self.to_g = nn.Linear(d_msa, n_head*d_hidden)
self.to_out = nn.Linear(n_head*d_hidden, d_msa)
self.scaling = 1/math.sqrt(d_hidden)
self.h = n_head
self.dim = d_hidden
self.reset_parameter()
def reset_parameter(self):
# query/key/value projection: Glorot uniform / Xavier uniform
nn.init.xavier_uniform_(self.to_q.weight)
nn.init.xavier_uniform_(self.to_k.weight)
nn.init.xavier_uniform_(self.to_v.weight)
# bias: normal distribution
self.to_b = init_lecun_normal(self.to_b)
        # gating: zero weights, one biases (mostly open gate at the beginning)
nn.init.zeros_(self.to_g.weight)
nn.init.ones_(self.to_g.bias)
        # to_out: right before residual connection: zero initialize -- so the residual block acts as the identity at the beginning
nn.init.zeros_(self.to_out.weight)
nn.init.zeros_(self.to_out.bias)
    def forward(self, msa, pair): # TODO: make this tied attention
B, N, L = msa.shape[:3]
#
msa = self.norm_msa(msa)
pair = self.norm_pair(pair)
#
seq_weight = self.seq_weight(msa) # (B, N, L, h, 1)
query = self.to_q(msa).reshape(B, N, L, self.h, self.dim)
key = self.to_k(msa).reshape(B, N, L, self.h, self.dim)
value = self.to_v(msa).reshape(B, N, L, self.h, self.dim)
bias = self.to_b(pair) # (B, L, L, h)
gate = torch.sigmoid(self.to_g(msa))
#
query = query * seq_weight.expand(-1, -1, -1, -1, self.dim)
key = key * self.scaling
attn = einsum('bsqhd,bskhd->bqkh', query, key)
attn = attn + bias
attn = F.softmax(attn, dim=-2)
#
out = einsum('bqkh,bskhd->bsqhd', attn, value).reshape(B, N, L, -1)
out = gate * out
#
out = self.to_out(out)
return out
class MSAColAttention(nn.Module):
def __init__(self, d_msa=256, n_head=8, d_hidden=32):
super(MSAColAttention, self).__init__()
self.norm_msa = nn.LayerNorm(d_msa)
#
self.to_q = nn.Linear(d_msa, n_head*d_hidden, bias=False)
self.to_k = nn.Linear(d_msa, n_head*d_hidden, bias=False)
self.to_v = nn.Linear(d_msa, n_head*d_hidden, bias=False)
self.to_g = nn.Linear(d_msa, n_head*d_hidden)
self.to_out = nn.Linear(n_head*d_hidden, d_msa)
self.scaling = 1/math.sqrt(d_hidden)
self.h = n_head
self.dim = d_hidden
self.reset_parameter()
def reset_parameter(self):
# query/key/value projection: Glorot uniform / Xavier uniform
nn.init.xavier_uniform_(self.to_q.weight)
nn.init.xavier_uniform_(self.to_k.weight)
nn.init.xavier_uniform_(self.to_v.weight)
        # gating: zero weights, one biases (mostly open gate at the beginning)
nn.init.zeros_(self.to_g.weight)
nn.init.ones_(self.to_g.bias)
        # to_out: right before residual connection: zero initialize -- so the residual block acts as the identity at the beginning
nn.init.zeros_(self.to_out.weight)
nn.init.zeros_(self.to_out.bias)
def forward(self, msa):
B, N, L = msa.shape[:3]
#
msa = self.norm_msa(msa)
#
query = self.to_q(msa).reshape(B, N, L, self.h, self.dim)
key = self.to_k(msa).reshape(B, N, L, self.h, self.dim)
value = self.to_v(msa).reshape(B, N, L, self.h, self.dim)
gate = torch.sigmoid(self.to_g(msa))
#
query = query * self.scaling
attn = einsum('bqihd,bkihd->bihqk', query, key)
attn = F.softmax(attn, dim=-1)
#
out = einsum('bihqk,bkihd->bqihd', attn, value).reshape(B, N, L, -1)
out = gate * out
#
out = self.to_out(out)
return out
class MSAColGlobalAttention(nn.Module):
def __init__(self, d_msa=64, n_head=8, d_hidden=8):
super(MSAColGlobalAttention, self).__init__()
self.norm_msa = nn.LayerNorm(d_msa)
#
self.to_q = nn.Linear(d_msa, n_head*d_hidden, bias=False)
self.to_k = nn.Linear(d_msa, d_hidden, bias=False)
self.to_v = nn.Linear(d_msa, d_hidden, bias=False)
self.to_g = nn.Linear(d_msa, n_head*d_hidden)
self.to_out = nn.Linear(n_head*d_hidden, d_msa)
self.scaling = 1/math.sqrt(d_hidden)
self.h = n_head
self.dim = d_hidden
self.reset_parameter()
def reset_parameter(self):
# query/key/value projection: Glorot uniform / Xavier uniform
nn.init.xavier_uniform_(self.to_q.weight)
nn.init.xavier_uniform_(self.to_k.weight)
nn.init.xavier_uniform_(self.to_v.weight)
        # gating: zero weights, one biases (mostly open gate at the beginning)
nn.init.zeros_(self.to_g.weight)
nn.init.ones_(self.to_g.bias)
        # to_out: right before residual connection: zero initialize -- so the residual block acts as the identity at the beginning
nn.init.zeros_(self.to_out.weight)
nn.init.zeros_(self.to_out.bias)
def forward(self, msa):
B, N, L = msa.shape[:3]
#
msa = self.norm_msa(msa)
#
query = self.to_q(msa).reshape(B, N, L, self.h, self.dim)
query = query.mean(dim=1) # (B, L, h, dim)
key = self.to_k(msa) # (B, N, L, dim)
value = self.to_v(msa) # (B, N, L, dim)
gate = torch.sigmoid(self.to_g(msa)) # (B, N, L, h*dim)
#
query = query * self.scaling
attn = einsum('bihd,bkid->bihk', query, key) # (B, L, h, N)
attn = F.softmax(attn, dim=-1)
#
out = einsum('bihk,bkid->bihd', attn, value).reshape(B, 1, L, -1) # (B, 1, L, h*dim)
out = gate * out # (B, N, L, h*dim)
#
out = self.to_out(out)
return out
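# Usage sketch (illustrative): global column attention averages the query over
# sequences, so cost scales linearly in N rather than quadratically.
#   attn = MSAColGlobalAttention(d_msa=64, n_head=8, d_hidden=8)
#   out = attn(torch.randn(1, 16, 100, 64))   # -> (1, 16, 100, 64)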
# Instead of triangle attention, use tied axial attention with bias from coordinates..?
class BiasedAxialAttention(nn.Module):
def __init__(self, d_pair, d_bias, n_head, d_hidden, p_drop=0.1, is_row=True):
super(BiasedAxialAttention, self).__init__()
#
self.is_row = is_row
self.norm_pair = nn.LayerNorm(d_pair)
self.norm_bias = nn.LayerNorm(d_bias)
self.to_q = nn.Linear(d_pair, n_head*d_hidden, bias=False)
self.to_k = nn.Linear(d_pair, n_head*d_hidden, bias=False)
self.to_v = nn.Linear(d_pair, n_head*d_hidden, bias=False)
self.to_b = nn.Linear(d_bias, n_head, bias=False)
self.to_g = nn.Linear(d_pair, n_head*d_hidden)
self.to_out = nn.Linear(n_head*d_hidden, d_pair)
self.scaling = 1/math.sqrt(d_hidden)
self.h = n_head
self.dim = d_hidden
# initialize all parameters properly
self.reset_parameter()
def reset_parameter(self):
# query/key/value projection: Glorot uniform / Xavier uniform
nn.init.xavier_uniform_(self.to_q.weight)
nn.init.xavier_uniform_(self.to_k.weight)
nn.init.xavier_uniform_(self.to_v.weight)
# bias: normal distribution
self.to_b = init_lecun_normal(self.to_b)
        # gating: zero weights, one biases (mostly open gate at the beginning)
nn.init.zeros_(self.to_g.weight)
nn.init.ones_(self.to_g.bias)
        # to_out: right before residual connection: zero initialize -- so the residual block acts as the identity at the beginning
nn.init.zeros_(self.to_out.weight)
nn.init.zeros_(self.to_out.bias)
def forward(self, pair, bias):
# pair: (B, L, L, d_pair)
B, L = pair.shape[:2]
if self.is_row:
pair = pair.permute(0,2,1,3)
bias = bias.permute(0,2,1,3)
pair = self.norm_pair(pair)
bias = self.norm_bias(bias)
query = self.to_q(pair).reshape(B, L, L, self.h, self.dim)
key = self.to_k(pair).reshape(B, L, L, self.h, self.dim)
value = self.to_v(pair).reshape(B, L, L, self.h, self.dim)
bias = self.to_b(bias) # (B, L, L, h)
gate = torch.sigmoid(self.to_g(pair)) # (B, L, L, h*dim)
query = query * self.scaling
key = key / math.sqrt(L) # normalize for tied attention
attn = einsum('bnihk,bnjhk->bijh', query, key) # tied attention
attn = attn + bias # apply bias
attn = F.softmax(attn, dim=-2) # (B, L, L, h)
out = einsum('bijh,bkjhd->bikhd', attn, value).reshape(B, L, L, -1)
out = gate * out
out = self.to_out(out)
if self.is_row:
out = out.permute(0,2,1,3)
return out
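# Usage sketch (illustrative): row-wise tied axial attention over a pair map.
#   attn = BiasedAxialAttention(d_pair=128, d_bias=128, n_head=4, d_hidden=32, is_row=True)
#   pair = torch.randn(1, 64, 64, 128)
#   out = attn(pair, torch.randn(1, 64, 64, 128))   # -> (1, 64, 64, 128)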
| RFdiffusion-main | Attention_module.py |
import torch
import numpy as np
num2aa=[
'ALA','ARG','ASN','ASP','CYS',
'GLN','GLU','GLY','HIS','ILE',
'LEU','LYS','MET','PHE','PRO',
'SER','THR','TRP','TYR','VAL',
'UNK','MAS',
]
# Mapping 3 letter AA to 1 letter AA (e.g. ALA to A)
one_letter = ["A", "R", "N", "D", "C", \
"Q", "E", "G", "H", "I", \
"L", "K", "M", "F", "P", \
"S", "T", "W", "Y", "V", "?", "-"]
aa2num= {x:i for i,x in enumerate(num2aa)}
aa_321 = {a:b for a,b in zip(num2aa,one_letter)}
aa_123 = {val:key for key,val in aa_321.items()}
# create single letter code string from parsed integer sequence
def seq2chars(seq):
out = ''.join([aa_321[num2aa[a]] for a in seq])
return out
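# Example (illustrative): seq2chars([0, 5, 7]) -> "AQG" (ALA, GLN, GLY)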
# full sc atom representation (Nx14)
aa2long=[
(" N "," CA "," C "," O "," CB ", None, None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB ","3HB ", None, None, None, None, None, None, None, None), # ala
(" N "," CA "," C "," O "," CB "," CG "," CD "," NE "," CZ "," NH1"," NH2", None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HD ","2HD "," HE ","1HH1","2HH1","1HH2","2HH2"), # arg
(" N "," CA "," C "," O "," CB "," CG "," OD1"," ND2", None, None, None, None, None, None," H "," HA ","1HB ","2HB ","1HD2","2HD2", None, None, None, None, None, None, None), # asn
(" N "," CA "," C "," O "," CB "," CG "," OD1"," OD2", None, None, None, None, None, None," H "," HA ","1HB ","2HB ", None, None, None, None, None, None, None, None, None), # asp
(" N "," CA "," C "," O "," CB "," SG ", None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB "," HG ", None, None, None, None, None, None, None, None), # cys
(" N "," CA "," C "," O "," CB "," CG "," CD "," OE1"," NE2", None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HE2","2HE2", None, None, None, None, None), # gln
(" N "," CA "," C "," O "," CB "," CG "," CD "," OE1"," OE2", None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ", None, None, None, None, None, None, None), # glu
(" N "," CA "," C "," O ", None, None, None, None, None, None, None, None, None, None," H ","1HA ","2HA ", None, None, None, None, None, None, None, None, None, None), # gly
(" N "," CA "," C "," O "," CB "," CG "," ND1"," CD2"," CE1"," NE2", None, None, None, None," H "," HA ","1HB ","2HB "," HD2"," HE1"," HE2", None, None, None, None, None, None), # his
(" N "," CA "," C "," O "," CB "," CG1"," CG2"," CD1", None, None, None, None, None, None," H "," HA "," HB ","1HG2","2HG2","3HG2","1HG1","2HG1","1HD1","2HD1","3HD1", None, None), # ile
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2", None, None, None, None, None, None," H "," HA ","1HB ","2HB "," HG ","1HD1","2HD1","3HD1","1HD2","2HD2","3HD2", None, None), # leu
(" N "," CA "," C "," O "," CB "," CG "," CD "," CE "," NZ ", None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HD ","2HD ","1HE ","2HE ","1HZ ","2HZ ","3HZ "), # lys
(" N "," CA "," C "," O "," CB "," CG "," SD "," CE ", None, None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HE ","2HE ","3HE ", None, None, None, None), # met
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2"," CE1"," CE2"," CZ ", None, None, None," H "," HA ","1HB ","2HB "," HD1"," HD2"," HE1"," HE2"," HZ ", None, None, None, None), # phe
(" N "," CA "," C "," O "," CB "," CG "," CD ", None, None, None, None, None, None, None," HA ","1HB ","2HB ","1HG ","2HG ","1HD ","2HD ", None, None, None, None, None, None), # pro
(" N "," CA "," C "," O "," CB "," OG ", None, None, None, None, None, None, None, None," H "," HG "," HA ","1HB ","2HB ", None, None, None, None, None, None, None, None), # ser
(" N "," CA "," C "," O "," CB "," OG1"," CG2", None, None, None, None, None, None, None," H "," HG1"," HA "," HB ","1HG2","2HG2","3HG2", None, None, None, None, None, None), # thr
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2"," NE1"," CE2"," CE3"," CZ2"," CZ3"," CH2"," H "," HA ","1HB ","2HB "," HD1"," HE1"," HZ2"," HH2"," HZ3"," HE3", None, None, None), # trp
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2"," CE1"," CE2"," CZ "," OH ", None, None," H "," HA ","1HB ","2HB "," HD1"," HE1"," HE2"," HD2"," HH ", None, None, None, None), # tyr
(" N "," CA "," C "," O "," CB "," CG1"," CG2", None, None, None, None, None, None, None," H "," HA "," HB ","1HG1","2HG1","3HG1","1HG2","2HG2","3HG2", None, None, None, None), # val
(" N "," CA "," C "," O "," CB ", None, None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB ","3HB ", None, None, None, None, None, None, None, None), # unk
(" N "," CA "," C "," O "," CB ", None, None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB ","3HB ", None, None, None, None, None, None, None, None), # mask
]
# build the "alternate" sc mapping
aa2longalt=[
(" N "," CA "," C "," O "," CB ", None, None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB ","3HB ", None, None, None, None, None, None, None, None), # ala
(" N "," CA "," C "," O "," CB "," CG "," CD "," NE "," CZ "," NH1"," NH2", None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HD ","2HD "," HE ","1HH1","2HH1","1HH2","2HH2"), # arg
(" N "," CA "," C "," O "," CB "," CG "," OD1"," ND2", None, None, None, None, None, None," H "," HA ","1HB ","2HB ","1HD2","2HD2", None, None, None, None, None, None, None), # asn
(" N "," CA "," C "," O "," CB "," CG "," OD2"," OD1", None, None, None, None, None, None," H "," HA ","1HB ","2HB ", None, None, None, None, None, None, None, None, None), # asp
(" N "," CA "," C "," O "," CB "," SG ", None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB "," HG ", None, None, None, None, None, None, None, None), # cys
(" N "," CA "," C "," O "," CB "," CG "," CD "," OE1"," NE2", None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HE2","2HE2", None, None, None, None, None), # gln
(" N "," CA "," C "," O "," CB "," CG "," CD "," OE2"," OE1", None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ", None, None, None, None, None, None, None), # glu
(" N "," CA "," C "," O ", None, None, None, None, None, None, None, None, None, None," H ","1HA ","2HA ", None, None, None, None, None, None, None, None, None, None), # gly
(" N "," CA "," C "," O "," CB "," CG "," NE2"," CD2"," CE1"," ND1", None, None, None, None," H "," HA ","1HB ","2HB "," HD2"," HE1"," HE2", None, None, None, None, None, None), # his
(" N "," CA "," C "," O "," CB "," CG1"," CG2"," CD1", None, None, None, None, None, None," H "," HA "," HB ","1HG2","2HG2","3HG2","1HG1","2HG1","1HD1","2HD1","3HD1", None, None), # ile
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2", None, None, None, None, None, None," H "," HA ","1HB ","2HB "," HG ","1HD1","2HD1","3HD1","1HD2","2HD2","3HD2", None, None), # leu
(" N "," CA "," C "," O "," CB "," CG "," CD "," CE "," NZ ", None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HD ","2HD ","1HE ","2HE ","1HZ ","2HZ ","3HZ "), # lys
(" N "," CA "," C "," O "," CB "," CG "," SD "," CE ", None, None, None, None, None, None," H "," HA ","1HB ","2HB ","1HG ","2HG ","1HE ","2HE ","3HE ", None, None, None, None), # met
(" N "," CA "," C "," O "," CB "," CG "," CD2"," CD1"," CE2"," CE1"," CZ ", None, None, None," H "," HD2"," HE2"," HZ "," HE1"," HD1"," HA ","1HB ","2HB ", None, None, None, None), # phe
(" N "," CA "," C "," O "," CB "," CG "," CD ", None, None, None, None, None, None, None," HA ","1HB ","2HB ","1HG ","2HG ","1HD ","2HD ", None, None, None, None, None, None), # pro
(" N "," CA "," C "," O "," CB "," OG ", None, None, None, None, None, None, None, None," H "," HG "," HA ","1HB ","2HB ", None, None, None, None, None, None, None, None), # ser
(" N "," CA "," C "," O "," CB "," OG1"," CG2", None, None, None, None, None, None, None," H "," HG1"," HA "," HB ","1HG2","2HG2","3HG2", None, None, None, None, None, None), # thr
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2"," NE1"," CE2"," CE3"," CZ2"," CZ3"," CH2"," H "," HA ","1HB ","2HB "," HD1"," HE1"," HZ2"," HH2"," HZ3"," HE3", None, None, None), # trp
(" N "," CA "," C "," O "," CB "," CG "," CD2"," CD1"," CE2"," CE1"," CZ "," OH ", None, None," H "," HA ","1HB ","2HB "," HD2"," HE2"," HE1"," HD1"," HH ", None, None, None, None), # tyr
(" N "," CA "," C "," O "," CB "," CG1"," CG2", None, None, None, None, None, None, None," H "," HA "," HB ","1HG1","2HG1","3HG1","1HG2","2HG2","3HG2", None, None, None, None), # val
(" N "," CA "," C "," O "," CB ", None, None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB ","3HB ", None, None, None, None, None, None, None, None), # unk
(" N "," CA "," C "," O "," CB ", None, None, None, None, None, None, None, None, None," H "," HA ","1HB ","2HB ","3HB ", None, None, None, None, None, None, None, None), # mask
]
aabonds=[
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB ","1HB "),(" CB ","2HB "),(" CB ","3HB ")) , # ala
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," CD "),(" CG ","1HG "),(" CG ","2HG "),(" CD "," NE "),(" CD ","1HD "),(" CD ","2HD "),(" NE "," CZ "),(" NE "," HE "),(" CZ "," NH1"),(" CZ "," NH2"),(" NH1","1HH1"),(" NH1","2HH1"),(" NH2","1HH2"),(" NH2","2HH2")) , # arg
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," OD1"),(" CG "," ND2"),(" ND2","1HD2"),(" ND2","2HD2")) , # asn
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," OD1"),(" CG "," OD2")) , # asp
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," SG "),(" CB ","1HB "),(" CB ","2HB "),(" SG "," HG ")) , # cys
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," CD "),(" CG ","1HG "),(" CG ","2HG "),(" CD "," OE1"),(" CD "," NE2"),(" NE2","1HE2"),(" NE2","2HE2")) , # gln
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," CD "),(" CG ","1HG "),(" CG ","2HG "),(" CD "," OE1"),(" CD "," OE2")) , # glu
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA ","1HA "),(" CA ","2HA "),(" C "," O ")) , # gly
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," ND1"),(" CG "," CD2"),(" ND1"," CE1"),(" CD2"," NE2"),(" CD2"," HD2"),(" CE1"," NE2"),(" CE1"," HE1"),(" NE2"," HE2")) , # his
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG1"),(" CB "," CG2"),(" CB "," HB "),(" CG1"," CD1"),(" CG1","1HG1"),(" CG1","2HG1"),(" CG2","1HG2"),(" CG2","2HG2"),(" CG2","3HG2"),(" CD1","1HD1"),(" CD1","2HD1"),(" CD1","3HD1")) , # ile
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," CD1"),(" CG "," CD2"),(" CG "," HG "),(" CD1","1HD1"),(" CD1","2HD1"),(" CD1","3HD1"),(" CD2","1HD2"),(" CD2","2HD2"),(" CD2","3HD2")) , # leu
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," CD "),(" CG ","1HG "),(" CG ","2HG "),(" CD "," CE "),(" CD ","1HD "),(" CD ","2HD "),(" CE "," NZ "),(" CE ","1HE "),(" CE ","2HE "),(" NZ ","1HZ "),(" NZ ","2HZ "),(" NZ ","3HZ ")) , # lys
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," SD "),(" CG ","1HG "),(" CG ","2HG "),(" SD "," CE "),(" CE ","1HE "),(" CE ","2HE "),(" CE ","3HE ")) , # met
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," CD1"),(" CG "," CD2"),(" CD1"," CE1"),(" CD1"," HD1"),(" CD2"," CE2"),(" CD2"," HD2"),(" CE1"," CZ "),(" CE1"," HE1"),(" CE2"," CZ "),(" CE2"," HE2"),(" CZ "," HZ ")) , # phe
((" N "," CA "),(" N "," CD "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," CD "),(" CG ","1HG "),(" CG ","2HG "),(" CD ","1HD "),(" CD ","2HD ")) , # pro
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," OG "),(" CB ","1HB "),(" CB ","2HB "),(" OG "," HG ")) , # ser
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," OG1"),(" CB "," CG2"),(" CB "," HB "),(" OG1"," HG1"),(" CG2","1HG2"),(" CG2","2HG2"),(" CG2","3HG2")) , # thr
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," CD1"),(" CG "," CD2"),(" CD1"," NE1"),(" CD1"," HD1"),(" CD2"," CE2"),(" CD2"," CE3"),(" NE1"," CE2"),(" NE1"," HE1"),(" CE2"," CZ2"),(" CE3"," CZ3"),(" CE3"," HE3"),(" CZ2"," CH2"),(" CZ2"," HZ2"),(" CZ3"," CH2"),(" CZ3"," HZ3"),(" CH2"," HH2")) , # trp
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG "),(" CB ","1HB "),(" CB ","2HB "),(" CG "," CD1"),(" CG "," CD2"),(" CD1"," CE1"),(" CD1"," HD1"),(" CD2"," CE2"),(" CD2"," HD2"),(" CE1"," CZ "),(" CE1"," HE1"),(" CE2"," CZ "),(" CE2"," HE2"),(" CZ "," OH "),(" OH "," HH ")) , # tyr
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB "," CG1"),(" CB "," CG2"),(" CB "," HB "),(" CG1","1HG1"),(" CG1","2HG1"),(" CG1","3HG1"),(" CG2","1HG2"),(" CG2","2HG2"),(" CG2","3HG2")), # val
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB ","1HB "),(" CB ","2HB "),(" CB ","3HB ")) , # unk
((" N "," CA "),(" N "," H "),(" CA "," C "),(" CA "," CB "),(" CA "," HA "),(" C "," O "),(" CB ","1HB "),(" CB ","2HB "),(" CB ","3HB ")) , # mask
]
aa2type = [
("Nbb", "CAbb","CObb","OCbb","CH3", None, None, None, None, None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo", None, None, None, None, None, None, None, None), # ala
("Nbb", "CAbb","CObb","OCbb","CH2", "CH2", "CH2", "NtrR","aroC","Narg","Narg", None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hpol","Hpol","Hpol","Hpol","Hpol"), # arg
("Nbb", "CAbb","CObb","OCbb","CH2", "CNH2","ONH2","NH2O", None, None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hpol","Hpol", None, None, None, None, None, None, None), # asn
("Nbb", "CAbb","CObb","OCbb","CH2", "COO", "OOC", "OOC", None, None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo", None, None, None, None, None, None, None, None, None), # asp
("Nbb", "CAbb","CObb","OCbb","CH2", "SH1", None, None, None, None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","HS", None, None, None, None, None, None, None, None), # cys
("Nbb", "CAbb","CObb","OCbb","CH2", "CH2", "CNH2","ONH2","NH2O", None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo","Hapo","Hpol","Hpol", None, None, None, None, None), # gln
("Nbb", "CAbb","CObb","OCbb","CH2", "CH2", "COO", "OOC", "OOC", None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo","Hapo", None, None, None, None, None, None, None), # glu
("Nbb", "CAbb","CObb","OCbb", None, None, None, None, None, None, None, None, None, None,"HNbb","Hapo","Hapo", None, None, None, None, None, None, None, None, None, None), # gly
("Nbb", "CAbb","CObb","OCbb","CH2", "CH0", "Nhis","aroC","aroC","Ntrp", None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hpol","Hapo","Hapo", None, None, None, None, None, None), # his
("Nbb", "CAbb","CObb","OCbb","CH1", "CH2", "CH3", "CH3", None, None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo", None, None), # ile
("Nbb", "CAbb","CObb","OCbb","CH2", "CH1", "CH3", "CH3", None, None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo", None, None), # leu
("Nbb", "CAbb","CObb","OCbb","CH2", "CH2", "CH2", "CH2", "Nlys", None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hpol","Hpol","Hpol"), # lys
("Nbb", "CAbb","CObb","OCbb","CH2", "CH2", "S", "CH3", None, None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo", None, None, None, None), # met
("Nbb", "CAbb","CObb","OCbb","CH2", "CH0", "aroC","aroC","aroC","aroC","aroC", None, None, None,"HNbb","Hapo","Hapo","Hapo","Haro","Haro","Haro","Haro","Haro", None, None, None, None), # phe
("Npro","CAbb","CObb","OCbb","CH2", "CH2", "CH2", None, None, None, None, None, None, None,"Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo", None, None, None, None, None, None), # pro
("Nbb", "CAbb","CObb","OCbb","CH2", "OH", None, None, None, None, None, None, None, None,"HNbb","Hpol","Hapo","Hapo","Hapo", None, None, None, None, None, None, None, None), # ser
("Nbb", "CAbb","CObb","OCbb","CH1", "OH", "CH3", None, None, None, None, None, None, None,"HNbb","Hpol","Hapo","Hapo","Hapo","Hapo","Hapo", None, None, None, None, None, None), # thr
("Nbb", "CAbb","CObb","OCbb","CH2", "CH0", "aroC","CH0", "Ntrp","CH0", "aroC","aroC","aroC","aroC","HNbb","Haro","Hapo","Hapo","Hapo","Hpol","Haro","Haro","Haro","Haro", None, None, None), # trp
("Nbb", "CAbb","CObb","OCbb","CH2", "CH0", "aroC","aroC","aroC","aroC","CH0", "OHY", None, None,"HNbb","Haro","Haro","Haro","Haro","Hapo","Hapo","Hapo","Hpol", None, None, None, None), # tyr
("Nbb", "CAbb","CObb","OCbb","CH1", "CH3", "CH3", None, None, None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo","Hapo", None, None, None, None), # val
("Nbb", "CAbb","CObb","OCbb","CH3", None, None, None, None, None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo", None, None, None, None, None, None, None, None), # unk
("Nbb", "CAbb","CObb","OCbb","CH3", None, None, None, None, None, None, None, None, None,"HNbb","Hapo","Hapo","Hapo","Hapo", None, None, None, None, None, None, None, None), # mask
]
# tip atom
aa2tip = [
" CB ", # ala
" CZ ", # arg
" ND2", # asn
" CG ", # asp
" SG ", # cys
" NE2", # gln
" CD ", # glu
" CA ", # gly
" NE2", # his
" CD1", # ile
" CG ", # leu
" NZ ", # lys
" SD ", # met
" CZ ", # phe
" CG ", # pro
" OG ", # ser
" OG1", # thr
" CH2", # trp
" OH ", # tyr
" CB ", # val
" CB ", # unknown (gap etc)
" CB " # masked
]
torsions=[
[ None, None, None, None ], # ala
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," CD "], [" CB "," CG "," CD "," NE "], [" CG "," CD "," NE "," CZ "] ], # arg
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," OD1"], None, None ], # asn
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," OD1"], None, None ], # asp
[ [" N "," CA "," CB "," SG "], [" CA "," CB "," SG "," HG "], None, None ], # cys
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," CD "], [" CB "," CG "," CD "," OE1"], None ], # gln
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," CD "], [" CB "," CG "," CD "," OE1"], None ], # glu
[ None, None, None, None ], # gly
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," ND1"], [" CD2"," CE1"," HE1"," NE2"], None ], # his (protonation handled as a pseudo-torsion)
[ [" N "," CA "," CB "," CG1"], [" CA "," CB "," CG1"," CD1"], None, None ], # ile
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," CD1"], None, None ], # leu
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," CD "], [" CB "," CG "," CD "," CE "], [" CG "," CD "," CE "," NZ "] ], # lys
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," SD "], [" CB "," CG "," SD "," CE "], None ], # met
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," CD1"], None, None ], # phe
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," CD "], [" CB "," CG "," CD ","1HD "], None ], # pro
[ [" N "," CA "," CB "," OG "], [" CA "," CB "," OG "," HG "], None, None ], # ser
[ [" N "," CA "," CB "," OG1"], [" CA "," CB "," OG1"," HG1"], None, None ], # thr
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," CD1"], None, None ], # trp
[ [" N "," CA "," CB "," CG "], [" CA "," CB "," CG "," CD1"], [" CE1"," CZ "," OH "," HH "], None ], # tyr
[ [" N "," CA "," CB "," CG1"], None, None, None ], # val
[ None, None, None, None ], # unk
[ None, None, None, None ], # mask
]
# ideal N, CA, C initial coordinates
init_N = torch.tensor([-0.5272, 1.3593, 0.000]).float()
init_CA = torch.zeros_like(init_N)
init_C = torch.tensor([1.5233, 0.000, 0.000]).float()
INIT_CRDS = torch.full((27, 3), np.nan)
INIT_CRDS[:3] = torch.stack((init_N, init_CA, init_C), dim=0) # (3,3)
norm_N = init_N / (torch.norm(init_N, dim=-1, keepdim=True) + 1e-5)
norm_C = init_C / (torch.norm(init_C, dim=-1, keepdim=True) + 1e-5)
cos_ideal_NCAC = torch.sum(norm_N*norm_C, dim=-1) # cosine of ideal N-CA-C bond angle
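# Quick check (illustrative): the implied ideal N-CA-C bond angle is ~111 degrees,
#   torch.rad2deg(torch.acos(cos_ideal_NCAC))   # ~111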
#fd Rosetta ideal coords
#fd - uses same "frame-building" as AF2
ideal_coords = [
[ # 0 ala
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3341, -0.4928, 0.9132)],
[' CB ', 8, (-0.5289,-0.7734,-1.1991)],
['1HB ', 8, (-0.1265, -1.7863, -1.1851)],
['2HB ', 8, (-1.6173, -0.8147, -1.1541)],
['3HB ', 8, (-0.2229, -0.2744, -2.1172)],
],
[ # 1 arg
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3467, -0.5055, 0.9018)],
[' CB ', 8, (-0.5042,-0.7698,-1.2118)],
['1HB ', 4, ( 0.3635, -0.5318, 0.8781)],
['2HB ', 4, ( 0.3639, -0.5323, -0.8789)],
[' CG ', 4, (0.6396,1.3794, 0.000)],
['1HG ', 5, (0.3639, -0.5139, 0.8900)],
['2HG ', 5, (0.3641, -0.5140, -0.8903)],
[' CD ', 5, (0.5492,1.3801, 0.000)],
['1HD ', 6, (0.3637, -0.5135, 0.8895)],
['2HD ', 6, (0.3636, -0.5134, -0.8893)],
[' NE ', 6, (0.5423,1.3491, 0.000)],
[' NH1', 7, (0.2012,2.2965, 0.000)],
[' NH2', 7, (2.0824,1.0030, 0.000)],
[' CZ ', 7, (0.7650,1.1090, 0.000)],
[' HE ', 7, (0.4701,-0.8955, 0.000)],
['1HH1', 7, (-0.8059,2.3776, 0.000)],
['1HH2', 7, (2.5160,0.0898, 0.000)],
['2HH1', 7, (0.7745,3.1277, 0.000)],
['2HH2', 7, (2.6554,1.8336, 0.000)],
],
[ # 2 asn
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3233, -0.4967, 0.9162)],
[' CB ', 8, (-0.5341,-0.7799,-1.1874)],
['1HB ', 4, ( 0.3641, -0.5327, 0.8795)],
['2HB ', 4, ( 0.3639, -0.5323, -0.8789)],
[' CG ', 4, (0.5778,1.3881, 0.000)],
[' ND2', 5, (0.5839,-1.1711, 0.000)],
[' OD1', 5, (0.6331,1.0620, 0.000)],
['1HD2', 5, (1.5825, -1.2322, 0.000)],
['2HD2', 5, (0.0323, -2.0046, 0.000)],
],
[ # 3 asp
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3233, -0.4967, 0.9162)],
[' CB ', 8, (-0.5162,-0.7757,-1.2144)],
['1HB ', 4, ( 0.3639, -0.5324, 0.8791)],
['2HB ', 4, ( 0.3640, -0.5325, -0.8792)],
[' CG ', 4, (0.5926,1.4028, 0.000)],
[' OD1', 5, (0.5746,1.0629, 0.000)],
[' OD2', 5, (0.5738,-1.0627, 0.000)],
],
[ # 4 cys
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3481, -0.5059, 0.9006)],
[' CB ', 8, (-0.5046,-0.7727,-1.2189)],
['1HB ', 4, ( 0.3639, -0.5324, 0.8791)],
['2HB ', 4, ( 0.3638, -0.5322, -0.8787)],
[' SG ', 4, (0.7386,1.6511, 0.000)],
[' HG ', 5, (0.1387,1.3221, 0.000)],
],
[ # 5 gln
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3363, -0.5013, 0.9074)],
[' CB ', 8, (-0.5226,-0.7776,-1.2109)],
['1HB ', 4, ( 0.3638, -0.5323, 0.8789)],
['2HB ', 4, ( 0.3638, -0.5322, -0.8788)],
[' CG ', 4, (0.6225,1.3857, 0.000)],
['1HG ', 5, ( 0.3531, -0.5156, 0.8931)],
['2HG ', 5, ( 0.3531, -0.5156, -0.8931)],
[' CD ', 5, (0.5788,1.4021, 0.000)],
[' NE2', 6, (0.5908,-1.1895, 0.000)],
[' OE1', 6, (0.6347,1.0584, 0.000)],
['1HE2', 6, (1.5825, -1.2525, 0.000)],
['2HE2', 6, (0.0380, -2.0229, 0.000)],
],
[ # 6 glu
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3363, -0.5013, 0.9074)],
[' CB ', 8, (-0.5197,-0.7737,-1.2137)],
['1HB ', 4, ( 0.3638, -0.5323, 0.8789)],
['2HB ', 4, ( 0.3638, -0.5322, -0.8788)],
[' CG ', 4, (0.6287,1.3862, 0.000)],
['1HG ', 5, ( 0.3531, -0.5156, 0.8931)],
['2HG ', 5, ( 0.3531, -0.5156, -0.8931)],
[' CD ', 5, (0.5850,1.3849, 0.000)],
[' OE1', 6, (0.5752,1.0618, 0.000)],
[' OE2', 6, (0.5741,-1.0635, 0.000)],
],
[ # 7 gly
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
['1HA ', 0, ( -0.3676, -0.5329, 0.8771)],
['2HA ', 0, ( -0.3674, -0.5325, -0.8765)],
],
[ # 8 his
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3299, -0.5180, 0.9001)],
[' CB ', 8, (-0.5163,-0.7809,-1.2129)],
['1HB ', 4, ( 0.3640, -0.5325, 0.8793)],
['2HB ', 4, ( 0.3637, -0.5321, -0.8786)],
[' CG ', 4, (0.6016,1.3710, 0.000)],
[' CD2', 5, (0.8918,-1.0184, 0.000)],
[' CE1', 5, (2.0299,0.8564, 0.000)],
[' HE1', 5, (2.8542, 1.5693, 0.000)],
[' HD2', 5, ( 0.6584, -2.0835, 0.000) ],
[' ND1', 6, (-1.8631, -1.0722, 0.000)],
[' NE2', 6, (-1.8625, 1.0707, 0.000)],
[' HE2', 6, (-1.5439, 2.0292, 0.000)],
],
[ # 9 ile
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3405, -0.5028, 0.9044)],
[' CB ', 8, (-0.5140,-0.7885,-1.2184)],
[' HB ', 4, (0.3637, -0.4714, 0.9125)],
[' CG1', 4, (0.5339,1.4348,0.000)],
[' CG2', 4, (0.5319,-0.7693,-1.1994)],
['1HG2', 4, (1.6215, -0.7588, -1.1842)],
['2HG2', 4, (0.1785, -1.7986, -1.1569)],
['3HG2', 4, (0.1773, -0.3016, -2.1180)],
[' CD1', 5, (0.6106,1.3829, 0.000)],
['1HG1', 5, (0.3637, -0.5338, 0.8774)],
['2HG1', 5, (0.3640, -0.5322, -0.8793)],
['1HD1', 5, (1.6978, 1.3006, 0.000)],
['2HD1', 5, (0.2873, 1.9236, -0.8902)],
['3HD1', 5, (0.2888, 1.9224, 0.8896)],
],
[ # 10 leu
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.525, -0.000, -0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3435, -0.5040, 0.9027)],
[' CB ', 8, (-0.5175,-0.7692,-1.2220)],
['1HB ', 4, ( 0.3473, -0.5346, 0.8827)],
['2HB ', 4, ( 0.3476, -0.5351, -0.8836)],
[' CG ', 4, (0.6652,1.3823, 0.000)],
[' CD1', 5, (0.5083,1.4353, 0.000)],
[' CD2', 5, (0.5079,-0.7600,1.2163)],
[' HG ', 5, (0.3640, -0.4825, -0.9075)],
['1HD1', 5, (1.5984, 1.4353, 0.000)],
['2HD1', 5, (0.1462, 1.9496, -0.8903)],
['3HD1', 5, (0.1459, 1.9494, 0.8895)],
['1HD2', 5, (1.5983, -0.7606, 1.2158)],
['2HD2', 5, (0.1456, -0.2774, 2.1243)],
['3HD2', 5, (0.1444, -1.7871, 1.1815)],
],
[ # 11 lys
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3335, -0.5005, 0.9097)],
['1HB ', 4, ( 0.3640, -0.5324, 0.8791)],
['2HB ', 4, ( 0.3639, -0.5324, -0.8790)],
[' CB ', 8, (-0.5259,-0.7785,-1.2069)],
['1HG ', 5, (0.3641, -0.5229, 0.8852)],
['2HG ', 5, (0.3637, -0.5227, -0.8841)],
[' CG ', 4, (0.6291,1.3869, 0.000)],
[' CD ', 5, (0.5526,1.4174, 0.000)],
['1HD ', 6, (0.3641, -0.5239, 0.8848)],
['2HD ', 6, (0.3638, -0.5219, -0.8850)],
[' CE ', 6, (0.5544,1.4170, 0.000)],
[' NZ ', 7, (0.5566,1.3801, 0.000)],
['1HE ', 7, (0.4199, -0.4638, 0.9482)],
['2HE ', 7, (0.4202, -0.4631, -0.8172)],
['1HZ ', 7, (1.6223, 1.3980, 0.0658)],
['2HZ ', 7, (0.2970, 1.9326, -0.7584)],
['3HZ ', 7, (0.2981, 1.9319, 0.8909)],
],
[ # 12 met
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3303, -0.4990, 0.9108)],
['1HB ', 4, ( 0.3635, -0.5318, 0.8781)],
['2HB ', 4, ( 0.3641, -0.5326, -0.8795)],
[' CB ', 8, (-0.5331,-0.7727,-1.2048)],
['1HG ', 5, (0.3637, -0.5256, 0.8823)],
['2HG ', 5, (0.3638, -0.5249, -0.8831)],
[' CG ', 4, (0.6298,1.3858,0.000)],
[' SD ', 5, (0.6953,1.6645,0.000)],
[' CE ', 6, (0.3383,1.7581,0.000)],
['1HE ', 6, (1.7054, 2.0532, -0.0063)],
['2HE ', 6, (0.1906, 2.3099, -0.9072)],
['3HE ', 6, (0.1917, 2.3792, 0.8720)],
],
[ # 13 phe
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3303, -0.4990, 0.9108)],
['1HB ', 4, ( 0.3635, -0.5318, 0.8781)],
['2HB ', 4, ( 0.3641, -0.5326, -0.8795)],
[' CB ', 8, (-0.5150,-0.7729,-1.2156)],
[' CG ', 4, (0.6060,1.3746, 0.000)],
[' CD1', 5, (0.7078,1.1928, 0.000)],
[' CD2', 5, (0.7084,-1.1920, 0.000)],
[' CE1', 5, (2.0900,1.1940, 0.000)],
[' CE2', 5, (2.0897,-1.1939, 0.000)],
[' CZ ', 5, (2.7809, 0.000, 0.000)],
[' HD1', 5, (0.1613, 2.1362, 0.000)],
[' HD2', 5, (0.1621, -2.1360, 0.000)],
[' HE1', 5, (2.6335, 2.1384, 0.000)],
[' HE2', 5, (2.6344, -2.1378, 0.000)],
[' HZ ', 5, (3.8700, 0.000, 0.000)],
],
[ # 14 pro
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' HA ', 0, (-0.3868, -0.5380, 0.8781)],
['1HB ', 4, ( 0.3762, -0.5355, 0.8842)],
['2HB ', 4, ( 0.3762, -0.5355, -0.8842)],
[' CB ', 8, (-0.5649,-0.5888,-1.2966)],
[' CG ', 4, (0.3657,1.4451,0.0000)],
[' CD ', 5, (0.3744,1.4582, 0.0)],
['1HG ', 5, (0.3798, -0.5348, 0.8830)],
['2HG ', 5, (0.3798, -0.5348, -0.8830)],
['1HD ', 6, (0.3798, -0.5348, 0.8830)],
['2HD ', 6, (0.3798, -0.5348, -0.8830)],
],
[ # 15 ser
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3425, -0.5041, 0.9048)],
['1HB ', 4, ( 0.3637, -0.5321, 0.8786)],
['2HB ', 4, ( 0.3636, -0.5319, -0.8782)],
[' CB ', 8, (-0.5146,-0.7595,-1.2073)],
[' OG ', 4, (0.5021,1.3081, 0.000)],
[' HG ', 5, (0.2647, 0.9230, 0.000)],
],
[ # 16 thr
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3364, -0.5015, 0.9078)],
[' HB ', 4, ( 0.3638, -0.5006, 0.8971)],
['1HG2', 4, ( 1.6231, -0.7142, -1.2097)],
['2HG2', 4, ( 0.1792, -1.7546, -1.2237)],
['3HG2', 4, ( 0.1808, -0.2222, -2.1269)],
[' CB ', 8, (-0.5172,-0.7952,-1.2130)],
[' CG2', 4, (0.5334,-0.7239,-1.2267)],
[' OG1', 4, (0.4804,1.3506,0.000)],
[' HG1', 5, (0.3194, 0.9056, 0.000)],
],
[ # 17 trp
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3436, -0.5042, 0.9031)],
['1HB ', 4, ( 0.3639, -0.5323, 0.8790)],
['2HB ', 4, ( 0.3638, -0.5322, -0.8787)],
[' CB ', 8, (-0.5136,-0.7712,-1.2173)],
[' CG ', 4, (0.5984,1.3741, 0.000)],
[' CD1', 5, (0.8151,1.0921, 0.000)],
[' CD2', 5, (0.8753,-1.1538, 0.000)],
[' CE2', 5, (2.1865,-0.6707, 0.000)],
[' CE3', 5, (0.6541,-2.5366, 0.000)],
[' NE1', 5, (2.1309,0.7003, 0.000)],
[' CH2', 5, (3.0315,-2.8930, 0.000)],
[' CZ2', 5, (3.2813,-1.5205, 0.000)],
[' CZ3', 5, (1.7521,-3.3888, 0.000)],
[' HD1', 5, (0.4722, 2.1252, 0.000)],
[' HE1', 5, ( 2.9291, 1.3191, 0.000)],
[' HE3', 5, (-0.3597, -2.9356, 0.000)],
[' HZ2', 5, (4.3053, -1.1462, 0.000)],
[' HZ3', 5, ( 1.5712, -4.4640, 0.000)],
[' HH2', 5, ( 3.8700, -3.5898, 0.000)],
],
[ # 18 tyr
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3305, -0.4992, 0.9112)],
['1HB ', 4, ( 0.3642, -0.5327, 0.8797)],
['2HB ', 4, ( 0.3637, -0.5321, -0.8785)],
[' CB ', 8, (-0.5305,-0.7799,-1.2051)],
[' CG ', 4, (0.6104,1.3840, 0.000)],
[' CD1', 5, (0.6936,1.2013, 0.000)],
[' CD2', 5, (0.6934,-1.2011, 0.000)],
[' CE1', 5, (2.0751,1.2013, 0.000)],
[' CE2', 5, (2.0748,-1.2011, 0.000)],
[' OH ', 5, (4.1408, 0.000, 0.000)],
[' CZ ', 5, (2.7648, 0.000, 0.000)],
[' HD1', 5, (0.1485, 2.1455, 0.000)],
[' HD2', 5, (0.1484, -2.1451, 0.000)],
[' HE1', 5, (2.6200, 2.1450, 0.000)],
[' HE2', 5, (2.6199, -2.1453, 0.000)],
[' HH ', 6, (0.3190, 0.9057, 0.000)],
],
[ # 19 val
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3497, -0.5068, 0.9002)],
[' CB ', 8, (-0.5105,-0.7712,-1.2317)],
[' CG1', 4, (0.5326,1.4252, 0.000)],
[' CG2', 4, (0.5177,-0.7693,1.2057)],
[' HB ', 4, (0.3541, -0.4754, -0.9148)],
['1HG1', 4, (1.6228, 1.4063, 0.000)],
['2HG1', 4, (0.1790, 1.9457, -0.8898)],
['3HG1', 4, (0.1798, 1.9453, 0.8903)],
['1HG2', 4, (1.6073, -0.7659, 1.1989)],
['2HG2', 4, (0.1586, -0.2971, 2.1203)],
['3HG2', 4, (0.1582, -1.7976, 1.1631)],
],
[ # 20 unk
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3341, -0.4928, 0.9132)],
[' CB ', 8, (-0.5289,-0.7734,-1.1991)],
['1HB ', 8, (-0.1265, -1.7863, -1.1851)],
['2HB ', 8, (-1.6173, -0.8147, -1.1541)],
['3HB ', 8, (-0.2229, -0.2744, -2.1172)],
],
[ # 21 mask
[' N ', 0, (-0.5272, 1.3593, 0.000)],
[' CA ', 0, (0.000, 0.000, 0.000)],
[' C ', 0, (1.5233, 0.000, 0.000)],
[' O ', 3, (0.6303, 1.0574, 0.000)],
[' H ', 2, (0.4920,-0.8821, 0.0000)],
[' HA ', 0, (-0.3341, -0.4928, 0.9132)],
[' CB ', 8, (-0.5289,-0.7734,-1.1991)],
['1HB ', 8, (-0.1265, -1.7863, -1.1851)],
['2HB ', 8, (-1.6173, -0.8147, -1.1541)],
['3HB ', 8, (-0.2229, -0.2744, -2.1172)],
],
]
| RFdiffusion-main | chemical.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from opt_einsum import contract as einsum
import torch.utils.checkpoint as checkpoint
from util import get_tips
from util_module import Dropout, create_custom_forward, rbf, init_lecun_normal
from Attention_module import Attention, FeedForwardLayer, AttentionWithBias
from Track_module import PairStr2Pair
from icecream import ic
import math
# Module contains classes and functions to generate initial embeddings
def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000):
# Code from https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(max_positions) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -emb)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = F.pad(emb, (0, 1), mode='constant')
assert emb.shape == (timesteps.shape[0], embedding_dim)
return emb
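# Illustrative sketch (editor's addition, not part of the original module):
# a quick shape/behavior check for the sinusoidal timestep embedding above.
def _demo_timestep_embedding():
    ts = torch.arange(1, 5)                      # four example timesteps
    emb = get_timestep_embedding(ts, 8)          # (4, 8): [sin | cos] halves
    assert emb.shape == (4, 8)
    # an odd embedding_dim is zero-padded on the right
    assert get_timestep_embedding(ts, 7)[:, -1].abs().sum() == 0
    return emb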
class Timestep_emb(nn.Module):
def __init__(
self,
input_size,
output_size,
T,
use_motif_timestep=True
):
super(Timestep_emb, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.T = T
# get source for timestep embeddings at all t AND zero (for the motif)
self.source_embeddings = get_timestep_embedding(torch.arange(self.T+1), self.input_size)
self.source_embeddings.requires_grad = False
# Layers to use for projection
self.node_embedder = nn.Sequential(
nn.Linear(input_size, output_size, bias=False),
nn.ReLU(),
nn.Linear(output_size, output_size, bias=True),
nn.LayerNorm(output_size),
)
def get_init_emb(self, t, L, motif_mask):
"""
Calculates and stacks a timestep embedding to project
Parameters:
            t (torch.Tensor, required): Current timestep (1-indexed)
L (int, required): Length of protein
motif_mask (torch.tensor, required): Boolean mask where True denotes a fixed motif position
"""
        assert t > 0, "t should be 1-indexed and can't have t=0"
t_emb = torch.clone(self.source_embeddings[t.squeeze()]).to(motif_mask.device)
zero_emb = torch.clone(self.source_embeddings[0]).to(motif_mask.device)
# timestep embedding for all residues
timestep_embedding = torch.stack([t_emb]*L)
# slice in motif zero timestep features
timestep_embedding[motif_mask] = zero_emb
return timestep_embedding
def forward(self, L, t, motif_mask):
"""
Constructs and projects a timestep embedding
"""
emb_in = self.get_init_emb(t,L,motif_mask)
emb_out = self.node_embedder(emb_in)
return emb_out
class PositionalEncoding2D(nn.Module):
# Add relative positional encoding to pair features
def __init__(self, d_model, minpos=-32, maxpos=32, p_drop=0.1):
super(PositionalEncoding2D, self).__init__()
self.minpos = minpos
self.maxpos = maxpos
self.nbin = abs(minpos)+maxpos+1
self.emb = nn.Embedding(self.nbin, d_model)
self.drop = nn.Dropout(p_drop)
def forward(self, x, idx):
bins = torch.arange(self.minpos, self.maxpos, device=x.device)
seqsep = idx[:,None,:] - idx[:,:,None] # (B, L, L)
#
ib = torch.bucketize(seqsep, bins).long() # (B, L, L)
emb = self.emb(ib) #(B, L, L, d_model)
x = x + emb # add relative positional encoding
return self.drop(x)
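# Illustrative sketch (editor's addition): how the relative sequence separation
# is bucketed before the pair-embedding lookup above. Separations outside
# [minpos, maxpos] all collapse into the boundary bins.
def _demo_relpos_buckets(minpos=-32, maxpos=32):
    bins = torch.arange(minpos, maxpos)
    idx = torch.arange(5)[None]                   # (1, 5) residue indices
    seqsep = idx[:, None, :] - idx[:, :, None]    # (1, 5, 5) signed separations
    ib = torch.bucketize(seqsep, bins).long()     # bin indices in [0, nbin-1]
    assert ib.min() >= 0 and ib.max() <= abs(minpos) + maxpos
    return ib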
class MSA_emb(nn.Module):
# Get initial seed MSA embedding
def __init__(self, d_msa=256, d_pair=128, d_state=32, d_init=22+22+2+2,
minpos=-32, maxpos=32, p_drop=0.1, input_seq_onehot=False):
super(MSA_emb, self).__init__()
self.emb = nn.Linear(d_init, d_msa) # embedding for general MSA
self.emb_q = nn.Embedding(22, d_msa) # embedding for query sequence -- used for MSA embedding
self.emb_left = nn.Embedding(22, d_pair) # embedding for query sequence -- used for pair embedding
self.emb_right = nn.Embedding(22, d_pair) # embedding for query sequence -- used for pair embedding
self.emb_state = nn.Embedding(22, d_state)
self.drop = nn.Dropout(p_drop)
self.pos = PositionalEncoding2D(d_pair, minpos=minpos, maxpos=maxpos, p_drop=p_drop)
self.input_seq_onehot=input_seq_onehot
self.reset_parameter()
def reset_parameter(self):
self.emb = init_lecun_normal(self.emb)
self.emb_q = init_lecun_normal(self.emb_q)
self.emb_left = init_lecun_normal(self.emb_left)
self.emb_right = init_lecun_normal(self.emb_right)
self.emb_state = init_lecun_normal(self.emb_state)
nn.init.zeros_(self.emb.bias)
def forward(self, msa, seq, idx):
# Inputs:
# - msa: Input MSA (B, N, L, d_init)
# - seq: Input Sequence (B, L)
# - idx: Residue index
# Outputs:
# - msa: Initial MSA embedding (B, N, L, d_msa)
# - pair: Initial Pair embedding (B, L, L, d_pair)
        N = msa.shape[1] # number of sequences in MSA
# msa embedding
msa = self.emb(msa) # (B, N, L, d_model) # MSA embedding
# Sergey's one hot trick
tmp = (seq @ self.emb_q.weight).unsqueeze(1) # (B, 1, L, d_model) -- query embedding
msa = msa + tmp.expand(-1, N, -1, -1) # adding query embedding to MSA
msa = self.drop(msa)
# pair embedding
# Sergey's one hot trick
left = (seq @ self.emb_left.weight)[:,None] # (B, 1, L, d_pair)
right = (seq @ self.emb_right.weight)[:,:,None] # (B, L, 1, d_pair)
pair = left + right # (B, L, L, d_pair)
pair = self.pos(pair, idx) # add relative position
# state embedding
# Sergey's one hot trick
state = self.drop(seq @ self.emb_state.weight)
return msa, pair, state
class Extra_emb(nn.Module):
# Get initial seed MSA embedding
def __init__(self, d_msa=256, d_init=22+1+2, p_drop=0.1, input_seq_onehot=False):
super(Extra_emb, self).__init__()
self.emb = nn.Linear(d_init, d_msa) # embedding for general MSA
self.emb_q = nn.Embedding(22, d_msa) # embedding for query sequence
self.drop = nn.Dropout(p_drop)
self.input_seq_onehot=input_seq_onehot
self.reset_parameter()
def reset_parameter(self):
self.emb = init_lecun_normal(self.emb)
nn.init.zeros_(self.emb.bias)
def forward(self, msa, seq, idx):
# Inputs:
# - msa: Input MSA (B, N, L, d_init)
# - seq: Input Sequence (B, L)
# - idx: Residue index
# Outputs:
# - msa: Initial MSA embedding (B, N, L, d_msa)
        N = msa.shape[1] # number of sequences in MSA
msa = self.emb(msa) # (B, N, L, d_model) # MSA embedding
# Sergey's one hot trick
seq = (seq @ self.emb_q.weight).unsqueeze(1) # (B, 1, L, d_model) -- query embedding
"""
#TODO delete this once verified
if self.input_seq_onehot:
# Sergey's one hot trick
seq = (seq @ self.emb_q.weight).unsqueeze(1) # (B, 1, L, d_model) -- query embedding
else:
seq = self.emb_q(seq).unsqueeze(1) # (B, 1, L, d_model) -- query embedding
"""
msa = msa + seq.expand(-1, N, -1, -1) # adding query embedding to MSA
return self.drop(msa)
class TemplatePairStack(nn.Module):
# process template pairwise features
# use structure-biased attention
def __init__(self, n_block=2, d_templ=64, n_head=4, d_hidden=16, p_drop=0.25):
super(TemplatePairStack, self).__init__()
self.n_block = n_block
proc_s = [PairStr2Pair(d_pair=d_templ, n_head=n_head, d_hidden=d_hidden, p_drop=p_drop) for i in range(n_block)]
self.block = nn.ModuleList(proc_s)
self.norm = nn.LayerNorm(d_templ)
def forward(self, templ, rbf_feat, use_checkpoint=False):
B, T, L = templ.shape[:3]
templ = templ.reshape(B*T, L, L, -1)
for i_block in range(self.n_block):
if use_checkpoint:
templ = checkpoint.checkpoint(create_custom_forward(self.block[i_block]), templ, rbf_feat)
else:
templ = self.block[i_block](templ, rbf_feat)
return self.norm(templ).reshape(B, T, L, L, -1)
class TemplateTorsionStack(nn.Module):
def __init__(self, n_block=2, d_templ=64, n_head=4, d_hidden=16, p_drop=0.15):
super(TemplateTorsionStack, self).__init__()
self.n_block=n_block
self.proj_pair = nn.Linear(d_templ+36, d_templ)
proc_s = [AttentionWithBias(d_in=d_templ, d_bias=d_templ,
n_head=n_head, d_hidden=d_hidden) for i in range(n_block)]
self.row_attn = nn.ModuleList(proc_s)
proc_s = [FeedForwardLayer(d_templ, 4, p_drop=p_drop) for i in range(n_block)]
self.ff = nn.ModuleList(proc_s)
self.norm = nn.LayerNorm(d_templ)
def reset_parameter(self):
self.proj_pair = init_lecun_normal(self.proj_pair)
nn.init.zeros_(self.proj_pair.bias)
def forward(self, tors, pair, rbf_feat, use_checkpoint=False):
B, T, L = tors.shape[:3]
tors = tors.reshape(B*T, L, -1)
pair = pair.reshape(B*T, L, L, -1)
pair = torch.cat((pair, rbf_feat), dim=-1)
pair = self.proj_pair(pair)
for i_block in range(self.n_block):
if use_checkpoint:
tors = tors + checkpoint.checkpoint(create_custom_forward(self.row_attn[i_block]), tors, pair)
else:
tors = tors + self.row_attn[i_block](tors, pair)
tors = tors + self.ff[i_block](tors)
return self.norm(tors).reshape(B, T, L, -1)
class Templ_emb(nn.Module):
# Get template embedding
# Features are
# t2d:
# - 37 distogram bins + 6 orientations (43)
# - Mask (missing/unaligned) (1)
# t1d:
# - tiled AA sequence (20 standard aa + gap)
# - confidence (1)
    #   - contacting or not (1). NB this is added for the diffusion model. Used only in complex training
    #     examples - 1 signifies that a residue in the non-diffused chain, i.e. the context, is in contact
    #     with the diffused chain.
#
#Added extra t1d dimension for contacting or not
def __init__(self, d_t1d=21+1+1, d_t2d=43+1, d_tor=30, d_pair=128, d_state=32,
n_block=2, d_templ=64,
n_head=4, d_hidden=16, p_drop=0.25):
super(Templ_emb, self).__init__()
# process 2D features
self.emb = nn.Linear(d_t1d*2+d_t2d, d_templ)
self.templ_stack = TemplatePairStack(n_block=n_block, d_templ=d_templ, n_head=n_head,
d_hidden=d_hidden, p_drop=p_drop)
self.attn = Attention(d_pair, d_templ, n_head, d_hidden, d_pair, p_drop=p_drop)
# process torsion angles
self.emb_t1d = nn.Linear(d_t1d+d_tor, d_templ)
self.proj_t1d = nn.Linear(d_templ, d_templ)
#self.tor_stack = TemplateTorsionStack(n_block=n_block, d_templ=d_templ, n_head=n_head,
# d_hidden=d_hidden, p_drop=p_drop)
self.attn_tor = Attention(d_state, d_templ, n_head, d_hidden, d_state, p_drop=p_drop)
self.reset_parameter()
def reset_parameter(self):
self.emb = init_lecun_normal(self.emb)
nn.init.zeros_(self.emb.bias)
nn.init.kaiming_normal_(self.emb_t1d.weight, nonlinearity='relu')
nn.init.zeros_(self.emb_t1d.bias)
self.proj_t1d = init_lecun_normal(self.proj_t1d)
nn.init.zeros_(self.proj_t1d.bias)
def forward(self, t1d, t2d, alpha_t, xyz_t, pair, state, use_checkpoint=False):
# Input
# - t1d: 1D template info (B, T, L, 23)
# - t2d: 2D template info (B, T, L, L, 44)
B, T, L, _ = t1d.shape
# Prepare 2D template features
left = t1d.unsqueeze(3).expand(-1,-1,-1,L,-1)
right = t1d.unsqueeze(2).expand(-1,-1,L,-1,-1)
#
templ = torch.cat((t2d, left, right), -1) # (B, T, L, L, 90)
        templ = self.emb(templ) # Template features (B, T, L, L, d_templ)
# process each template features
xyz_t = xyz_t.reshape(B*T, L, -1, 3)
rbf_feat = rbf(torch.cdist(xyz_t[:,:,1], xyz_t[:,:,1]))
templ = self.templ_stack(templ, rbf_feat, use_checkpoint=use_checkpoint) # (B, T, L,L, d_templ)
# Prepare 1D template torsion angle features
t1d = torch.cat((t1d, alpha_t), dim=-1) # (B, T, L, 23+30)
# process each template features
t1d = self.proj_t1d(F.relu_(self.emb_t1d(t1d)))
# mixing query state features to template state features
state = state.reshape(B*L, 1, -1)
t1d = t1d.permute(0,2,1,3).reshape(B*L, T, -1)
if use_checkpoint:
out = checkpoint.checkpoint(create_custom_forward(self.attn_tor), state, t1d, t1d)
out = out.reshape(B, L, -1)
else:
out = self.attn_tor(state, t1d, t1d).reshape(B, L, -1)
state = state.reshape(B, L, -1)
state = state + out
# mixing query pair features to template information (Template pointwise attention)
pair = pair.reshape(B*L*L, 1, -1)
templ = templ.permute(0, 2, 3, 1, 4).reshape(B*L*L, T, -1)
if use_checkpoint:
out = checkpoint.checkpoint(create_custom_forward(self.attn), pair, templ, templ)
out = out.reshape(B, L, L, -1)
else:
out = self.attn(pair, templ, templ).reshape(B, L, L, -1)
#
pair = pair.reshape(B, L, L, -1)
pair = pair + out
return pair, state
class Recycling(nn.Module):
def __init__(self, d_msa=256, d_pair=128, d_state=32):
super(Recycling, self).__init__()
self.proj_dist = nn.Linear(36+d_state*2, d_pair)
self.norm_state = nn.LayerNorm(d_state)
self.norm_pair = nn.LayerNorm(d_pair)
self.norm_msa = nn.LayerNorm(d_msa)
self.reset_parameter()
def reset_parameter(self):
self.proj_dist = init_lecun_normal(self.proj_dist)
nn.init.zeros_(self.proj_dist.bias)
def forward(self, seq, msa, pair, xyz, state):
B, L = pair.shape[:2]
state = self.norm_state(state)
#
left = state.unsqueeze(2).expand(-1,-1,L,-1)
right = state.unsqueeze(1).expand(-1,L,-1,-1)
# three anchor atoms
N = xyz[:,:,0]
Ca = xyz[:,:,1]
C = xyz[:,:,2]
# recreate Cb given N,Ca,C
b = Ca - N
c = C - Ca
a = torch.cross(b, c, dim=-1)
Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
dist = rbf(torch.cdist(Cb, Cb))
dist = torch.cat((dist, left, right), dim=-1)
dist = self.proj_dist(dist)
pair = dist + self.norm_pair(pair)
msa = self.norm_msa(msa)
return msa, pair, state
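# Illustrative sketch (editor's addition): the virtual C-beta used above is a
# fixed linear combination of the local backbone frame; the same idealized
# construction appears elsewhere in this repo (e.g. coords6d.py).
def _demo_virtual_cb():
    N  = torch.tensor([[1.458, 0.000, 0.000]])
    Ca = torch.tensor([[0.000, 0.000, 0.000]])
    C  = torch.tensor([[-0.550, 1.420, 0.000]])
    b = Ca - N
    c = C - Ca
    a = torch.cross(b, c, dim=-1)
    return -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca  # (1, 3) C-beta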
| RFdiffusion-main | Embeddings.py |
#!/usr/bin/env python
"""
Inference script.
To run with base.yaml as the config,
> python run_inference.py
To specify a different config,
> python run_inference.py --config-name symmetry
where symmetry can be the filename of any other config (without .yaml extension)
See https://hydra.cc/docs/advanced/hydra-command-line-flags/ for more options.
"""
import re
import os, time, pickle
import torch
from omegaconf import DictConfig, OmegaConf
import hydra
import logging
from util import writepdb_multi, writepdb
from inference import utils as iu
from icecream import ic
from hydra.core.hydra_config import HydraConfig
import numpy as np
import random
import glob
def make_deterministic(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
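# Editor's note (illustrative, not part of the original script): seeding
# torch/numpy/random alone does not guarantee bitwise-reproducible CUDA runs;
# full determinism would typically also require e.g.
#   torch.backends.cudnn.deterministic = True
#   torch.backends.cudnn.benchmark = False
# These are left unset here to preserve the original behavior.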
@hydra.main(version_base=None, config_path="config/inference", config_name="base")
def main(conf: HydraConfig) -> None:
log = logging.getLogger(__name__)
if conf.inference.deterministic:
make_deterministic()
# Initialize sampler and target/contig.
sampler = iu.sampler_selector(conf)
# Loop over number of designs to sample.
design_startnum = sampler.inf_conf.design_startnum
if sampler.inf_conf.design_startnum == -1:
existing = glob.glob(sampler.inf_conf.output_prefix + "*.pdb")
indices = [-1]
        for e in existing:
            m = re.match(r".*_(\d+)\.pdb$", e)
            if not m:
                continue
            indices.append(int(m.groups()[0]))
design_startnum = max(indices) + 1
for i_des in range(design_startnum, design_startnum + sampler.inf_conf.num_designs):
if conf.inference.deterministic:
make_deterministic(i_des)
start_time = time.time()
out_prefix = f"{sampler.inf_conf.output_prefix}_{i_des}"
log.info(f"Making design {out_prefix}")
if sampler.inf_conf.cautious and os.path.exists(out_prefix + ".pdb"):
log.info(
f"(cautious mode) Skipping this design because {out_prefix}.pdb already exists."
)
continue
x_init, seq_init = sampler.sample_init()
denoised_xyz_stack = []
px0_xyz_stack = []
seq_stack = []
plddt_stack = []
x_t = torch.clone(x_init)
seq_t = torch.clone(seq_init)
# Loop over number of reverse diffusion time steps.
for t in range(int(sampler.t_step_input), sampler.inf_conf.final_step - 1, -1):
px0, x_t, seq_t, plddt = sampler.sample_step(
t=t, x_t=x_t, seq_init=seq_t, final_step=sampler.inf_conf.final_step
)
px0_xyz_stack.append(px0)
denoised_xyz_stack.append(x_t)
seq_stack.append(seq_t)
plddt_stack.append(plddt[0]) # remove singleton leading dimension
# Flip order for better visualization in pymol
denoised_xyz_stack = torch.stack(denoised_xyz_stack)
denoised_xyz_stack = torch.flip(
denoised_xyz_stack,
[
0,
],
)
px0_xyz_stack = torch.stack(px0_xyz_stack)
px0_xyz_stack = torch.flip(
px0_xyz_stack,
[
0,
],
)
# For logging -- don't flip
plddt_stack = torch.stack(plddt_stack)
# Save outputs
os.makedirs(os.path.dirname(out_prefix), exist_ok=True)
final_seq = seq_stack[-1]
# Output glycines, except for motif region
final_seq = torch.where(
torch.argmax(seq_init, dim=-1) == 21, 7, torch.argmax(seq_init, dim=-1)
) # 7 is glycine
bfacts = torch.ones_like(final_seq.squeeze())
# make bfact=0 for diffused coordinates
bfacts[torch.where(torch.argmax(seq_init, dim=-1) == 21, True, False)] = 0
# pX0 last step
out = f"{out_prefix}.pdb"
# Now don't output sidechains
writepdb(
out,
denoised_xyz_stack[0, :, :4],
final_seq,
sampler.binderlen,
chain_idx=sampler.chain_idx,
bfacts=bfacts,
)
# run metadata
trb = dict(
config=OmegaConf.to_container(sampler._conf, resolve=True),
plddt=plddt_stack.cpu().numpy(),
device=torch.cuda.get_device_name(torch.cuda.current_device())
if torch.cuda.is_available()
else "CPU",
time=time.time() - start_time,
)
if hasattr(sampler, "contig_map"):
for key, value in sampler.contig_map.get_mappings().items():
trb[key] = value
with open(f"{out_prefix}.trb", "wb") as f_out:
pickle.dump(trb, f_out)
if sampler.inf_conf.write_trajectory:
# trajectory pdbs
traj_prefix = (
os.path.dirname(out_prefix) + "/traj/" + os.path.basename(out_prefix)
)
os.makedirs(os.path.dirname(traj_prefix), exist_ok=True)
out = f"{traj_prefix}_Xt-1_traj.pdb"
writepdb_multi(
out,
denoised_xyz_stack,
bfacts,
final_seq.squeeze(),
use_hydrogens=False,
backbone_only=False,
chain_ids=sampler.chain_idx,
)
out = f"{traj_prefix}_pX0_traj.pdb"
writepdb_multi(
out,
px0_xyz_stack,
bfacts,
final_seq.squeeze(),
use_hydrogens=False,
backbone_only=False,
chain_ids=sampler.chain_idx,
)
log.info(f"Finished design in {(time.time()-start_time)/60:.2f} minutes")
if __name__ == "__main__":
main()
| RFdiffusion-main | run_inference.py |
"""SO(3) diffusion methods."""
import numpy as np
import os
from functools import cached_property
import torch
from scipy.spatial.transform import Rotation
import scipy.linalg
### First define geometric operations on the SO3 manifold
# hat map from vector space R^3 to Lie algebra so(3)
def hat(v):
hat_v = torch.zeros([v.shape[0], 3, 3])
hat_v[:, 0, 1], hat_v[:, 0, 2], hat_v[:, 1, 2] = -v[:, 2], v[:, 1], -v[:, 0]
return hat_v + -hat_v.transpose(2, 1)
# Logarithmic map from SO(3) to R^3 (i.e. rotation vector)
def Log(R): return torch.tensor(Rotation.from_matrix(R.numpy()).as_rotvec())
# logarithmic map from SO(3) to so(3), this is the matrix logarithm
def log(R): return hat(Log(R))
# Exponential map from vector space of so(3) to SO(3), this is the matrix
# exponential combined with the "hat" map
def Exp(A): return torch.tensor(Rotation.from_rotvec(A.numpy()).as_matrix())
# Angle of rotation SO(3) to R^+
def Omega(R): return np.linalg.norm(log(R), axis=[-2, -1])/np.sqrt(2.)
L_default = 2000
def f_igso3(omega, t, L=L_default):
"""Truncated sum of IGSO(3) distribution.
This function approximates the power series in equation 5 of
"DENOISING DIFFUSION PROBABILISTIC MODELS ON SO(3) FOR ROTATIONAL
ALIGNMENT"
Leach et al. 2022
This expression diverges from the expression in Leach in that here, sigma =
sqrt(2) * eps, if eps_leach were the scale parameter of the IGSO(3).
With this reparameterization, IGSO(3) agrees with the Brownian motion on
SO(3) with t=sigma^2 when defined for the canonical inner product on SO3,
<u, v>_SO3 = Trace(u v^T)/2
Args:
omega: i.e. the angle of rotation associated with rotation matrix
t: variance parameter of IGSO(3), maps onto time in Brownian motion
L: Truncation level
"""
ls = torch.arange(L)[None] # of shape [1, L]
return ((2*ls + 1) * torch.exp(-ls*(ls+1)*t/2) *
torch.sin(omega[:, None]*(ls+1/2)) / torch.sin(omega[:, None]/2)).sum(dim=-1)
def d_logf_d_omega(omega, t, L=L_default):
omega = torch.tensor(omega, requires_grad=True)
log_f = torch.log(f_igso3(omega, t, L))
return torch.autograd.grad(log_f.sum(), omega)[0].numpy()
# IGSO3 density with respect to the volume form on SO(3)
def igso3_density(Rt, t, L=L_default):
return f_igso3(torch.tensor(Omega(Rt)), t, L).numpy()
def igso3_density_angle(omega, t, L=L_default):
return f_igso3(torch.tensor(omega), t, L).numpy()*(1-np.cos(omega))/np.pi
# grad_R log IGSO3(R; I_3, t)
def igso3_score(R, t, L=L_default):
omega = Omega(R)
unit_vector = np.einsum('Nij,Njk->Nik', R, log(R))/omega[:, None, None]
return unit_vector * d_logf_d_omega(omega, t, L)[:, None, None]
def calculate_igso3(*, num_sigma, num_omega, min_sigma, max_sigma, L=L_default):
"""calculate_igso3 pre-computes numerical approximations to the IGSO3 cdfs
and score norms and expected squared score norms.
Args:
num_sigma: number of different sigmas for which to compute igso3
quantities.
num_omega: number of point in the discretization in the angle of
rotation.
min_sigma, max_sigma: the upper and lower ranges for the angle of
rotation on which to consider the IGSO3 distribution. This cannot
be too low or it will create numerical instability.
"""
# Discretize omegas for calculating CDFs. Skip omega=0.
discrete_omega = np.linspace(0, np.pi, num_omega+1)[1:]
# Exponential noise schedule. This choice is closely tied to the
# scalings used when simulating the reverse time SDE. For each step n,
# discrete_sigma[n] = min_eps^(1-n/num_eps) * max_eps^(n/num_eps)
discrete_sigma = 10 ** np.linspace(np.log10(min_sigma), np.log10(max_sigma), num_sigma + 1)[1:]
# Compute the pdf and cdf values for the marginal distribution of the angle
# of rotation (which is needed for sampling)
pdf_vals = np.asarray(
[igso3_density_angle(discrete_omega, sigma**2) for sigma in discrete_sigma])
cdf_vals = np.asarray(
[pdf.cumsum() / num_omega * np.pi for pdf in pdf_vals])
    # Compute the norms of the scores. These are used to scale the rotation axis when
    # computing the score as a vector.
score_norm = np.asarray(
[d_logf_d_omega(discrete_omega, sigma**2) for sigma in discrete_sigma])
    # Compute the expected (root-mean-square) score norm for each sigma
exp_score_norms = np.sqrt(
np.sum(
score_norm**2 * pdf_vals, axis=1) / np.sum(
pdf_vals, axis=1))
return {
'cdf': cdf_vals,
'score_norm': score_norm,
'exp_score_norms': exp_score_norms,
'discrete_omega': discrete_omega,
'discrete_sigma': discrete_sigma,
}
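# Illustrative sketch (editor's addition): drawing rotation angles from the
# precomputed IGSO(3) marginal by inverse-transform sampling. The helper name
# and the linear-interpolation inversion are assumptions, not part of this
# module's original API.
def _demo_sample_igso3_angles(igso3_vals, sigma_idx, n=10):
    cdf = igso3_vals['cdf'][sigma_idx]        # (num_omega,), monotone increasing
    omega = igso3_vals['discrete_omega']      # (num_omega,)
    u = np.random.rand(n)                     # uniform samples in [0, 1)
    return np.interp(u, cdf, omega)           # invert the CDF by interpolation
# e.g. igso3_vals = calculate_igso3(num_sigma=10, num_omega=500,
#                                   min_sigma=0.05, max_sigma=1.5)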
| RFdiffusion-main | igso3.py |
import torch
import torch.nn as nn
class DistanceNetwork(nn.Module):
def __init__(self, n_feat, p_drop=0.1):
super(DistanceNetwork, self).__init__()
#
self.proj_symm = nn.Linear(n_feat, 37*2)
self.proj_asymm = nn.Linear(n_feat, 37+19)
self.reset_parameter()
def reset_parameter(self):
# initialize linear layer for final logit prediction
nn.init.zeros_(self.proj_symm.weight)
nn.init.zeros_(self.proj_asymm.weight)
nn.init.zeros_(self.proj_symm.bias)
nn.init.zeros_(self.proj_asymm.bias)
def forward(self, x):
# input: pair info (B, L, L, C)
# predict theta, phi (non-symmetric)
logits_asymm = self.proj_asymm(x)
logits_theta = logits_asymm[:,:,:,:37].permute(0,3,1,2)
logits_phi = logits_asymm[:,:,:,37:].permute(0,3,1,2)
# predict dist, omega
logits_symm = self.proj_symm(x)
logits_symm = logits_symm + logits_symm.permute(0,2,1,3)
logits_dist = logits_symm[:,:,:,:37].permute(0,3,1,2)
logits_omega = logits_symm[:,:,:,37:].permute(0,3,1,2)
return logits_dist, logits_omega, logits_theta, logits_phi
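# Illustrative sketch (editor's addition): the symmetrization above makes the
# distance/omega logits exactly invariant to swapping residues i and j, while
# theta/phi stay directional. A quick check (weights are zero-initialized, so
# we re-randomize the symmetric head for a non-trivial test):
def _demo_distance_head_symmetry():
    head = DistanceNetwork(n_feat=16)
    nn.init.normal_(head.proj_symm.weight)
    x = torch.randn(1, 8, 8, 16)
    dist, omega, theta, phi = head(x)
    assert torch.allclose(dist, dist.transpose(2, 3))
    assert torch.allclose(omega, omega.transpose(2, 3))
    return theta, phi  # generally not symmetric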
class MaskedTokenNetwork(nn.Module):
def __init__(self, n_feat, p_drop=0.1):
super(MaskedTokenNetwork, self).__init__()
self.proj = nn.Linear(n_feat, 21)
self.reset_parameter()
def reset_parameter(self):
nn.init.zeros_(self.proj.weight)
nn.init.zeros_(self.proj.bias)
def forward(self, x):
B, N, L = x.shape[:3]
logits = self.proj(x).permute(0,3,1,2).reshape(B, -1, N*L)
return logits
class LDDTNetwork(nn.Module):
def __init__(self, n_feat, n_bin_lddt=50):
super(LDDTNetwork, self).__init__()
self.proj = nn.Linear(n_feat, n_bin_lddt)
self.reset_parameter()
def reset_parameter(self):
nn.init.zeros_(self.proj.weight)
nn.init.zeros_(self.proj.bias)
def forward(self, x):
logits = self.proj(x) # (B, L, 50)
return logits.permute(0,2,1)
class ExpResolvedNetwork(nn.Module):
def __init__(self, d_msa, d_state, p_drop=0.1):
super(ExpResolvedNetwork, self).__init__()
self.norm_msa = nn.LayerNorm(d_msa)
self.norm_state = nn.LayerNorm(d_state)
self.proj = nn.Linear(d_msa+d_state, 1)
self.reset_parameter()
def reset_parameter(self):
nn.init.zeros_(self.proj.weight)
nn.init.zeros_(self.proj.bias)
def forward(self, seq, state):
B, L = seq.shape[:2]
seq = self.norm_msa(seq)
state = self.norm_state(state)
feat = torch.cat((seq, state), dim=-1)
logits = self.proj(feat)
return logits.reshape(B, L)
| RFdiffusion-main | AuxiliaryPredictor.py |
import traceback
import os
from inspect import signature
import pickle
import datetime
def pickle_function_call_wrapper(func, output_dir='pickled_inputs'):
i = 0
os.makedirs(output_dir)
def wrapper(*args, **kwargs):
"""
Wrap the original function call to print the arguments before
calling the intended function
"""
nonlocal i
i += 1
func_sig = signature(func)
# Create the argument binding so we can determine what
# parameters are given what values
argument_binding = func_sig.bind(*args, **kwargs)
argument_map = argument_binding.arguments
# Perform the print so that it shows the function name
# and arguments as a dictionary
path = os.path.join(output_dir, f'{i:05d}.pkl')
print(f"logging {func.__name__} arguments: {[k for k in argument_map]} to {path}")
argument_map['stack'] = traceback.format_stack()
for k, v in argument_map.items():
if hasattr(v, 'detach'):
argument_map[k] = v.cpu().detach()
with open(path, 'wb') as fh:
pickle.dump(argument_map, fh)
return func(*args, **kwargs)
return wrapper
def wrap_it(wrapper, instance, method, **kwargs):
class_method = getattr(instance, method)
wrapped_method = wrapper(class_method, **kwargs)
setattr(instance, method, wrapped_method)
def pickle_function_call(instance, method, subdir):
output_dir = os.path.join(os.getcwd(), 'pickled_inputs', subdir, datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
wrap_it(pickle_function_call_wrapper, instance, method, output_dir=output_dir)
return output_dir
# For testing
if __name__=='__main__':
import glob
class Dog:
def __init__(self, name):
self.name = name
def bark(self, arg, kwarg=None):
print(f'{self.name}:{arg}:{kwarg}')
dog = Dog('fido')
dog.bark('ruff')
output_dir = pickle_function_call(dog, 'bark', 'debugging')
dog.bark('ruff', kwarg='wooof')
for p in glob.glob(os.path.join(output_dir, '*')):
print(p)
with open(p, 'rb') as fh:
print(pickle.load(fh))
| RFdiffusion-main | model_input_logger.py |
import sys
import numpy as np
import random
from icecream import ic
class ContigMap:
"""
    Class for handling contig mapping.
    Inherited from the inpainting codebase; to be updated at some point.
    Supports multiple chains, or multiple crops from a single receptor chain.
    Also supports an indexing jump (+200) or not, based on the contig input.
    By default, inpainted chains are output as chain A (then B, C etc. if there are multiple chains),
    with all fragments of the receptor chain on the next chain (generally B).
    Output chains can be specified; if so, the sequence must have the same number of elements as the contig string.
"""
def __init__(
self,
parsed_pdb,
contigs=None,
inpaint_seq=None,
inpaint_str=None,
length=None,
ref_idx=None,
hal_idx=None,
idx_rf=None,
inpaint_seq_tensor=None,
inpaint_str_tensor=None,
topo=False,
provide_seq=None,
):
# sanity checks
if contigs is None and ref_idx is None:
sys.exit("Must either specify a contig string or precise mapping")
if idx_rf is not None or hal_idx is not None or ref_idx is not None:
if idx_rf is None or hal_idx is None or ref_idx is None:
sys.exit(
"If you're specifying specific contig mappings, the reference and output positions must be specified, AND the indexing for RoseTTAFold (idx_rf)"
)
self.chain_order = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
if length is not None:
if "-" not in length:
self.length = [int(length), int(length) + 1]
else:
self.length = [int(length.split("-")[0]), int(length.split("-")[1]) + 1]
else:
self.length = None
self.ref_idx = ref_idx
self.hal_idx = hal_idx
self.idx_rf = idx_rf
self.inpaint_seq = (
"/".join(inpaint_seq).split("/") if inpaint_seq is not None else None
)
self.inpaint_str = (
"/".join(inpaint_str).split("/") if inpaint_str is not None else None
)
self.inpaint_seq_tensor = inpaint_seq_tensor
self.inpaint_str_tensor = inpaint_str_tensor
self.parsed_pdb = parsed_pdb
self.topo = topo
if ref_idx is None:
# using default contig generation, which outputs in rosetta-like format
self.contigs = contigs
(
self.sampled_mask,
self.contig_length,
self.n_inpaint_chains,
) = self.get_sampled_mask()
self.receptor_chain = self.chain_order[self.n_inpaint_chains]
(
self.receptor,
self.receptor_hal,
self.receptor_rf,
self.inpaint,
self.inpaint_hal,
self.inpaint_rf,
) = self.expand_sampled_mask()
self.ref = self.inpaint + self.receptor
self.hal = self.inpaint_hal + self.receptor_hal
self.rf = self.inpaint_rf + self.receptor_rf
else:
# specifying precise mappings
self.ref = ref_idx
self.hal = hal_idx
            self.rf = idx_rf
self.mask_1d = [False if i == ("_", "_") else True for i in self.ref]
# take care of sequence and structure masking
if self.inpaint_seq_tensor is None:
if self.inpaint_seq is not None:
self.inpaint_seq = self.get_inpaint_seq_str(self.inpaint_seq)
else:
self.inpaint_seq = np.array(
[True if i != ("_", "_") else False for i in self.ref]
)
else:
self.inpaint_seq = self.inpaint_seq_tensor
if self.inpaint_str_tensor is None:
if self.inpaint_str is not None:
self.inpaint_str = self.get_inpaint_seq_str(self.inpaint_str)
else:
self.inpaint_str = np.array(
[True if i != ("_", "_") else False for i in self.ref]
)
else:
self.inpaint_str = self.inpaint_str_tensor
# get 0-indexed input/output (for trb file)
(
self.ref_idx0,
self.hal_idx0,
self.ref_idx0_inpaint,
self.hal_idx0_inpaint,
self.ref_idx0_receptor,
self.hal_idx0_receptor,
) = self.get_idx0()
self.con_ref_pdb_idx = [i for i in self.ref if i != ("_", "_")]
# Handle provide seq. This is zero-indexed, and used only for partial diffusion
if provide_seq is not None:
for i in provide_seq[0].split(","):
if "-" in i:
self.inpaint_seq[
int(i.split("-")[0]) : int(i.split("-")[1]) + 1
] = True
else:
self.inpaint_seq[int(i)] = True
def get_sampled_mask(self):
"""
Function to get a sampled mask from a contig.
"""
length_compatible = False
count = 0
while length_compatible is False:
inpaint_chains = 0
contig_list = self.contigs[0].strip().split()
sampled_mask = []
sampled_mask_length = 0
# allow receptor chain to be last in contig string
if all([i[0].isalpha() for i in contig_list[-1].split("/")]):
contig_list[-1] = f"{contig_list[-1]}/0"
for con in contig_list:
if (
all([i[0].isalpha() for i in con.split("/")[:-1]])
and con.split("/")[-1] == "0"
) or self.topo is True:
# receptor chain
sampled_mask.append(con)
else:
inpaint_chains += 1
# chain to be inpainted. These are the only chains that count towards the length of the contig
subcons = con.split("/")
subcon_out = []
for subcon in subcons:
if subcon[0].isalpha():
subcon_out.append(subcon)
if "-" in subcon:
sampled_mask_length += (
int(subcon.split("-")[1])
- int(subcon.split("-")[0][1:])
+ 1
)
else:
sampled_mask_length += 1
else:
if "-" in subcon:
length_inpaint = random.randint(
int(subcon.split("-")[0]), int(subcon.split("-")[1])
)
subcon_out.append(f"{length_inpaint}-{length_inpaint}")
sampled_mask_length += length_inpaint
elif subcon == "0":
subcon_out.append("0")
else:
length_inpaint = int(subcon)
subcon_out.append(f"{length_inpaint}-{length_inpaint}")
sampled_mask_length += int(subcon)
sampled_mask.append("/".join(subcon_out))
# check length is compatible
if self.length is not None:
if (
sampled_mask_length >= self.length[0]
and sampled_mask_length < self.length[1]
):
length_compatible = True
else:
length_compatible = True
count += 1
if count == 100000: # contig string incompatible with this length
sys.exit("Contig string incompatible with --length range")
return sampled_mask, sampled_mask_length, inpaint_chains
def expand_sampled_mask(self):
chain_order = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
receptor = []
inpaint = []
receptor_hal = []
inpaint_hal = []
receptor_idx = 1
inpaint_idx = 1
inpaint_chain_idx = -1
receptor_chain_break = []
inpaint_chain_break = []
for con in self.sampled_mask:
if (
all([i[0].isalpha() for i in con.split("/")[:-1]])
and con.split("/")[-1] == "0"
) or self.topo is True:
# receptor chain
subcons = con.split("/")[:-1]
assert all(
[i[0] == subcons[0][0] for i in subcons]
), "If specifying fragmented receptor in a single block of the contig string, they MUST derive from the same chain"
assert all(
int(subcons[i].split("-")[0][1:])
< int(subcons[i + 1].split("-")[0][1:])
for i in range(len(subcons) - 1)
), "If specifying multiple fragments from the same chain, pdb indices must be in ascending order!"
for idx, subcon in enumerate(subcons):
ref_to_add = [
(subcon[0], i)
for i in np.arange(
int(subcon.split("-")[0][1:]), int(subcon.split("-")[1]) + 1
)
]
receptor.extend(ref_to_add)
receptor_hal.extend(
[
(self.receptor_chain, i)
for i in np.arange(
receptor_idx, receptor_idx + len(ref_to_add)
)
]
)
receptor_idx += len(ref_to_add)
if idx != len(subcons) - 1:
idx_jump = (
int(subcons[idx + 1].split("-")[0][1:])
- int(subcon.split("-")[1])
- 1
)
receptor_chain_break.append(
(receptor_idx - 1, idx_jump)
) # actual chain break in pdb chain
else:
receptor_chain_break.append(
(receptor_idx - 1, 200)
) # 200 aa chain break
else:
inpaint_chain_idx += 1
for subcon in con.split("/"):
if subcon[0].isalpha():
ref_to_add = [
(subcon[0], i)
for i in np.arange(
int(subcon.split("-")[0][1:]),
int(subcon.split("-")[1]) + 1,
)
]
inpaint.extend(ref_to_add)
inpaint_hal.extend(
[
(chain_order[inpaint_chain_idx], i)
for i in np.arange(
inpaint_idx, inpaint_idx + len(ref_to_add)
)
]
)
inpaint_idx += len(ref_to_add)
else:
inpaint.extend([("_", "_")] * int(subcon.split("-")[0]))
inpaint_hal.extend(
[
(chain_order[inpaint_chain_idx], i)
for i in np.arange(
inpaint_idx, inpaint_idx + int(subcon.split("-")[0])
)
]
)
inpaint_idx += int(subcon.split("-")[0])
inpaint_chain_break.append((inpaint_idx - 1, 200))
if self.topo is True or inpaint_hal == []:
receptor_hal = [(i[0], i[1]) for i in receptor_hal]
else:
receptor_hal = [
(i[0], i[1] + inpaint_hal[-1][1]) for i in receptor_hal
] # rosetta-like numbering
# get rf indexes, with chain breaks
inpaint_rf = np.arange(0, len(inpaint))
receptor_rf = np.arange(len(inpaint) + 200, len(inpaint) + len(receptor) + 200)
for ch_break in inpaint_chain_break[:-1]:
receptor_rf[:] += 200
inpaint_rf[ch_break[0] :] += ch_break[1]
for ch_break in receptor_chain_break[:-1]:
receptor_rf[ch_break[0] :] += ch_break[1]
return (
receptor,
receptor_hal,
receptor_rf.tolist(),
inpaint,
inpaint_hal,
inpaint_rf.tolist(),
)
def get_inpaint_seq_str(self, inpaint_s):
"""
function to generate inpaint_str or inpaint_seq masks specific to this contig
"""
s_mask = np.copy(self.mask_1d)
inpaint_s_list = []
for i in inpaint_s:
if "-" in i:
inpaint_s_list.extend(
[
(i[0], p)
for p in range(
int(i.split("-")[0][1:]), int(i.split("-")[1]) + 1
)
]
)
else:
inpaint_s_list.append((i[0], int(i[1:])))
for res in inpaint_s_list:
if res in self.ref:
s_mask[self.ref.index(res)] = False # mask this residue
return np.array(s_mask)
def get_idx0(self):
ref_idx0 = []
hal_idx0 = []
ref_idx0_inpaint = []
hal_idx0_inpaint = []
ref_idx0_receptor = []
hal_idx0_receptor = []
for idx, val in enumerate(self.ref):
if val != ("_", "_"):
assert val in self.parsed_pdb["pdb_idx"], f"{val} is not in pdb file!"
hal_idx0.append(idx)
ref_idx0.append(self.parsed_pdb["pdb_idx"].index(val))
for idx, val in enumerate(self.inpaint):
if val != ("_", "_"):
hal_idx0_inpaint.append(idx)
ref_idx0_inpaint.append(self.parsed_pdb["pdb_idx"].index(val))
for idx, val in enumerate(self.receptor):
if val != ("_", "_"):
hal_idx0_receptor.append(idx)
ref_idx0_receptor.append(self.parsed_pdb["pdb_idx"].index(val))
return (
ref_idx0,
hal_idx0,
ref_idx0_inpaint,
hal_idx0_inpaint,
ref_idx0_receptor,
hal_idx0_receptor,
)
def get_mappings(self):
mappings = {}
mappings["con_ref_pdb_idx"] = [i for i in self.inpaint if i != ("_", "_")]
mappings["con_hal_pdb_idx"] = [
self.inpaint_hal[i]
for i in range(len(self.inpaint_hal))
if self.inpaint[i] != ("_", "_")
]
mappings["con_ref_idx0"] = np.array(self.ref_idx0_inpaint)
mappings["con_hal_idx0"] = np.array(self.hal_idx0_inpaint)
if self.inpaint != self.ref:
mappings["complex_con_ref_pdb_idx"] = [
i for i in self.ref if i != ("_", "_")
]
mappings["complex_con_hal_pdb_idx"] = [
self.hal[i] for i in range(len(self.hal)) if self.ref[i] != ("_", "_")
]
mappings["receptor_con_ref_pdb_idx"] = [
i for i in self.receptor if i != ("_", "_")
]
mappings["receptor_con_hal_pdb_idx"] = [
self.receptor_hal[i]
for i in range(len(self.receptor_hal))
if self.receptor[i] != ("_", "_")
]
mappings["complex_con_ref_idx0"] = np.array(self.ref_idx0)
mappings["complex_con_hal_idx0"] = np.array(self.hal_idx0)
mappings["receptor_con_ref_idx0"] = np.array(self.ref_idx0_receptor)
mappings["receptor_con_hal_idx0"] = np.array(self.hal_idx0_receptor)
mappings["inpaint_str"] = self.inpaint_str
mappings["inpaint_seq"] = self.inpaint_seq
mappings["sampled_mask"] = self.sampled_mask
mappings["mask_1d"] = self.mask_1d
return mappings
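# Illustrative usage sketch (editor's addition). `parsed_pdb` below is a
# hypothetical stand-in for the dict produced by the repo's pdb parser; only
# the 'pdb_idx' key is exercised by this example.
def _demo_contigmap():
    parsed_pdb = {"pdb_idx": [("A", i) for i in range(1, 21)]}
    # keep motif residues A5-A10 fixed, build 5 new residues on either side
    cm = ContigMap(parsed_pdb, contigs=["5/A5-10/5"])
    return cm.get_mappings()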
| RFdiffusion-main | contigs.py |
import numpy as np
import scipy
import scipy.spatial
# calculate dihedral angles defined by 4 sets of points
def get_dihedrals(a, b, c, d):
b0 = -1.0*(b - a)
b1 = c - b
b2 = d - c
b1 /= np.linalg.norm(b1, axis=-1)[:,None]
v = b0 - np.sum(b0*b1, axis=-1)[:,None]*b1
w = b2 - np.sum(b2*b1, axis=-1)[:,None]*b1
x = np.sum(v*w, axis=-1)
y = np.sum(np.cross(b1, v)*w, axis=-1)
return np.arctan2(y, x)
# calculate planar angles defined by 3 sets of points
def get_angles(a, b, c):
v = a - b
v /= np.linalg.norm(v, axis=-1)[:,None]
w = c - b
w /= np.linalg.norm(w, axis=-1)[:,None]
x = np.sum(v*w, axis=1)
return np.arccos(np.clip(x, -1.0, 1.0))
# get 6d coordinates from x,y,z coords of N,Ca,C atoms
def get_coords6d(xyz, dmax):
nres = xyz.shape[1]
# three anchor atoms
N = xyz[0]
Ca = xyz[1]
C = xyz[2]
# recreate Cb given N,Ca,C
b = Ca - N
c = C - Ca
a = np.cross(b, c)
Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
# fast neighbors search to collect all
# Cb-Cb pairs within dmax
kdCb = scipy.spatial.cKDTree(Cb)
indices = kdCb.query_ball_tree(kdCb, dmax)
# indices of contacting residues
idx = np.array([[i,j] for i in range(len(indices)) for j in indices[i] if i != j]).T
idx0 = idx[0]
idx1 = idx[1]
# Cb-Cb distance matrix
dist6d = np.full((nres, nres),999.9, dtype=np.float32)
dist6d[idx0,idx1] = np.linalg.norm(Cb[idx1]-Cb[idx0], axis=-1)
# matrix of Ca-Cb-Cb-Ca dihedrals
omega6d = np.zeros((nres, nres), dtype=np.float32)
omega6d[idx0,idx1] = get_dihedrals(Ca[idx0], Cb[idx0], Cb[idx1], Ca[idx1])
# matrix of polar coord theta
theta6d = np.zeros((nres, nres), dtype=np.float32)
theta6d[idx0,idx1] = get_dihedrals(N[idx0], Ca[idx0], Cb[idx0], Cb[idx1])
# matrix of polar coord phi
phi6d = np.zeros((nres, nres), dtype=np.float32)
phi6d[idx0,idx1] = get_angles(Ca[idx0], Cb[idx0], Cb[idx1])
mask = np.zeros((nres, nres), dtype=np.float32)
mask[idx0, idx1] = 1.0
return dist6d, omega6d, theta6d, phi6d, mask
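# Illustrative sketch (editor's addition): calling get_coords6d on random
# backbone coordinates. xyz is (3, L, 3) -- N, Ca, C for each of L residues.
def _demo_coords6d(L=10, dmax=20.0):
    xyz = np.random.randn(3, L, 3) * 5.0
    dist6d, omega6d, theta6d, phi6d, mask = get_coords6d(xyz, dmax)
    assert dist6d.shape == (L, L) and mask.shape == (L, L)
    return dist6d, mask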
| RFdiffusion-main | coords6d.py |
import torch
import numpy as np
from util import generate_Cbeta
class Potential:
'''
Interface class that defines the functions a potential must implement
'''
def compute(self, xyz):
'''
Given the current structure of the model prediction, return the current
potential as a PyTorch tensor with a single entry
Args:
xyz (torch.tensor, size: [L,27,3]: The current coordinates of the sample
Returns:
potential (torch.tensor, size: [1]): A potential whose value will be MAXIMIZED
by taking a step along it's gradient
'''
raise NotImplementedError('Potential compute function was not overwritten')
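# Illustrative sketch (editor's addition): how a Potential is typically used
# for guidance -- differentiate its scalar output w.r.t. the coordinates and
# step along the gradient (per the docstring, potentials are MAXIMIZED).
def _demo_potential_gradient(potential, xyz, step_size=1e-2):
    xyz = xyz.clone().detach().requires_grad_(True)
    value = potential.compute(xyz)
    grad, = torch.autograd.grad(value, xyz)
    return xyz.detach() + step_size * grad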
class monomer_ROG(Potential):
'''
Radius of Gyration potential for encouraging monomer compactness
Written by DJ and refactored into a class by NRB
'''
def __init__(self, weight=1, min_dist=15):
self.weight = weight
self.min_dist = min_dist
def compute(self, xyz):
Ca = xyz[:,1] # [L,3]
centroid = torch.mean(Ca, dim=0, keepdim=True) # [1,3]
        dgram = torch.cdist(Ca[None,...].contiguous(), centroid[None,...].contiguous(), p=2) # [1,L,1]
        dgram = torch.maximum(self.min_dist * torch.ones_like(dgram.squeeze(0)), dgram.squeeze(0)) # [L,1]
rad_of_gyration = torch.sqrt( torch.sum(torch.square(dgram)) / Ca.shape[0] ) # [1]
return -1 * self.weight * rad_of_gyration
class binder_ROG(Potential):
'''
Radius of Gyration potential for encouraging binder compactness
Author: NRB
'''
def __init__(self, binderlen, weight=1, min_dist=15):
self.binderlen = binderlen
self.min_dist = min_dist
self.weight = weight
def compute(self, xyz):
# Only look at binder residues
Ca = xyz[:self.binderlen,1] # [Lb,3]
centroid = torch.mean(Ca, dim=0, keepdim=True) # [1,3]
# cdist needs a batch dimension - NRB
        dgram = torch.cdist(Ca[None,...].contiguous(), centroid[None,...].contiguous(), p=2) # [1,Lb,1]
        dgram = torch.maximum(self.min_dist * torch.ones_like(dgram.squeeze(0)), dgram.squeeze(0)) # [Lb,1]
rad_of_gyration = torch.sqrt( torch.sum(torch.square(dgram)) / Ca.shape[0] ) # [1]
return -1 * self.weight * rad_of_gyration
class dimer_ROG(Potential):
'''
Radius of Gyration potential for encouraging compactness of both monomers when designing dimers
Author: PV
'''
def __init__(self, binderlen, weight=1, min_dist=15):
self.binderlen = binderlen
self.min_dist = min_dist
self.weight = weight
def compute(self, xyz):
# Only look at monomer 1 residues
Ca_m1 = xyz[:self.binderlen,1] # [Lb,3]
# Only look at monomer 2 residues
Ca_m2 = xyz[self.binderlen:,1] # [Lb,3]
centroid_m1 = torch.mean(Ca_m1, dim=0, keepdim=True) # [1,3]
        centroid_m2 = torch.mean(Ca_m2, dim=0, keepdim=True) # [1,3]
# cdist needs a batch dimension - NRB
#This calculates RoG for Monomer 1
        dgram_m1 = torch.cdist(Ca_m1[None,...].contiguous(), centroid_m1[None,...].contiguous(), p=2) # [1,Lb,1]
        dgram_m1 = torch.maximum(self.min_dist * torch.ones_like(dgram_m1.squeeze(0)), dgram_m1.squeeze(0)) # [Lb,1]
rad_of_gyration_m1 = torch.sqrt( torch.sum(torch.square(dgram_m1)) / Ca_m1.shape[0] ) # [1]
# cdist needs a batch dimension - NRB
#This calculates RoG for Monomer 2
        dgram_m2 = torch.cdist(Ca_m2[None,...].contiguous(), centroid_m2[None,...].contiguous(), p=2) # [1,Lb,1]
        dgram_m2 = torch.maximum(self.min_dist * torch.ones_like(dgram_m2.squeeze(0)), dgram_m2.squeeze(0)) # [Lb,1]
rad_of_gyration_m2 = torch.sqrt( torch.sum(torch.square(dgram_m2)) / Ca_m2.shape[0] ) # [1]
#Potential value is the average of both radii of gyration (is avg. the best way to do this?)
return -1 * self.weight * (rad_of_gyration_m1 + rad_of_gyration_m2)/2
class binder_ncontacts(Potential):
'''
Differentiable way to maximise number of contacts within a protein
Motivation is given here: https://www.plumed.org/doc-v2.7/user-doc/html/_c_o_o_r_d_i_n_a_t_i_o_n.html
'''
def __init__(self, binderlen, weight=1, r_0=8, d_0=4):
self.binderlen = binderlen
self.r_0 = r_0
self.weight = weight
self.d_0 = d_0
def compute(self, xyz):
# Only look at binder Ca residues
Ca = xyz[:self.binderlen,1] # [Lb,3]
#cdist needs a batch dimension - NRB
dgram = torch.cdist(Ca[None,...].contiguous(), Ca[None,...].contiguous(), p=2) # [1,Lb,Lb]
divide_by_r_0 = (dgram - self.d_0) / self.r_0
numerator = torch.pow(divide_by_r_0,6)
denominator = torch.pow(divide_by_r_0,12)
binder_ncontacts = (1 - numerator) / (1 - denominator)
print("BINDER CONTACTS:", binder_ncontacts.sum())
#Potential value is the average of both radii of gyration (is avg. the best way to do this?)
return self.weight * binder_ncontacts.sum()
class dimer_ncontacts(Potential):
'''
Differentiable way to maximise number of contacts for two individual monomers in a dimer
Motivation is given here: https://www.plumed.org/doc-v2.7/user-doc/html/_c_o_o_r_d_i_n_a_t_i_o_n.html
Author: PV
'''
def __init__(self, binderlen, weight=1, r_0=8, d_0=4):
self.binderlen = binderlen
self.r_0 = r_0
self.weight = weight
self.d_0 = d_0
def compute(self, xyz):
# Only look at binder Ca residues
Ca = xyz[:self.binderlen,1] # [Lb,3]
#cdist needs a batch dimension - NRB
dgram = torch.cdist(Ca[None,...].contiguous(), Ca[None,...].contiguous(), p=2) # [1,Lb,Lb]
divide_by_r_0 = (dgram - self.d_0) / self.r_0
numerator = torch.pow(divide_by_r_0,6)
denominator = torch.pow(divide_by_r_0,12)
binder_ncontacts = (1 - numerator) / (1 - denominator)
#Potential is the sum of values in the tensor
binder_ncontacts = binder_ncontacts.sum()
# Only look at target Ca residues
Ca = xyz[self.binderlen:,1] # [Lb,3]
dgram = torch.cdist(Ca[None,...].contiguous(), Ca[None,...].contiguous(), p=2) # [1,Lb,Lb]
divide_by_r_0 = (dgram - self.d_0) / self.r_0
numerator = torch.pow(divide_by_r_0,6)
denominator = torch.pow(divide_by_r_0,12)
target_ncontacts = (1 - numerator) / (1 - denominator)
#Potential is the sum of values in the tensor
target_ncontacts = target_ncontacts.sum()
print("DIMER NCONTACTS:", (binder_ncontacts+target_ncontacts)/2)
#Returns average of n contacts withiin monomer 1 and monomer 2
return self.weight * (binder_ncontacts+target_ncontacts)/2
class interface_ncontacts(Potential):
'''
Differentiable way to maximise number of contacts between binder and target
Motivation is given here: https://www.plumed.org/doc-v2.7/user-doc/html/_c_o_o_r_d_i_n_a_t_i_o_n.html
Author: PV
'''
def __init__(self, binderlen, weight=1, r_0=8, d_0=6):
self.binderlen = binderlen
self.r_0 = r_0
self.weight = weight
self.d_0 = d_0
def compute(self, xyz):
# Extract binder Ca residues
Ca_b = xyz[:self.binderlen,1] # [Lb,3]
# Extract target Ca residues
Ca_t = xyz[self.binderlen:,1] # [Lt,3]
#cdist needs a batch dimension - NRB
dgram = torch.cdist(Ca_b[None,...].contiguous(), Ca_t[None,...].contiguous(), p=2) # [1,Lb,Lt]
divide_by_r_0 = (dgram - self.d_0) / self.r_0
numerator = torch.pow(divide_by_r_0,6)
denominator = torch.pow(divide_by_r_0,12)
interface_ncontacts = (1 - numerator) / (1 - denominator)
#Potential is the sum of values in the tensor
interface_ncontacts = interface_ncontacts.sum()
print("INTERFACE CONTACTS:", interface_ncontacts.sum())
return self.weight * interface_ncontacts
class monomer_contacts(Potential):
'''
Differentiable way to maximise number of contacts within a protein
Motivation is given here: https://www.plumed.org/doc-v2.7/user-doc/html/_c_o_o_r_d_i_n_a_t_i_o_n.html
Author: PV
NOTE: This function sometimes produces NaN's -- added check in reverse diffusion for nan grads
'''
def __init__(self, weight=1, r_0=8, d_0=2, eps=1e-6):
self.r_0 = r_0
self.weight = weight
self.d_0 = d_0
self.eps = eps
def compute(self, xyz):
Ca = xyz[:,1] # [L,3]
#cdist needs a batch dimension - NRB
dgram = torch.cdist(Ca[None,...].contiguous(), Ca[None,...].contiguous(), p=2) # [1,Lb,Lb]
divide_by_r_0 = (dgram - self.d_0) / self.r_0
numerator = torch.pow(divide_by_r_0,6)
denominator = torch.pow(divide_by_r_0,12)
        ncontacts = (1 - numerator) / (1 - denominator)
#Potential value is the average of both radii of gyration (is avg. the best way to do this?)
return self.weight * ncontacts.sum()
def make_contact_matrix(nchain, contact_string=None):
"""
Calculate a matrix of inter/intra chain contact indicators
Parameters:
nchain (int, required): How many chains are in this design
        contact_string (str, required): String denoting how to define contacts, comma-delimited between pairs of chains;
            '!' denotes repulsive, '&' denotes attractive (e.g. 'A&B,B!C')
"""
alphabet = [a for a in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
letter2num = {a:i for i,a in enumerate(alphabet)}
contacts = np.zeros((nchain,nchain))
written = np.zeros((nchain,nchain))
contact_list = contact_string.split(',')
for c in contact_list:
if not len(c) == 3:
raise SyntaxError('Invalid contact(s) specification')
i,j = letter2num[c[0]],letter2num[c[2]]
symbol = c[1]
# denote contacting/repulsive
assert symbol in ['!','&']
if symbol == '!':
contacts[i,j] = -1
contacts[j,i] = -1
else:
contacts[i,j] = 1
contacts[j,i] = 1
return contacts
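# Illustrative usage (editor's addition): 'A&B' marks chains A and B as
# attractive, 'B!C' marks B and C as repulsive; unlisted pairs stay 0, e.g.
#   make_contact_matrix(3, 'A&B,B!C')
#   -> [[ 0.  1.  0.]
#       [ 1.  0. -1.]
#       [ 0. -1.  0.]]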
class olig_contacts(Potential):
"""
Applies PV's num contacts potential within/between chains in symmetric oligomers
Author: DJ
"""
def __init__(self,
contact_matrix,
weight_intra=1,
weight_inter=1,
r_0=8, d_0=2):
"""
Parameters:
            contact_matrix (torch.tensor/np.array, required):
                square matrix of shape (Nchains,Nchains) whose (i,j) entry represents
                attractive (1), repulsive (-1), or non-existent (0) contact potentials
                between chains in the complex
            weight_intra/weight_inter (int/float, optional): Scaling/weighting factors
                for intra-chain and inter-chain contacts, respectively
"""
self.contact_matrix = contact_matrix
self.weight_intra = weight_intra
self.weight_inter = weight_inter
self.r_0 = r_0
self.d_0 = d_0
# check contact matrix only contains valid entries
assert all([i in [-1,0,1] for i in contact_matrix.flatten()]), 'Contact matrix must contain only 0, 1, or -1 in entries'
# assert the matrix is square and symmetric
shape = contact_matrix.shape
assert len(shape) == 2
assert shape[0] == shape[1]
for i in range(shape[0]):
for j in range(shape[1]):
assert contact_matrix[i,j] == contact_matrix[j,i]
self.nchain=shape[0]
# self._compute_chain_indices()
# def _compute_chain_indices(self):
# # make list of shape [i,N] for indices of each chain in total length
# indices = []
# start = 0
# for l in self.chain_lengths:
# indices.append(torch.arange(start,start+l))
# start += l
# self.indices = indices
def _get_idx(self,i,L):
"""
Returns the zero-indexed indices of the residues in chain i
"""
assert L%self.nchain == 0
Lchain = L//self.nchain
return i*Lchain + torch.arange(Lchain)
def compute(self, xyz):
"""
        Iterate through the contact matrix, compute contact potentials between chains that need them,
        and negate the contribution for any chain pairs marked repulsive (-1) in the contact matrix.
"""
L = xyz.shape[0]
all_contacts = 0
start = 0
for i in range(self.nchain):
for j in range(self.nchain):
# only compute for upper triangle, disregard zeros in contact matrix
if (i <= j) and (self.contact_matrix[i,j] != 0):
# get the indices for these two chains
idx_i = self._get_idx(i,L)
idx_j = self._get_idx(j,L)
Ca_i = xyz[idx_i,1] # slice out crds for this chain
Ca_j = xyz[idx_j,1] # slice out crds for that chain
dgram = torch.cdist(Ca_i[None,...].contiguous(), Ca_j[None,...].contiguous(), p=2) # [1,Lb,Lb]
divide_by_r_0 = (dgram - self.d_0) / self.r_0
numerator = torch.pow(divide_by_r_0,6)
denominator = torch.pow(divide_by_r_0,12)
ncontacts = (1 - numerator) / (1 - denominator)
# weight, don't double count intra
scalar = (i==j)*self.weight_intra/2 + (i!=j)*self.weight_inter
# contacts attr/repuls relative weights
all_contacts += ncontacts.sum() * self.contact_matrix[i,j] * scalar
return all_contacts
class olig_intra_contacts(Potential):
"""
Applies PV's num contacts potential for each chain individually in an oligomer design
Author: DJ
"""
    def __init__(self, chain_lengths, weight=1, r_0=8, d_0=2):
        """
        Parameters:
            chain_lengths (list, required): Ordered list of chain lengths
            weight (int/float, optional): Scaling/weighting factor
            r_0, d_0 (int/float, optional): Contact switching-function parameters,
                matching the defaults of the other contact potentials in this file
        """
        self.chain_lengths = chain_lengths
        self.weight = weight
        self.r_0 = r_0
        self.d_0 = d_0
def compute(self, xyz):
"""
Computes intra-chain num contacts potential
"""
assert sum(self.chain_lengths) == xyz.shape[0], 'given chain lengths do not match total protein length'
all_contacts = 0
start = 0
for Lc in self.chain_lengths:
            Ca = xyz[start:start+Lc, 1] # slice out Ca coordinates for this chain
dgram = torch.cdist(Ca[None,...].contiguous(), Ca[None,...].contiguous(), p=2) # [1,Lb,Lb]
divide_by_r_0 = (dgram - self.d_0) / self.r_0
numerator = torch.pow(divide_by_r_0,6)
denominator = torch.pow(divide_by_r_0,12)
ncontacts = (1 - numerator) / (1 - denominator)
# add contacts for this chain to all contacts
all_contacts += ncontacts.sum()
# increment the start to be at the next chain
start += Lc
return self.weight * all_contacts
def get_damped_lj(r_min, r_lin,p1=6,p2=12):
y_at_r_lin = lj(r_lin, r_min, p1, p2)
ydot_at_r_lin = lj_grad(r_lin, r_min,p1,p2)
def inner(dgram):
return (dgram < r_lin) * (ydot_at_r_lin * (dgram - r_lin) + y_at_r_lin) + (dgram >= r_lin) * lj(dgram, r_min, p1, p2)
return inner
def lj(dgram, r_min,p1=6, p2=12):
return 4 * ((r_min / (2**(1/p1) * dgram))**p2 - (r_min / (2**(1/p1) * dgram))**p1)
def lj_grad(dgram, r_min,p1=6,p2=12):
return -p2 * r_min**p1*(r_min**p1-dgram**p1) / (dgram**(p2+1))
def mask_expand(mask, n=1):
mask_out = mask.clone()
assert mask.ndim == 1
for i in torch.where(mask)[0]:
for j in range(i-n, i+n+1):
if j >= 0 and j < len(mask):
mask_out[j] = True
return mask_out
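# Illustrative example (editor's addition): mask_expand dilates a boolean mask
# by n positions on each side, e.g.
#   mask_expand(torch.tensor([False, False, True, False, False]), n=1)
#   -> tensor([False,  True,  True,  True, False])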
def contact_energy(dgram, d_0, r_0):
divide_by_r_0 = (dgram - d_0) / r_0
numerator = torch.pow(divide_by_r_0,6)
denominator = torch.pow(divide_by_r_0,12)
    ncontacts = (1 - numerator) / (1 - denominator).float()
return - ncontacts
def poly_repulse(dgram, r, slope, p=1):
a = slope / (p * r**(p-1))
return (dgram < r) * a * torch.abs(r - dgram)**p * slope
class substrate_contacts(Potential):
'''
Implicitly models a ligand with an attractive-repulsive potential.
'''
def __init__(self, weight=1, r_0=8, d_0=2, s=1, eps=1e-6, rep_r_0=5, rep_s=2, rep_r_min=1):
self.r_0 = r_0
self.weight = weight
self.d_0 = d_0
self.eps = eps
# motif frame coordinates
# NOTE: these probably need to be set after sample_init() call, because the motif sequence position in design must be known
self.motif_frame = None # [4,3] xyz coordinates from 4 atoms of input motif
self.motif_mapping = None # list of tuples giving positions of above atoms in design [(resi, atom_idx)]
self.motif_substrate_atoms = None # xyz coordinates of substrate from input motif
r_min = 2
self.energies = []
self.energies.append(lambda dgram: s * contact_energy(torch.min(dgram, dim=-1)[0], d_0, r_0))
if rep_r_min:
self.energies.append(lambda dgram: poly_repulse(torch.min(dgram, dim=-1)[0], rep_r_0, rep_s, p=1.5))
else:
self.energies.append(lambda dgram: poly_repulse(dgram, rep_r_0, rep_s, p=1.5))
def compute(self, xyz):
# First, get random set of atoms
# This operates on self.xyz_motif, which is assigned to this class in the model runner (for horrible plumbing reasons)
self._grab_motif_residues(self.xyz_motif)
        # for checking the affine transformation is correct
first_distance = torch.sqrt(torch.sqrt(torch.sum(torch.square(self.motif_substrate_atoms[0] - self.motif_frame[0]), dim=-1)))
# grab the coordinates of the corresponding atoms in the new frame using mapping
res = torch.tensor([k[0] for k in self.motif_mapping])
atoms = torch.tensor([k[1] for k in self.motif_mapping])
new_frame = xyz[self.diffusion_mask][res,atoms,:]
# calculate affine transformation matrix and translation vector b/w new frame and motif frame
A, t = self._recover_affine(self.motif_frame, new_frame)
# apply affine transformation to substrate atoms
substrate_atoms = torch.mm(A, self.motif_substrate_atoms.transpose(0,1)).transpose(0,1) + t
second_distance = torch.sqrt(torch.sqrt(torch.sum(torch.square(new_frame[0] - substrate_atoms[0]), dim=-1)))
assert abs(first_distance - second_distance) < 0.01, "Alignment seems to be bad"
diffusion_mask = mask_expand(self.diffusion_mask, 1)
Ca = xyz[~diffusion_mask, 1]
#cdist needs a batch dimension - NRB
dgram = torch.cdist(Ca[None,...].contiguous(), substrate_atoms.float()[None], p=2)[0] # [Lb,Lb]
all_energies = []
for i, energy_fn in enumerate(self.energies):
energy = energy_fn(dgram)
all_energies.append(energy.sum())
return - self.weight * sum(all_energies)
def _recover_affine(self,frame1, frame2):
"""
Uses Simplex Affine Matrix (SAM) formula to recover affine transform between two sets of 4 xyz coordinates
See: https://www.researchgate.net/publication/332410209_Beginner%27s_guide_to_mapping_simplexes_affinely
Args:
frame1 - 4 coordinates from starting frame [4,3]
frame2 - 4 coordinates from ending frame [4,3]
Outputs:
A - affine transformation matrix from frame1->frame2
t - affine translation vector from frame1->frame2
"""
l = len(frame1)
# construct SAM denominator matrix
B = torch.vstack([frame1.T, torch.ones(l)])
D = 1.0 / torch.linalg.det(B) # SAM denominator
M = torch.zeros((3,4), dtype=torch.float64)
for i, R in enumerate(frame2.T):
for j in range(l):
                # make the SAM numerator matrix: stack the target coordinate row onto B, then drop row j of B
                num = torch.vstack([R, B])
                num = torch.cat((num[:j+1], num[j+2:]))
# calculate SAM entry
M[i][j] = (-1)**j * D * torch.linalg.det(num)
A, t = torch.hsplit(M, [l-1])
t = t.transpose(0,1)
return A, t
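    # Hedged sanity-check sketch (illustrative only): the recovered (A, t) should map frame1
    # onto frame2 up to numerical error,
    #   A, t = self._recover_affine(frame1, frame2)
    #   assert torch.allclose(frame1 @ A.T + t, frame2, atol=1e-4)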
def _grab_motif_residues(self, xyz) -> None:
"""
Grabs 4 atoms in the motif.
Currently random subset of Ca atoms if the motif is >= 4 residues, or else 4 random atoms from a single residue
"""
idx = torch.arange(self.diffusion_mask.shape[0])
idx = idx[self.diffusion_mask].float()
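        # NOTE: torch.multinomial treats these index values as sampling weights, so lower-indexed
        # motif positions are proportionally less likely to be drawn (and index 0 never is).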
if torch.sum(self.diffusion_mask) >= 4:
rand_idx = torch.multinomial(idx, 4).long()
# get Ca atoms
self.motif_frame = xyz[rand_idx, 1]
self.motif_mapping = [(i,1) for i in rand_idx]
else:
rand_idx = torch.multinomial(idx, 1).long()
self.motif_frame = xyz[rand_idx[0],:4]
self.motif_mapping = [(rand_idx, i) for i in range(4)]
class binder_distance_ReLU(Potential):
'''
    Given the current coordinates of the diffusion trajectory, calculate a potential based on the distance between each
    binder residue and the closest hotspot residue on the target.
This potential is meant to encourage the binder to interact with a certain subset of residues on the target that
define the binding site.
Author: NRB
'''
def __init__(self, binderlen, hotspot_res, weight=1, min_dist=15, use_Cb=False):
self.binderlen = binderlen
self.hotspot_res = [res + binderlen for res in hotspot_res]
self.weight = weight
self.min_dist = min_dist
self.use_Cb = use_Cb
def compute(self, xyz):
binder = xyz[:self.binderlen,:,:] # (Lb,27,3)
target = xyz[self.hotspot_res,:,:] # (N,27,3)
if self.use_Cb:
N = binder[:,0]
Ca = binder[:,1]
C = binder[:,2]
Cb = generate_Cbeta(N,Ca,C) # (Lb,3)
N_t = target[:,0]
Ca_t = target[:,1]
C_t = target[:,2]
Cb_t = generate_Cbeta(N_t,Ca_t,C_t) # (N,3)
dgram = torch.cdist(Cb[None,...], Cb_t[None,...], p=2) # (1,Lb,N)
else:
# Use Ca dist for potential
Ca = binder[:,1] # (Lb,3)
Ca_t = target[:,1] # (N,3)
dgram = torch.cdist(Ca[None,...], Ca_t[None,...], p=2) # (1,Lb,N)
closest_dist = torch.min(dgram.squeeze(0), dim=1)[0] # (Lb)
# Cap the distance at a minimum value
min_distance = self.min_dist * torch.ones_like(closest_dist) # (Lb)
potential = torch.maximum(min_distance, closest_dist) # (Lb)
# torch.Tensor.backward() requires the potential to be a single value
potential = torch.sum(potential, dim=-1)
return -1 * self.weight * potential
class binder_any_ReLU(Potential):
'''
    Given the current coordinates of the diffusion trajectory, calculate a potential based on the minimum distance between
    ANY binder residue and the closest hotspot residue on the target.
In contrast to binder_distance_ReLU this potential will only penalize a pose if all of the binder residues are outside
of a certain distance from the target residues.
Author: NRB
'''
def __init__(self, binderlen, hotspot_res, weight=1, min_dist=15, use_Cb=False):
self.binderlen = binderlen
self.hotspot_res = [res + binderlen for res in hotspot_res]
self.weight = weight
self.min_dist = min_dist
self.use_Cb = use_Cb
def compute(self, xyz):
binder = xyz[:self.binderlen,:,:] # (Lb,27,3)
target = xyz[self.hotspot_res,:,:] # (N,27,3)
        if self.use_Cb:
N = binder[:,0]
Ca = binder[:,1]
C = binder[:,2]
Cb = generate_Cbeta(N,Ca,C) # (Lb,3)
N_t = target[:,0]
Ca_t = target[:,1]
C_t = target[:,2]
Cb_t = generate_Cbeta(N_t,Ca_t,C_t) # (N,3)
dgram = torch.cdist(Cb[None,...], Cb_t[None,...], p=2) # (1,Lb,N)
else:
# Use Ca dist for potential
Ca = binder[:,1] # (Lb,3)
Ca_t = target[:,1] # (N,3)
dgram = torch.cdist(Ca[None,...], Ca_t[None,...], p=2) # (1,Lb,N)
closest_dist = torch.min(dgram.squeeze(0)) # (1)
        # Cap the distance at a minimum value (use self.min_dist as a tensor so torch.maximum accepts it)
        min_dist = self.min_dist * torch.ones_like(closest_dist) # (1)
        potential = torch.maximum(min_dist, closest_dist) # (1)
return -1 * self.weight * potential
# Dictionary of types of potentials indexed by name of potential. Used by PotentialManager.
# If you implement a new potential you must add it to this dictionary for it to be used by
# the PotentialManager
implemented_potentials = { 'monomer_ROG': monomer_ROG,
'binder_ROG': binder_ROG,
'binder_distance_ReLU': binder_distance_ReLU,
'binder_any_ReLU': binder_any_ReLU,
'dimer_ROG': dimer_ROG,
'binder_ncontacts': binder_ncontacts,
'dimer_ncontacts': dimer_ncontacts,
'interface_ncontacts': interface_ncontacts,
'monomer_contacts': monomer_contacts,
'olig_intra_contacts': olig_intra_contacts,
'olig_contacts': olig_contacts,
'substrate_contacts': substrate_contacts}
require_binderlen = { 'binder_ROG',
'binder_distance_ReLU',
'binder_any_ReLU',
'dimer_ROG',
'binder_ncontacts',
'dimer_ncontacts',
'interface_ncontacts'}
require_hotspot_res = { 'binder_distance_ReLU',
'binder_any_ReLU' }
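# Hedged configuration sketch (option names live in the inference configs; values here are
# illustrative): each entry of guiding_potentials is a comma-delimited settings string that
# PotentialManager parses into kwargs for one of the classes above, e.g.
#   'type:olig_contacts,weight_intra:1,weight_inter:0.1'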
| RFdiffusion-main | potentials/potentials.py |
import sys
import torch
from icecream import ic
import potentials.potentials as potentials
import numpy as np
def make_contact_matrix(nchain, intra_all=False, inter_all=False, contact_string=None):
"""
Calculate a matrix of inter/intra chain contact indicators
Parameters:
        nchain (int, required): How many chains are in this design
        intra_all (bool, optional): If True, add an attractive contact potential within every chain (the diagonal)
        inter_all (bool, optional): If True, add an attractive contact potential between every pair of chains (off-diagonal)
        contact_string (str, optional): Comma-delimited string defining contacts between pairs of chains;
            '!' denotes repulsive, '&' denotes attractive (e.g. 'A&B,A!C')
"""
    alphabet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
letter2num = {a:i for i,a in enumerate(alphabet)}
contacts = np.zeros((nchain,nchain))
written = np.zeros((nchain,nchain))
# intra_all - everything on the diagonal has contact potential
if intra_all:
contacts[np.arange(nchain),np.arange(nchain)] = 1
# inter all - everything off the diagonal has contact potential
if inter_all:
mask2d = np.full_like(contacts,False)
for i in range(len(contacts)):
for j in range(len(contacts)):
if i!=j:
mask2d[i,j] = True
contacts[mask2d.astype(bool)] = 1
# custom contacts/repulsions from user
    if contact_string is not None:
contact_list = contact_string.split(',')
for c in contact_list:
assert len(c) == 3
i,j = letter2num[c[0]],letter2num[c[2]]
symbol = c[1]
assert symbol in ['!','&']
if symbol == '!':
contacts[i,j] = -1
contacts[j,i] = -1
else:
contacts[i,j] = 1
contacts[j,i] = 1
return contacts
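# Hedged usage sketch (chain labels illustrative): for a 3-chain design where chain A should
# contact chain B but be repelled from chain C:
#   make_contact_matrix(nchain=3, intra_all=True, contact_string='A&B,A!C')
# yields ones on the diagonal, +1 at (A,B)/(B,A) and -1 at (A,C)/(C,A).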
def calc_nchains(symbol, components=1):
"""
Calculates number of chains for given symmetry
"""
S = symbol.lower()
if S.startswith('c'):
return int(S[1:])*components
elif S.startswith('d'):
return 2*int(S[1:])*components
elif S.startswith('o'):
raise NotImplementedError()
elif S.startswith('t'):
return 12*components
else:
raise RuntimeError('Unknown symmetry symbol ',S)
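# Examples: calc_nchains('C3') == 3 (cyclic), calc_nchains('D2') == 4 (dihedral),
# calc_nchains('T') == 12 (tetrahedral); octahedral symbols raise NotImplementedError.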
class PotentialManager:
'''
Class to define a set of potentials from the given config object and to apply all of the specified potentials
during each cycle of the inference loop.
Author: NRB
'''
def __init__(self,
potentials_config,
ppi_config,
diffuser_config,
inference_config,
hotspot_0idx,
binderlen,
):
self.potentials_config = potentials_config
self.ppi_config = ppi_config
self.inference_config = inference_config
self.guide_scale = potentials_config.guide_scale
self.guide_decay = potentials_config.guide_decay
if potentials_config.guiding_potentials is None:
setting_list = []
else:
setting_list = [self.parse_potential_string(potstr) for potstr in potentials_config.guiding_potentials]
# PPI potentials require knowledge about the binderlen which may be detected at runtime
# This is a mechanism to still allow this info to be used in potentials - NRB
if binderlen > 0:
binderlen_update = { 'binderlen': binderlen }
hotspot_res_update = { 'hotspot_res': hotspot_0idx }
for setting in setting_list:
if setting['type'] in potentials.require_binderlen:
setting.update(binderlen_update)
if setting['type'] in potentials.require_hotspot_res:
setting.update(hotspot_res_update)
self.potentials_to_apply = self.initialize_all_potentials(setting_list)
self.T = diffuser_config.T
def is_empty(self):
'''
Check whether this instance of PotentialManager actually contains any potentials
'''
return len(self.potentials_to_apply) == 0
def parse_potential_string(self, potstr):
'''
Parse a single entry in the list of potentials to be run to a dictionary of settings for that potential.
An example of how this parsing is done:
'setting1:val1,setting2:val2,setting3:val3' -> {setting1:val1,setting2:val2,setting3:val3}
'''
setting_dict = {entry.split(':')[0]:entry.split(':')[1] for entry in potstr.split(',')}
for key in setting_dict:
            if key != 'type':
                setting_dict[key] = float(setting_dict[key])
return setting_dict
def initialize_all_potentials(self, setting_list):
'''
Given a list of potential dictionaries where each dictionary defines the configurations for a single potential,
        initialize all potentials and add them to the list of potentials to be applied
'''
to_apply = []
for potential_dict in setting_list:
assert(potential_dict['type'] in potentials.implemented_potentials), f'potential with name: {potential_dict["type"]} is not one of the implemented potentials: {potentials.implemented_potentials.keys()}'
kwargs = {k: potential_dict[k] for k in potential_dict.keys() - {'type'}}
# symmetric oligomer contact potential args
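            # NOTE: this assumes symmetric runs only request potentials that accept a
            # 'contact_matrix' kwarg (e.g. olig_contacts); other potentials would reject it.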
if self.inference_config.symmetry:
num_chains = calc_nchains(symbol=self.inference_config.symmetry, components=1) # hard code 1 for now
contact_kwargs={'nchain':num_chains,
'intra_all':self.potentials_config.olig_intra_all,
'inter_all':self.potentials_config.olig_inter_all,
'contact_string':self.potentials_config.olig_custom_contact }
contact_matrix = make_contact_matrix(**contact_kwargs)
kwargs.update({'contact_matrix':contact_matrix})
to_apply.append( potentials.implemented_potentials[potential_dict['type']](**kwargs) )
return to_apply
def compute_all_potentials(self, xyz):
'''
This is the money call. Take the current sequence and structure information and get the sum of all of the potentials that are being used
'''
potential_list = [potential.compute(xyz) for potential in self.potentials_to_apply]
potential_stack = torch.stack(potential_list, dim=0)
return torch.sum(potential_stack, dim=0)
def get_guide_scale(self, t):
'''
Given a timestep and a decay type, get the appropriate scale factor to use for applying guiding potentials
Inputs:
t (int, required): The current timestep
Output:
scale (int): The scale factor to use for applying guiding potentials
'''
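        # Example: with guide_scale=10 and T=50, 'linear' gives a scale of 10 at t=T (the first,
        # noisiest step) that fades to 0 at t=0, so guidance relaxes as the structure resolves.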
implemented_decay_types = {
'constant': lambda t: self.guide_scale,
# Linear interpolation with y2: 0, y1: guide_scale, x2: 0, x1: T, x: t
'linear' : lambda t: t/self.T * self.guide_scale,
'quadratic' : lambda t: t**2/self.T**2 * self.guide_scale,
            'cubic' : lambda t: t**3/self.T**3 * self.guide_scale
}
if self.guide_decay not in implemented_decay_types:
sys.exit(f'decay_type must be one of {implemented_decay_types.keys()}. Received decay_type={self.guide_decay}. Exiting.')
return implemented_decay_types[self.guide_decay](t)
| RFdiffusion-main | potentials/manager.py |
from setuptools import setup, find_packages
setup(
name='se3-transformer',
packages=find_packages(),
include_package_data=True,
version='1.0.0',
description='PyTorch + DGL implementation of SE(3)-Transformers',
author='Alexandre Milesi',
author_email='[email protected]',
)
| RFdiffusion-main | env/SE3Transformer/setup.py |
RFdiffusion-main | env/SE3Transformer/se3_transformer/__init__.py |
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import Tuple
import dgl
import pathlib
import torch
from dgl.data import QM9EdgeDataset
from dgl import DGLGraph
from torch import Tensor
from torch.utils.data import random_split, DataLoader, Dataset
from tqdm import tqdm
from se3_transformer.data_loading.data_module import DataModule
from se3_transformer.model.basis import get_basis
from se3_transformer.runtime.utils import get_local_rank, str2bool, using_tensor_cores
def _get_relative_pos(qm9_graph: DGLGraph) -> Tensor:
x = qm9_graph.ndata['pos']
src, dst = qm9_graph.edges()
rel_pos = x[dst] - x[src]
return rel_pos
def _get_split_sizes(full_dataset: Dataset) -> Tuple[int, int, int]:
len_full = len(full_dataset)
len_train = 100_000
len_test = int(0.1 * len_full)
len_val = len_full - len_train - len_test
return len_train, len_val, len_test
class QM9DataModule(DataModule):
"""
Datamodule wrapping https://docs.dgl.ai/en/latest/api/python/dgl.data.html#qm9edge-dataset
Training set is 100k molecules. Test set is 10% of the dataset. Validation set is the rest.
This includes all the molecules from QM9 except the ones that are uncharacterized.
"""
NODE_FEATURE_DIM = 6
EDGE_FEATURE_DIM = 4
def __init__(self,
data_dir: pathlib.Path,
task: str = 'homo',
batch_size: int = 240,
num_workers: int = 8,
num_degrees: int = 4,
amp: bool = False,
precompute_bases: bool = False,
**kwargs):
        self.data_dir = data_dir # This needs to be set before the super().__init__() call, which invokes prepare_data()
super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=self._collate)
self.amp = amp
self.task = task
self.batch_size = batch_size
self.num_degrees = num_degrees
qm9_kwargs = dict(label_keys=[self.task], verbose=False, raw_dir=str(data_dir))
if precompute_bases:
bases_kwargs = dict(max_degree=num_degrees - 1, use_pad_trick=using_tensor_cores(amp), amp=amp)
full_dataset = CachedBasesQM9EdgeDataset(bases_kwargs=bases_kwargs, batch_size=batch_size,
num_workers=num_workers, **qm9_kwargs)
else:
full_dataset = QM9EdgeDataset(**qm9_kwargs)
self.ds_train, self.ds_val, self.ds_test = random_split(full_dataset, _get_split_sizes(full_dataset),
generator=torch.Generator().manual_seed(0))
train_targets = full_dataset.targets[self.ds_train.indices, full_dataset.label_keys[0]]
self.targets_mean = train_targets.mean()
self.targets_std = train_targets.std()
def prepare_data(self):
# Download the QM9 preprocessed data
QM9EdgeDataset(verbose=True, raw_dir=str(self.data_dir))
def _collate(self, samples):
graphs, y, *bases = map(list, zip(*samples))
batched_graph = dgl.batch(graphs)
edge_feats = {'0': batched_graph.edata['edge_attr'][..., None]}
batched_graph.edata['rel_pos'] = _get_relative_pos(batched_graph)
# get node features
node_feats = {'0': batched_graph.ndata['attr'][:, :6, None]}
targets = (torch.cat(y) - self.targets_mean) / self.targets_std
if bases:
# collate bases
all_bases = {
key: torch.cat([b[key] for b in bases[0]], dim=0)
for key in bases[0][0].keys()
}
return batched_graph, node_feats, edge_feats, all_bases, targets
else:
return batched_graph, node_feats, edge_feats, targets
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("QM9 dataset")
parser.add_argument('--task', type=str, default='homo', const='homo', nargs='?',
choices=['mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv',
'U0_atom', 'U_atom', 'H_atom', 'G_atom', 'A', 'B', 'C'],
help='Regression task to train on')
parser.add_argument('--precompute_bases', type=str2bool, nargs='?', const=True, default=False,
help='Precompute bases at the beginning of the script during dataset initialization,'
' instead of computing them at the beginning of each forward pass.')
return parent_parser
def __repr__(self):
return f'QM9({self.task})'
class CachedBasesQM9EdgeDataset(QM9EdgeDataset):
""" Dataset extending the QM9 dataset from DGL with precomputed (cached in RAM) pairwise bases """
def __init__(self, bases_kwargs: dict, batch_size: int, num_workers: int, *args, **kwargs):
"""
:param bases_kwargs: Arguments to feed the bases computation function
:param batch_size: Batch size to use when iterating over the dataset for computing bases
"""
self.bases_kwargs = bases_kwargs
self.batch_size = batch_size
self.bases = None
self.num_workers = num_workers
super().__init__(*args, **kwargs)
def load(self):
super().load()
# Iterate through the dataset and compute bases (pairwise only)
# Potential improvement: use multi-GPU and gather
dataloader = DataLoader(self, shuffle=False, batch_size=self.batch_size, num_workers=self.num_workers,
collate_fn=lambda samples: dgl.batch([sample[0] for sample in samples]))
bases = []
for i, graph in tqdm(enumerate(dataloader), total=len(dataloader), desc='Precomputing QM9 bases',
disable=get_local_rank() != 0):
rel_pos = _get_relative_pos(graph)
# Compute the bases with the GPU but convert the result to CPU to store in RAM
bases.append({k: v.cpu() for k, v in get_basis(rel_pos.cuda(), **self.bases_kwargs).items()})
self.bases = bases # Assign at the end so that __getitem__ isn't confused
def __getitem__(self, idx: int):
graph, label = super().__getitem__(idx)
if self.bases:
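            # Bases were precomputed in load() in chunks of self.batch_size graphs; bases_idx picks
            # the chunk, and the cumulative edge counts (ne_cumsum) locate this graph's edge slice
            # inside the chunk's batched basis tensors.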
bases_idx = idx // self.batch_size
bases_cumsum_idx = self.ne_cumsum[idx] - self.ne_cumsum[bases_idx * self.batch_size]
bases_cumsum_next_idx = self.ne_cumsum[idx + 1] - self.ne_cumsum[bases_idx * self.batch_size]
return graph, label, {key: basis[bases_cumsum_idx:bases_cumsum_next_idx] for key, basis in
self.bases[bases_idx].items()}
else:
return graph, label
| RFdiffusion-main | env/SE3Transformer/se3_transformer/data_loading/qm9.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import torch.distributed as dist
from abc import ABC
from torch.utils.data import DataLoader, DistributedSampler, Dataset
from se3_transformer.runtime.utils import get_local_rank
def _get_dataloader(dataset: Dataset, shuffle: bool, **kwargs) -> DataLoader:
# Classic or distributed dataloader depending on the context
sampler = DistributedSampler(dataset, shuffle=shuffle) if dist.is_initialized() else None
return DataLoader(dataset, shuffle=(shuffle and sampler is None), sampler=sampler, **kwargs)
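# Under torch.distributed, each rank receives a disjoint shard of the dataset and shuffling is
# delegated to the DistributedSampler, which is why the DataLoader-level shuffle is disabled
# whenever a sampler is present.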
class DataModule(ABC):
""" Abstract DataModule. Children must define self.ds_{train | val | test}. """
def __init__(self, **dataloader_kwargs):
super().__init__()
if get_local_rank() == 0:
self.prepare_data()
# Wait until rank zero has prepared the data (download, preprocessing, ...)
if dist.is_initialized():
dist.barrier(device_ids=[get_local_rank()])
self.dataloader_kwargs = {'pin_memory': True, 'persistent_workers': True, **dataloader_kwargs}
self.ds_train, self.ds_val, self.ds_test = None, None, None
def prepare_data(self):
""" Method called only once per node. Put here any downloading or preprocessing """
pass
def train_dataloader(self) -> DataLoader:
return _get_dataloader(self.ds_train, shuffle=True, **self.dataloader_kwargs)
def val_dataloader(self) -> DataLoader:
return _get_dataloader(self.ds_val, shuffle=False, **self.dataloader_kwargs)
def test_dataloader(self) -> DataLoader:
return _get_dataloader(self.ds_test, shuffle=False, **self.dataloader_kwargs)
| RFdiffusion-main | env/SE3Transformer/se3_transformer/data_loading/data_module.py |
from .qm9 import QM9DataModule
| RFdiffusion-main | env/SE3Transformer/se3_transformer/data_loading/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from abc import ABC, abstractmethod
import torch
import torch.distributed as dist
from torch import Tensor
class Metric(ABC):
""" Metric class with synchronization capabilities similar to TorchMetrics """
def __init__(self):
self.states = {}
def add_state(self, name: str, default: Tensor):
assert name not in self.states
self.states[name] = default.clone()
setattr(self, name, default)
def synchronize(self):
if dist.is_initialized():
for state in self.states:
dist.all_reduce(getattr(self, state), op=dist.ReduceOp.SUM, group=dist.group.WORLD)
def __call__(self, *args, **kwargs):
self.update(*args, **kwargs)
def reset(self):
for name, default in self.states.items():
setattr(self, name, default.clone())
def compute(self):
self.synchronize()
value = self._compute().item()
self.reset()
return value
@abstractmethod
def _compute(self):
pass
@abstractmethod
def update(self, preds: Tensor, targets: Tensor):
pass
class MeanAbsoluteError(Metric):
def __init__(self):
super().__init__()
self.add_state('error', torch.tensor(0, dtype=torch.float32, device='cuda'))
self.add_state('total', torch.tensor(0, dtype=torch.int32, device='cuda'))
def update(self, preds: Tensor, targets: Tensor):
preds = preds.detach()
n = preds.shape[0]
error = torch.abs(preds.view(n, -1) - targets.view(n, -1)).sum()
self.total += n
self.error += error
def _compute(self):
return self.error / self.total
| RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/metrics.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import collections
import itertools
import math
import os
import pathlib
import re
import pynvml
class Device:
# assumes nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def get_name(self):
return pynvml.nvmlDeviceGetName(self.handle)
def get_uuid(self):
return pynvml.nvmlDeviceGetUUID(self.handle)
def get_cpu_affinity(self):
affinity_string = ""
for j in pynvml.nvmlDeviceGetCpuAffinity(self.handle, Device._nvml_affinity_elements):
# assume nvml returns list of 64 bit ints
affinity_string = "{:064b}".format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
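    # Example of the decoding above: if NVML returned the single 64-bit word 0x0F, the affinity
    # string would be '0...01111' and get_cpu_affinity() would return [0, 1, 2, 3].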
def get_thread_siblings_list():
"""
Returns a list of 2-element integer tuples representing pairs of
hyperthreading cores.
"""
path = "/sys/devices/system/cpu/cpu*/topology/thread_siblings_list"
thread_siblings_list = []
pattern = re.compile(r"(\d+)\D(\d+)")
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(map(int, res[0]))
thread_siblings_list.append(pair)
return thread_siblings_list
def check_socket_affinities(socket_affinities):
# sets of cores should be either identical or disjoint
for i, j in itertools.product(socket_affinities, socket_affinities):
if not set(i) == set(j) and not set(i).isdisjoint(set(j)):
raise RuntimeError(f"Sets of cores should be either identical or disjoint, " f"but got {i} and {j}.")
def get_socket_affinities(nproc_per_node, exclude_unavailable_cores=True):
devices = [Device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.get_cpu_affinity() for dev in devices]
if exclude_unavailable_cores:
available_cores = os.sched_getaffinity(0)
socket_affinities = [list(set(affinity) & available_cores) for affinity in socket_affinities]
check_socket_affinities(socket_affinities)
return socket_affinities
def set_socket_affinity(gpu_id):
"""
The process is assigned with all available logical CPU cores from the CPU
socket connected to the GPU with a given id.
Args:
gpu_id: index of a GPU
"""
dev = Device(gpu_id)
affinity = dev.get_cpu_affinity()
os.sched_setaffinity(0, affinity)
def set_single_affinity(gpu_id):
"""
The process is assigned with the first available logical CPU core from the
list of all CPU cores from the CPU socket connected to the GPU with a given
id.
Args:
gpu_id: index of a GPU
"""
dev = Device(gpu_id)
affinity = dev.get_cpu_affinity()
# exclude unavailable cores
available_cores = os.sched_getaffinity(0)
affinity = list(set(affinity) & available_cores)
os.sched_setaffinity(0, affinity[:1])
def set_single_unique_affinity(gpu_id, nproc_per_node):
"""
The process is assigned with a single unique available physical CPU core
from the list of all CPU cores from the CPU socket connected to the GPU with
a given id.
Args:
gpu_id: index of a GPU
"""
socket_affinities = get_socket_affinities(nproc_per_node)
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
affinities = []
assigned = []
for socket_affinity in socket_affinities:
for core in socket_affinity:
if core not in assigned:
affinities.append([core])
assigned.append(core)
break
os.sched_setaffinity(0, affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, mode, balanced=True):
"""
The process is assigned with an unique subset of available physical CPU
cores from the CPU socket connected to a GPU with a given id.
Assignment automatically includes hyperthreading siblings (if siblings are
available).
Args:
gpu_id: index of a GPU
nproc_per_node: total number of processes per node
mode: mode
balanced: assign an equal number of physical cores to each process
"""
socket_affinities = get_socket_affinities(nproc_per_node)
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove hyperthreading siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
# compute minimal number of physical cores per GPU across all GPUs and
# sockets, code assigns this number of cores per GPU if balanced == True
min_physical_cores_per_gpu = min(
[len(cores) // len(gpus) for cores, gpus in socket_affinities_to_device_ids.items()]
)
for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
if balanced:
cores_per_device = min_physical_cores_per_gpu
socket_affinity = socket_affinity[: devices_per_group * min_physical_cores_per_gpu]
else:
cores_per_device = len(socket_affinity) // devices_per_group
for group_id, device_id in enumerate(device_ids):
if device_id == gpu_id:
# In theory there should be no difference in performance between
# 'interleaved' and 'continuous' pattern on Intel-based DGX-1,
# but 'continuous' should be better for DGX A100 because on AMD
# Rome 4 consecutive cores are sharing L3 cache.
# TODO: code doesn't attempt to automatically detect layout of
# L3 cache, also external environment may already exclude some
# cores, this code makes no attempt to detect it and to align
# mapping to multiples of 4.
if mode == "interleaved":
affinity = list(socket_affinity[group_id::devices_per_group])
elif mode == "continuous":
affinity = list(socket_affinity[group_id * cores_per_device: (group_id + 1) * cores_per_device])
else:
raise RuntimeError("Unknown set_socket_unique_affinity mode")
# unconditionally reintroduce hyperthreading siblings, this step
# may result in a different numbers of logical cores assigned to
# each GPU even if balanced == True (if hyperthreading siblings
# aren't available for a subset of cores due to some external
# constraints, siblings are re-added unconditionally, in the
# worst case unavailable logical core will be ignored by
# os.sched_setaffinity().
affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
os.sched_setaffinity(0, affinity)
def set_affinity(gpu_id, nproc_per_node, mode="socket_unique_continuous", balanced=True):
"""
The process is assigned with a proper CPU affinity which matches hardware
architecture on a given platform. Usually it improves and stabilizes
performance of deep learning training workloads.
This function assumes that the workload is running in multi-process
single-device mode (there are multiple training processes and each process
is running on a single GPU), which is typical for multi-GPU training
workloads using `torch.nn.parallel.DistributedDataParallel`.
Available affinity modes:
* 'socket' - the process is assigned with all available logical CPU cores
from the CPU socket connected to the GPU with a given id.
* 'single' - the process is assigned with the first available logical CPU
core from the list of all CPU cores from the CPU socket connected to the GPU
with a given id (multiple GPUs could be assigned with the same CPU core).
* 'single_unique' - the process is assigned with a single unique available
physical CPU core from the list of all CPU cores from the CPU socket
connected to the GPU with a given id.
* 'socket_unique_interleaved' - the process is assigned with an unique
subset of available physical CPU cores from the CPU socket connected to a
GPU with a given id, hyperthreading siblings are included automatically,
cores are assigned with interleaved indexing pattern
* 'socket_unique_continuous' - (the default) the process is assigned with an
unique subset of available physical CPU cores from the CPU socket connected
to a GPU with a given id, hyperthreading siblings are included
automatically, cores are assigned with continuous indexing pattern
'socket_unique_continuous' is the recommended mode for deep learning
training workloads on NVIDIA DGX machines.
Args:
gpu_id: integer index of a GPU
nproc_per_node: number of processes per node
mode: affinity mode
balanced: assign an equal number of physical cores to each process,
affects only 'socket_unique_interleaved' and
'socket_unique_continuous' affinity modes
Returns a set of logical CPU cores on which the process is eligible to run.
Example:
import argparse
import os
import gpu_affinity
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--local_rank',
type=int,
default=os.getenv('LOCAL_RANK', 0),
)
args = parser.parse_args()
nproc_per_node = torch.cuda.device_count()
affinity = gpu_affinity.set_affinity(args.local_rank, nproc_per_node)
print(f'{args.local_rank}: core affinity: {affinity}')
if __name__ == "__main__":
main()
Launch the example with:
python -m torch.distributed.launch --nproc_per_node <#GPUs> example.py
WARNING: On DGX A100 only a half of CPU cores have direct access to GPUs.
This function restricts execution only to the CPU cores directly connected
to GPUs, so on DGX A100 it will limit the code to half of CPU cores and half
of CPU memory bandwidth (which may be fine for many DL models).
"""
pynvml.nvmlInit()
if mode == "socket":
set_socket_affinity(gpu_id)
elif mode == "single":
set_single_affinity(gpu_id)
elif mode == "single_unique":
set_single_unique_affinity(gpu_id, nproc_per_node)
elif mode == "socket_unique_interleaved":
set_socket_unique_affinity(gpu_id, nproc_per_node, "interleaved", balanced)
elif mode == "socket_unique_continuous":
set_socket_unique_affinity(gpu_id, nproc_per_node, "continuous", balanced)
else:
raise RuntimeError("Unknown affinity mode")
affinity = os.sched_getaffinity(0)
return affinity
| RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/gpu_affinity.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import pathlib
from abc import ABC, abstractmethod
from enum import Enum
from typing import Dict, Any, Callable, Optional
import dllogger
import torch.distributed as dist
import wandb
from dllogger import Verbosity
from se3_transformer.runtime.utils import rank_zero_only
class Logger(ABC):
@rank_zero_only
@abstractmethod
def log_hyperparams(self, params):
pass
@rank_zero_only
@abstractmethod
def log_metrics(self, metrics, step=None):
pass
@staticmethod
def _sanitize_params(params):
def _sanitize(val):
if isinstance(val, Callable):
try:
_val = val()
if isinstance(_val, Callable):
return val.__name__
return _val
except Exception:
return getattr(val, "__name__", None)
elif isinstance(val, pathlib.Path) or isinstance(val, Enum):
return str(val)
return val
return {key: _sanitize(val) for key, val in params.items()}
class LoggerCollection(Logger):
def __init__(self, loggers):
super().__init__()
self.loggers = loggers
def __getitem__(self, index):
        return self.loggers[index]
@rank_zero_only
def log_metrics(self, metrics, step=None):
for logger in self.loggers:
logger.log_metrics(metrics, step)
@rank_zero_only
def log_hyperparams(self, params):
for logger in self.loggers:
logger.log_hyperparams(params)
class DLLogger(Logger):
def __init__(self, save_dir: pathlib.Path, filename: str):
super().__init__()
if not dist.is_initialized() or dist.get_rank() == 0:
save_dir.mkdir(parents=True, exist_ok=True)
dllogger.init(
backends=[dllogger.JSONStreamBackend(Verbosity.DEFAULT, str(save_dir / filename))])
@rank_zero_only
def log_hyperparams(self, params):
params = self._sanitize_params(params)
dllogger.log(step="PARAMETER", data=params)
@rank_zero_only
def log_metrics(self, metrics, step=None):
if step is None:
step = tuple()
dllogger.log(step=step, data=metrics)
class WandbLogger(Logger):
def __init__(
self,
name: str,
save_dir: pathlib.Path,
id: Optional[str] = None,
project: Optional[str] = None
):
super().__init__()
if not dist.is_initialized() or dist.get_rank() == 0:
save_dir.mkdir(parents=True, exist_ok=True)
self.experiment = wandb.init(name=name,
project=project,
id=id,
dir=str(save_dir),
resume='allow',
anonymous='must')
@rank_zero_only
def log_hyperparams(self, params: Dict[str, Any]) -> None:
params = self._sanitize_params(params)
self.experiment.config.update(params, allow_val_change=True)
@rank_zero_only
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
if step is not None:
self.experiment.log({**metrics, 'epoch': step})
else:
self.experiment.log(metrics)
| RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/loggers.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import argparse
import pathlib
from se3_transformer.data_loading import QM9DataModule
from se3_transformer.model import SE3TransformerPooled
from se3_transformer.runtime.utils import str2bool
PARSER = argparse.ArgumentParser(description='SE(3)-Transformer')
paths = PARSER.add_argument_group('Paths')
paths.add_argument('--data_dir', type=pathlib.Path, default=pathlib.Path('./data'),
help='Directory where the data is located or should be downloaded')
paths.add_argument('--log_dir', type=pathlib.Path, default=pathlib.Path('/results'),
help='Directory where the results logs should be saved')
paths.add_argument('--dllogger_name', type=str, default='dllogger_results.json',
help='Name for the resulting DLLogger JSON file')
paths.add_argument('--save_ckpt_path', type=pathlib.Path, default=None,
help='File where the checkpoint should be saved')
paths.add_argument('--load_ckpt_path', type=pathlib.Path, default=None,
help='File of the checkpoint to be loaded')
optimizer = PARSER.add_argument_group('Optimizer')
optimizer.add_argument('--optimizer', choices=['adam', 'sgd', 'lamb'], default='adam')
optimizer.add_argument('--learning_rate', '--lr', dest='learning_rate', type=float, default=0.002)
optimizer.add_argument('--min_learning_rate', '--min_lr', dest='min_learning_rate', type=float, default=None)
optimizer.add_argument('--momentum', type=float, default=0.9)
optimizer.add_argument('--weight_decay', type=float, default=0.1)
PARSER.add_argument('--epochs', type=int, default=100, help='Number of training epochs')
PARSER.add_argument('--batch_size', type=int, default=240, help='Batch size')
PARSER.add_argument('--seed', type=int, default=None, help='Set a seed globally')
PARSER.add_argument('--num_workers', type=int, default=8, help='Number of dataloading workers')
PARSER.add_argument('--amp', type=str2bool, nargs='?', const=True, default=False, help='Use Automatic Mixed Precision')
PARSER.add_argument('--gradient_clip', type=float, default=None, help='Clipping of the gradient norms')
PARSER.add_argument('--accumulate_grad_batches', type=int, default=1, help='Gradient accumulation')
PARSER.add_argument('--ckpt_interval', type=int, default=-1, help='Save a checkpoint every N epochs')
PARSER.add_argument('--eval_interval', dest='eval_interval', type=int, default=1,
help='Do an evaluation round every N epochs')
PARSER.add_argument('--silent', type=str2bool, nargs='?', const=True, default=False,
help='Minimize stdout output')
PARSER.add_argument('--benchmark', type=str2bool, nargs='?', const=True, default=False,
help='Benchmark mode')
QM9DataModule.add_argparse_args(PARSER)
SE3TransformerPooled.add_argparse_args(PARSER)
| RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/arguments.py |
RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/__init__.py |
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import argparse
import ctypes
import logging
import os
import random
from functools import wraps
from typing import Union, List, Dict
import numpy as np
import torch
import torch.distributed as dist
from torch import Tensor
def aggregate_residual(feats1, feats2, method: str):
""" Add or concatenate two fiber features together. If degrees don't match, will use the ones of feats2. """
if method in ['add', 'sum']:
return {k: (v + feats1[k]) if k in feats1 else v for k, v in feats2.items()}
elif method in ['cat', 'concat']:
return {k: torch.cat([v, feats1[k]], dim=1) if k in feats1 else v for k, v in feats2.items()}
else:
raise ValueError('Method must be add/sum or cat/concat')
def degree_to_dim(degree: int) -> int:
return 2 * degree + 1
def unfuse_features(features: Tensor, degrees: List[int]) -> Dict[str, Tensor]:
return dict(zip(map(str, degrees), features.split([degree_to_dim(deg) for deg in degrees], dim=-1)))
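# Example: for degrees [0, 1, 2], a fused tensor of shape (..., 1 + 3 + 5) is split along the
# last dimension into {'0': (..., 1), '1': (..., 3), '2': (..., 5)}, using degree_to_dim for
# the per-degree widths.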
def str2bool(v: Union[bool, str]) -> bool:
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def to_cuda(x):
""" Try to convert a Tensor, a collection of Tensors or a DGLGraph to CUDA """
if isinstance(x, Tensor):
return x.cuda(non_blocking=True)
    elif isinstance(x, tuple):
        # materialize as a tuple (a bare generator here would be single-use and type-surprising)
        return tuple(to_cuda(v) for v in x)
elif isinstance(x, list):
return [to_cuda(v) for v in x]
elif isinstance(x, dict):
return {k: to_cuda(v) for k, v in x.items()}
else:
# DGLGraph or other objects
return x.to(device=torch.cuda.current_device())
def get_local_rank() -> int:
return int(os.environ.get('LOCAL_RANK', 0))
def init_distributed() -> bool:
world_size = int(os.environ.get('WORLD_SIZE', 1))
distributed = world_size > 1
if distributed:
backend = 'nccl' if torch.cuda.is_available() else 'gloo'
dist.init_process_group(backend=backend, init_method='env://')
if backend == 'nccl':
torch.cuda.set_device(get_local_rank())
else:
logging.warning('Running on CPU only!')
assert torch.distributed.is_initialized()
return distributed
def increase_l2_fetch_granularity():
# maximum fetch granularity of L2: 128 bytes
_libcudart = ctypes.CDLL('libcudart.so')
# set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int * 1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def seed_everything(seed):
seed = int(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def rank_zero_only(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
if not dist.is_initialized() or dist.get_rank() == 0:
return fn(*args, **kwargs)
return wrapped_fn
def using_tensor_cores(amp: bool) -> bool:
major_cc, minor_cc = torch.cuda.get_device_capability()
return (amp and major_cc >= 7) or major_cc >= 8
| RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import logging
import time
from abc import ABC, abstractmethod
from typing import Optional
import numpy as np
import torch
from se3_transformer.runtime.loggers import Logger
from se3_transformer.runtime.metrics import MeanAbsoluteError
class BaseCallback(ABC):
def on_fit_start(self, optimizer, args):
pass
def on_fit_end(self):
pass
def on_epoch_end(self):
pass
def on_batch_start(self):
pass
def on_validation_step(self, input, target, pred):
pass
def on_validation_end(self, epoch=None):
pass
def on_checkpoint_load(self, checkpoint):
pass
def on_checkpoint_save(self, checkpoint):
pass
class LRSchedulerCallback(BaseCallback):
def __init__(self, logger: Optional[Logger] = None):
self.logger = logger
self.scheduler = None
@abstractmethod
def get_scheduler(self, optimizer, args):
pass
def on_fit_start(self, optimizer, args):
self.scheduler = self.get_scheduler(optimizer, args)
def on_checkpoint_load(self, checkpoint):
self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
def on_checkpoint_save(self, checkpoint):
checkpoint['scheduler_state_dict'] = self.scheduler.state_dict()
def on_epoch_end(self):
if self.logger is not None:
self.logger.log_metrics({'learning rate': self.scheduler.get_last_lr()[0]}, step=self.scheduler.last_epoch)
self.scheduler.step()
class QM9MetricCallback(BaseCallback):
""" Logs the rescaled mean absolute error for QM9 regression tasks """
def __init__(self, logger, targets_std, prefix=''):
self.mae = MeanAbsoluteError()
self.logger = logger
self.targets_std = targets_std
self.prefix = prefix
self.best_mae = float('inf')
def on_validation_step(self, input, target, pred):
self.mae(pred.detach(), target.detach())
def on_validation_end(self, epoch=None):
mae = self.mae.compute() * self.targets_std
logging.info(f'{self.prefix} MAE: {mae}')
self.logger.log_metrics({f'{self.prefix} MAE': mae}, epoch)
self.best_mae = min(self.best_mae, mae)
def on_fit_end(self):
if self.best_mae != float('inf'):
self.logger.log_metrics({f'{self.prefix} best MAE': self.best_mae})
class QM9LRSchedulerCallback(LRSchedulerCallback):
def __init__(self, logger, epochs):
super().__init__(logger)
self.epochs = epochs
def get_scheduler(self, optimizer, args):
min_lr = args.min_learning_rate if args.min_learning_rate else args.learning_rate / 10.0
return torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, self.epochs, eta_min=min_lr)
class PerformanceCallback(BaseCallback):
def __init__(self, logger, batch_size: int, warmup_epochs: int = 1, mode: str = 'train'):
self.batch_size = batch_size
self.warmup_epochs = warmup_epochs
self.epoch = 0
self.timestamps = []
self.mode = mode
self.logger = logger
def on_batch_start(self):
if self.epoch >= self.warmup_epochs:
self.timestamps.append(time.time() * 1000.0)
def _log_perf(self):
stats = self.process_performance_stats()
for k, v in stats.items():
logging.info(f'performance {k}: {v}')
self.logger.log_metrics(stats)
def on_epoch_end(self):
self.epoch += 1
def on_fit_end(self):
if self.epoch > self.warmup_epochs:
self._log_perf()
self.timestamps = []
def process_performance_stats(self):
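        # Timestamps were recorded in milliseconds (time.time() * 1000 in on_batch_start), so the
        # latency statistics below are in ms and 'throughput' is in samples per millisecond.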
timestamps = np.asarray(self.timestamps)
deltas = np.diff(timestamps)
throughput = (self.batch_size / deltas).mean()
stats = {
f"throughput_{self.mode}": throughput,
f"latency_{self.mode}_mean": deltas.mean(),
f"total_time_{self.mode}": timestamps[-1] - timestamps[0],
}
for level in [90, 95, 99]:
stats.update({f"latency_{self.mode}_{level}": np.percentile(deltas, level)})
return stats
| RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/callbacks.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import List
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
from tqdm import tqdm
from se3_transformer.runtime import gpu_affinity
from se3_transformer.runtime.arguments import PARSER
from se3_transformer.runtime.callbacks import BaseCallback
from se3_transformer.runtime.loggers import DLLogger
from se3_transformer.runtime.utils import to_cuda, get_local_rank
@torch.inference_mode()
def evaluate(model: nn.Module,
dataloader: DataLoader,
callbacks: List[BaseCallback],
args):
model.eval()
    for i, batch in tqdm(enumerate(dataloader), total=len(dataloader), unit='batch', desc='Evaluation',
leave=False, disable=(args.silent or get_local_rank() != 0)):
*input, target = to_cuda(batch)
for callback in callbacks:
callback.on_batch_start()
with torch.cuda.amp.autocast(enabled=args.amp):
pred = model(*input)
for callback in callbacks:
callback.on_validation_step(input, target, pred)
if __name__ == '__main__':
from se3_transformer.runtime.callbacks import QM9MetricCallback, PerformanceCallback
from se3_transformer.runtime.utils import init_distributed, seed_everything
from se3_transformer.model import SE3TransformerPooled, Fiber
from se3_transformer.data_loading import QM9DataModule
import torch.distributed as dist
import logging
import sys
is_distributed = init_distributed()
local_rank = get_local_rank()
args = PARSER.parse_args()
logging.getLogger().setLevel(logging.CRITICAL if local_rank != 0 or args.silent else logging.INFO)
logging.info('====== SE(3)-Transformer ======')
logging.info('| Inference on the test set |')
logging.info('===============================')
if not args.benchmark and args.load_ckpt_path is None:
logging.error('No load_ckpt_path provided, you need to provide a saved model to evaluate')
sys.exit(1)
if args.benchmark:
logging.info('Running benchmark mode with one warmup pass')
if args.seed is not None:
seed_everything(args.seed)
major_cc, minor_cc = torch.cuda.get_device_capability()
logger = DLLogger(args.log_dir, filename=args.dllogger_name)
datamodule = QM9DataModule(**vars(args))
model = SE3TransformerPooled(
fiber_in=Fiber({0: datamodule.NODE_FEATURE_DIM}),
fiber_out=Fiber({0: args.num_degrees * args.num_channels}),
fiber_edge=Fiber({0: datamodule.EDGE_FEATURE_DIM}),
output_dim=1,
tensor_cores=(args.amp and major_cc >= 7) or major_cc >= 8, # use Tensor Cores more effectively
**vars(args)
)
callbacks = [QM9MetricCallback(logger, targets_std=datamodule.targets_std, prefix='test')]
model.to(device=torch.cuda.current_device())
if args.load_ckpt_path is not None:
checkpoint = torch.load(str(args.load_ckpt_path), map_location={'cuda:0': f'cuda:{local_rank}'})
model.load_state_dict(checkpoint['state_dict'])
if is_distributed:
nproc_per_node = torch.cuda.device_count()
affinity = gpu_affinity.set_affinity(local_rank, nproc_per_node)
model = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
test_dataloader = datamodule.test_dataloader() if not args.benchmark else datamodule.train_dataloader()
evaluate(model,
test_dataloader,
callbacks,
args)
for callback in callbacks:
callback.on_validation_end()
if args.benchmark:
world_size = dist.get_world_size() if dist.is_initialized() else 1
callbacks = [PerformanceCallback(logger, args.batch_size * world_size, warmup_epochs=1, mode='inference')]
for _ in range(6):
evaluate(model,
test_dataloader,
callbacks,
args)
callbacks[0].on_epoch_end()
callbacks[0].on_fit_end()
| RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/inference.py |
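The checkpoint load above remaps tensors that were saved on `cuda:0` onto each rank's own GPU via `map_location`. A minimal sketch of the same pattern, with a hypothetical helper name and path:

import torch

def load_for_rank(ckpt_path: str, local_rank: int) -> dict:
    # Tensors serialized from 'cuda:0' are deserialized directly onto this
    # process's device, avoiding every rank funnelling through GPU 0.
    return torch.load(ckpt_path, map_location={'cuda:0': f'cuda:{local_rank}'})

# e.g. on rank 2 of a multi-GPU job:
# state = load_for_rank('model.pth', local_rank=2)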
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import logging
import pathlib
from typing import List
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from apex.optimizers import FusedAdam, FusedLAMB
from torch.nn.modules.loss import _Loss
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Optimizer
from torch.utils.data import DataLoader, DistributedSampler
from tqdm import tqdm
from se3_transformer.data_loading import QM9DataModule
from se3_transformer.model import SE3TransformerPooled
from se3_transformer.model.fiber import Fiber
from se3_transformer.runtime import gpu_affinity
from se3_transformer.runtime.arguments import PARSER
from se3_transformer.runtime.callbacks import QM9MetricCallback, QM9LRSchedulerCallback, BaseCallback, \
PerformanceCallback
from se3_transformer.runtime.inference import evaluate
from se3_transformer.runtime.loggers import LoggerCollection, DLLogger, WandbLogger, Logger
from se3_transformer.runtime.utils import to_cuda, get_local_rank, init_distributed, seed_everything, \
using_tensor_cores, increase_l2_fetch_granularity
def save_state(model: nn.Module, optimizer: Optimizer, epoch: int, path: pathlib.Path, callbacks: List[BaseCallback]):
""" Saves model, optimizer and epoch states to path (only once per node) """
if get_local_rank() == 0:
state_dict = model.module.state_dict() if isinstance(model, DistributedDataParallel) else model.state_dict()
checkpoint = {
'state_dict': state_dict,
'optimizer_state_dict': optimizer.state_dict(),
'epoch': epoch
}
for callback in callbacks:
callback.on_checkpoint_save(checkpoint)
torch.save(checkpoint, str(path))
logging.info(f'Saved checkpoint to {str(path)}')
def load_state(model: nn.Module, optimizer: Optimizer, path: pathlib.Path, callbacks: List[BaseCallback]):
""" Loads model, optimizer and epoch states from path """
checkpoint = torch.load(str(path), map_location={'cuda:0': f'cuda:{get_local_rank()}'})
if isinstance(model, DistributedDataParallel):
model.module.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
for callback in callbacks:
callback.on_checkpoint_load(checkpoint)
logging.info(f'Loaded checkpoint from {str(path)}')
return checkpoint['epoch']
def train_epoch(model, train_dataloader, loss_fn, epoch_idx, grad_scaler, optimizer, local_rank, callbacks, args):
losses = []
for i, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader), unit='batch',
desc=f'Epoch {epoch_idx}', disable=(args.silent or local_rank != 0)):
*inputs, target = to_cuda(batch)
for callback in callbacks:
callback.on_batch_start()
with torch.cuda.amp.autocast(enabled=args.amp):
pred = model(*inputs)
loss = loss_fn(pred, target) / args.accumulate_grad_batches
grad_scaler.scale(loss).backward()
# gradient accumulation
if (i + 1) % args.accumulate_grad_batches == 0 or (i + 1) == len(train_dataloader):
if args.gradient_clip:
grad_scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clip)
grad_scaler.step(optimizer)
grad_scaler.update()
optimizer.zero_grad()
losses.append(loss.item())
return np.mean(losses)
def train(model: nn.Module,
loss_fn: _Loss,
train_dataloader: DataLoader,
val_dataloader: DataLoader,
callbacks: List[BaseCallback],
logger: Logger,
args):
device = torch.cuda.current_device()
model.to(device=device)
local_rank = get_local_rank()
world_size = dist.get_world_size() if dist.is_initialized() else 1
if dist.is_initialized():
model = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
model.train()
grad_scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
if args.optimizer == 'adam':
optimizer = FusedAdam(model.parameters(), lr=args.learning_rate, betas=(args.momentum, 0.999),
weight_decay=args.weight_decay)
elif args.optimizer == 'lamb':
optimizer = FusedLAMB(model.parameters(), lr=args.learning_rate, betas=(args.momentum, 0.999),
weight_decay=args.weight_decay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum,
weight_decay=args.weight_decay)
epoch_start = load_state(model, optimizer, args.load_ckpt_path, callbacks) if args.load_ckpt_path else 0
for callback in callbacks:
callback.on_fit_start(optimizer, args)
for epoch_idx in range(epoch_start, args.epochs):
if isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch_idx)
loss = train_epoch(model, train_dataloader, loss_fn, epoch_idx, grad_scaler, optimizer, local_rank, callbacks, args)
if dist.is_initialized():
loss = torch.tensor(loss, dtype=torch.float, device=device)
torch.distributed.all_reduce(loss)
loss = (loss / world_size).item()
logging.info(f'Train loss: {loss}')
logger.log_metrics({'train loss': loss}, epoch_idx)
for callback in callbacks:
callback.on_epoch_end()
if not args.benchmark and args.save_ckpt_path is not None and args.ckpt_interval > 0 \
and (epoch_idx + 1) % args.ckpt_interval == 0:
save_state(model, optimizer, epoch_idx, args.save_ckpt_path, callbacks)
if not args.benchmark and args.eval_interval > 0 and (epoch_idx + 1) % args.eval_interval == 0:
evaluate(model, val_dataloader, callbacks, args)
model.train()
for callback in callbacks:
callback.on_validation_end(epoch_idx)
if args.save_ckpt_path is not None and not args.benchmark:
save_state(model, optimizer, args.epochs, args.save_ckpt_path, callbacks)
for callback in callbacks:
callback.on_fit_end()
def print_parameters_count(model):
num_params_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
logging.info(f'Number of trainable parameters: {num_params_trainable}')
if __name__ == '__main__':
is_distributed = init_distributed()
local_rank = get_local_rank()
args = PARSER.parse_args()
logging.getLogger().setLevel(logging.CRITICAL if local_rank != 0 or args.silent else logging.INFO)
logging.info('====== SE(3)-Transformer ======')
logging.info('| Training procedure |')
logging.info('===============================')
if args.seed is not None:
logging.info(f'Using seed {args.seed}')
seed_everything(args.seed)
logger = LoggerCollection([
DLLogger(save_dir=args.log_dir, filename=args.dllogger_name),
WandbLogger(name=f'QM9({args.task})', save_dir=args.log_dir, project='se3-transformer')
])
datamodule = QM9DataModule(**vars(args))
model = SE3TransformerPooled(
fiber_in=Fiber({0: datamodule.NODE_FEATURE_DIM}),
fiber_out=Fiber({0: args.num_degrees * args.num_channels}),
fiber_edge=Fiber({0: datamodule.EDGE_FEATURE_DIM}),
output_dim=1,
tensor_cores=using_tensor_cores(args.amp), # use Tensor Cores more effectively
**vars(args)
)
loss_fn = nn.L1Loss()
if args.benchmark:
logging.info('Running benchmark mode')
world_size = dist.get_world_size() if dist.is_initialized() else 1
callbacks = [PerformanceCallback(logger, args.batch_size * world_size)]
else:
callbacks = [QM9MetricCallback(logger, targets_std=datamodule.targets_std, prefix='validation'),
QM9LRSchedulerCallback(logger, epochs=args.epochs)]
if is_distributed:
gpu_affinity.set_affinity(gpu_id=get_local_rank(), nproc_per_node=torch.cuda.device_count())
print_parameters_count(model)
logger.log_hyperparams(vars(args))
increase_l2_fetch_granularity()
train(model,
loss_fn,
datamodule.train_dataloader(),
datamodule.val_dataloader(),
callbacks,
logger,
args)
logging.info('Training finished successfully')
| RFdiffusion-main | env/SE3Transformer/se3_transformer/runtime/training.py |
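`train_epoch` above combines AMP loss scaling with gradient accumulation: each micro-batch loss is pre-divided by `accumulate_grad_batches`, and the optimizer steps once per group. A stripped-down sketch of that pattern; the function name and batch format are illustrative, not part of the module:

import torch

def accumulate_train(model, batches, loss_fn, optimizer, accum=4):
    # Pre-scaling each loss makes the summed gradients match one large batch.
    scaler = torch.cuda.amp.GradScaler()
    for i, (x, y) in enumerate(batches):
        with torch.cuda.amp.autocast():
            loss = loss_fn(model(x), y) / accum
        scaler.scale(loss).backward()
        if (i + 1) % accum == 0 or (i + 1) == len(batches):
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()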
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from functools import lru_cache
from typing import Dict, List
import e3nn.o3 as o3
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.cuda.nvtx import range as nvtx_range
from se3_transformer.runtime.utils import degree_to_dim
@lru_cache(maxsize=None)
def get_clebsch_gordon(J: int, d_in: int, d_out: int, device) -> Tensor:
""" Get the (cached) Q^{d_out,d_in}_J matrices from equation (8) """
return o3.wigner_3j(J, d_in, d_out, dtype=torch.float64, device=device).permute(2, 1, 0)
@lru_cache(maxsize=None)
def get_all_clebsch_gordon(max_degree: int, device) -> List[List[Tensor]]:
all_cb = []
for d_in in range(max_degree + 1):
for d_out in range(max_degree + 1):
K_Js = []
for J in range(abs(d_in - d_out), d_in + d_out + 1):
K_Js.append(get_clebsch_gordon(J, d_in, d_out, device))
all_cb.append(K_Js)
return all_cb
def get_spherical_harmonics(relative_pos: Tensor, max_degree: int) -> List[Tensor]:
all_degrees = list(range(2 * max_degree + 1))
with nvtx_range('spherical harmonics'):
sh = o3.spherical_harmonics(all_degrees, relative_pos, normalize=True)
return torch.split(sh, [degree_to_dim(d) for d in all_degrees], dim=1)
@torch.jit.script
def get_basis_script(max_degree: int,
use_pad_trick: bool,
spherical_harmonics: List[Tensor],
clebsch_gordon: List[List[Tensor]],
amp: bool) -> Dict[str, Tensor]:
"""
Compute pairwise bases matrices for degrees up to max_degree
:param max_degree: Maximum input or output degree
:param use_pad_trick: Pad some of the odd dimensions for a better use of Tensor Cores
:param spherical_harmonics: List of computed spherical harmonics
:param clebsch_gordon: List of computed CB-coefficients
:param amp: When true, return bases in FP16 precision
"""
basis = {}
idx = 0
# Double for loop instead of product() because of JIT script
for d_in in range(max_degree + 1):
for d_out in range(max_degree + 1):
key = f'{d_in},{d_out}'
K_Js = []
for freq_idx, J in enumerate(range(abs(d_in - d_out), d_in + d_out + 1)):
Q_J = clebsch_gordon[idx][freq_idx]
K_Js.append(torch.einsum('n f, k l f -> n l k', spherical_harmonics[J].float(), Q_J.float()))
basis[key] = torch.stack(K_Js, 2) # Stack on second dim so order is n l f k
if amp:
basis[key] = basis[key].half()
if use_pad_trick:
basis[key] = F.pad(basis[key], (0, 1)) # Pad the k dimension, that can be sliced later
idx += 1
return basis
@torch.jit.script
def update_basis_with_fused(basis: Dict[str, Tensor],
max_degree: int,
use_pad_trick: bool,
fully_fused: bool) -> Dict[str, Tensor]:
""" Update the basis dict with partially and optionally fully fused bases """
num_edges = basis['0,0'].shape[0]
device = basis['0,0'].device
dtype = basis['0,0'].dtype
sum_dim = sum([degree_to_dim(d) for d in range(max_degree + 1)])
# Fused per output degree
for d_out in range(max_degree + 1):
sum_freq = sum([degree_to_dim(min(d, d_out)) for d in range(max_degree + 1)])
basis_fused = torch.zeros(num_edges, sum_dim, sum_freq, degree_to_dim(d_out) + int(use_pad_trick),
device=device, dtype=dtype)
acc_d, acc_f = 0, 0
for d_in in range(max_degree + 1):
basis_fused[:, acc_d:acc_d + degree_to_dim(d_in), acc_f:acc_f + degree_to_dim(min(d_out, d_in)),
:degree_to_dim(d_out)] = basis[f'{d_in},{d_out}'][:, :, :, :degree_to_dim(d_out)]
acc_d += degree_to_dim(d_in)
acc_f += degree_to_dim(min(d_out, d_in))
basis[f'out{d_out}_fused'] = basis_fused
# Fused per input degree
for d_in in range(max_degree + 1):
sum_freq = sum([degree_to_dim(min(d, d_in)) for d in range(max_degree + 1)])
basis_fused = torch.zeros(num_edges, degree_to_dim(d_in), sum_freq, sum_dim,
device=device, dtype=dtype)
acc_d, acc_f = 0, 0
for d_out in range(max_degree + 1):
basis_fused[:, :, acc_f:acc_f + degree_to_dim(min(d_out, d_in)), acc_d:acc_d + degree_to_dim(d_out)] \
= basis[f'{d_in},{d_out}'][:, :, :, :degree_to_dim(d_out)]
acc_d += degree_to_dim(d_out)
acc_f += degree_to_dim(min(d_out, d_in))
basis[f'in{d_in}_fused'] = basis_fused
if fully_fused:
# Fully fused
# Double sum this way because of JIT script
sum_freq = sum([
sum([degree_to_dim(min(d_in, d_out)) for d_in in range(max_degree + 1)]) for d_out in range(max_degree + 1)
])
basis_fused = torch.zeros(num_edges, sum_dim, sum_freq, sum_dim, device=device, dtype=dtype)
acc_d, acc_f = 0, 0
for d_out in range(max_degree + 1):
b = basis[f'out{d_out}_fused']
basis_fused[:, :, acc_f:acc_f + b.shape[2], acc_d:acc_d + degree_to_dim(d_out)] = b[:, :, :,
:degree_to_dim(d_out)]
acc_f += b.shape[2]
acc_d += degree_to_dim(d_out)
basis['fully_fused'] = basis_fused
del basis['0,0'] # We know that the basis for l = k = 0 is filled with a constant
return basis
def get_basis(relative_pos: Tensor,
max_degree: int = 4,
compute_gradients: bool = False,
use_pad_trick: bool = False,
amp: bool = False) -> Dict[str, Tensor]:
with nvtx_range('spherical harmonics'):
spherical_harmonics = get_spherical_harmonics(relative_pos, max_degree)
with nvtx_range('CB coefficients'):
clebsch_gordon = get_all_clebsch_gordon(max_degree, relative_pos.device)
with torch.autograd.set_grad_enabled(compute_gradients):
with nvtx_range('bases'):
basis = get_basis_script(max_degree=max_degree,
use_pad_trick=use_pad_trick,
spherical_harmonics=spherical_harmonics,
clebsch_gordon=clebsch_gordon,
amp=amp)
return basis
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/basis.py |
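A small sketch of how `get_basis` above might be exercised, assuming `e3nn` and this package are importable. Each pairwise basis tensor has shape `(num_edges, 2*d_in + 1, num_freq, 2*d_out + 1)`, with one frequency per `J` in `|d_in - d_out| .. d_in + d_out`:

import torch
from se3_transformer.model.basis import get_basis  # the module above

rel_pos = torch.randn(128, 3)            # one relative position per edge
basis = get_basis(rel_pos, max_degree=2)
for key, tensor in basis.items():
    # e.g. basis['1,2'] has shape (128, 3, 3, 5): J in {1, 2, 3}
    print(key, tuple(tensor.shape))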
from .transformer import SE3Transformer, SE3TransformerPooled
from .fiber import Fiber
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import logging
from typing import Optional, Literal, Dict
import torch
import torch.nn as nn
from dgl import DGLGraph
from torch import Tensor
from se3_transformer.model.basis import get_basis, update_basis_with_fused
from se3_transformer.model.layers.attention import AttentionBlockSE3
from se3_transformer.model.layers.convolution import ConvSE3, ConvSE3FuseLevel
from se3_transformer.model.layers.norm import NormSE3
from se3_transformer.model.layers.pooling import GPooling
from se3_transformer.runtime.utils import str2bool
from se3_transformer.model.fiber import Fiber
class Sequential(nn.Sequential):
""" Sequential module with arbitrary forward args and kwargs. Used to pass graph, basis and edge features. """
def forward(self, input, *args, **kwargs):
for module in self:
input = module(input, *args, **kwargs)
return input
def get_populated_edge_features(relative_pos: Tensor, edge_features: Optional[Dict[str, Tensor]] = None):
""" Add relative positions to existing edge features """
edge_features = edge_features.copy() if edge_features else {}
r = relative_pos.norm(dim=-1, keepdim=True)
if '0' in edge_features:
edge_features['0'] = torch.cat([edge_features['0'], r[..., None]], dim=1)
else:
edge_features['0'] = r[..., None]
return edge_features
class SE3Transformer(nn.Module):
def __init__(self,
num_layers: int,
fiber_in: Fiber,
fiber_hidden: Fiber,
fiber_out: Fiber,
num_heads: int,
channels_div: int,
fiber_edge: Fiber = Fiber({}),
return_type: Optional[int] = None,
pooling: Optional[Literal['avg', 'max']] = None,
norm: bool = True,
use_layer_norm: bool = True,
tensor_cores: bool = False,
low_memory: bool = False,
**kwargs):
"""
:param num_layers: Number of attention layers
:param fiber_in: Input fiber description
:param fiber_hidden: Hidden fiber description
:param fiber_out: Output fiber description
:param fiber_edge: Input edge fiber description
:param num_heads: Number of attention heads
:param channels_div: Channels division before feeding to attention layer
:param return_type: Return only features of this type
:param pooling: 'avg' or 'max' graph pooling before MLP layers
:param norm: Apply a normalization layer after each attention block
:param use_layer_norm: Apply layer normalization between MLP layers
:param tensor_cores: True if using Tensor Cores (affects the use of fully fused convs, and padded bases)
:param low_memory: If True, will use slower ops that use less memory
"""
super().__init__()
self.num_layers = num_layers
self.fiber_edge = fiber_edge
self.num_heads = num_heads
self.channels_div = channels_div
self.return_type = return_type
self.pooling = pooling
self.max_degree = max(*fiber_in.degrees, *fiber_hidden.degrees, *fiber_out.degrees)
self.tensor_cores = tensor_cores
self.low_memory = low_memory
if low_memory and not tensor_cores:
            logging.warning('Low memory mode has no effect without Tensor Cores')
# Fully fused convolutions when using Tensor Cores (and not low memory mode)
fuse_level = ConvSE3FuseLevel.FULL if tensor_cores and not low_memory else ConvSE3FuseLevel.PARTIAL
graph_modules = []
for i in range(num_layers):
graph_modules.append(AttentionBlockSE3(fiber_in=fiber_in,
fiber_out=fiber_hidden,
fiber_edge=fiber_edge,
num_heads=num_heads,
channels_div=channels_div,
use_layer_norm=use_layer_norm,
max_degree=self.max_degree,
fuse_level=fuse_level))
if norm:
graph_modules.append(NormSE3(fiber_hidden))
fiber_in = fiber_hidden
graph_modules.append(ConvSE3(fiber_in=fiber_in,
fiber_out=fiber_out,
fiber_edge=fiber_edge,
self_interaction=True,
use_layer_norm=use_layer_norm,
max_degree=self.max_degree))
self.graph_modules = Sequential(*graph_modules)
if pooling is not None:
assert return_type is not None, 'return_type must be specified when pooling'
self.pooling_module = GPooling(pool=pooling, feat_type=return_type)
def forward(self, graph: DGLGraph, node_feats: Dict[str, Tensor],
edge_feats: Optional[Dict[str, Tensor]] = None,
basis: Optional[Dict[str, Tensor]] = None):
# Compute bases in case they weren't precomputed as part of the data loading
basis = basis or get_basis(graph.edata['rel_pos'], max_degree=self.max_degree, compute_gradients=False,
use_pad_trick=self.tensor_cores and not self.low_memory,
amp=torch.is_autocast_enabled())
# Add fused bases (per output degree, per input degree, and fully fused) to the dict
basis = update_basis_with_fused(basis, self.max_degree, use_pad_trick=self.tensor_cores and not self.low_memory,
fully_fused=self.tensor_cores and not self.low_memory)
edge_feats = get_populated_edge_features(graph.edata['rel_pos'], edge_feats)
node_feats = self.graph_modules(node_feats, edge_feats, graph=graph, basis=basis)
if self.pooling is not None:
return self.pooling_module(node_feats, graph=graph)
if self.return_type is not None:
return node_feats[str(self.return_type)]
return node_feats
@staticmethod
def add_argparse_args(parser):
parser.add_argument('--num_layers', type=int, default=7,
help='Number of stacked Transformer layers')
parser.add_argument('--num_heads', type=int, default=8,
help='Number of heads in self-attention')
parser.add_argument('--channels_div', type=int, default=2,
help='Channels division before feeding to attention layer')
parser.add_argument('--pooling', type=str, default=None, const=None, nargs='?', choices=['max', 'avg'],
help='Type of graph pooling')
parser.add_argument('--norm', type=str2bool, nargs='?', const=True, default=False,
help='Apply a normalization layer after each attention block')
parser.add_argument('--use_layer_norm', type=str2bool, nargs='?', const=True, default=False,
help='Apply layer normalization between MLP layers')
parser.add_argument('--low_memory', type=str2bool, nargs='?', const=True, default=False,
help='If true, will use fused ops that are slower but that use less memory '
'(expect 25 percent less memory). '
'Only has an effect if AMP is enabled on Volta GPUs, or if running on Ampere GPUs')
return parser
class SE3TransformerPooled(nn.Module):
def __init__(self,
fiber_in: Fiber,
fiber_out: Fiber,
fiber_edge: Fiber,
num_degrees: int,
num_channels: int,
output_dim: int,
**kwargs):
super().__init__()
kwargs['pooling'] = kwargs['pooling'] or 'max'
self.transformer = SE3Transformer(
fiber_in=fiber_in,
fiber_hidden=Fiber.create(num_degrees, num_channels),
fiber_out=fiber_out,
fiber_edge=fiber_edge,
return_type=0,
**kwargs
)
n_out_features = fiber_out.num_features
self.mlp = nn.Sequential(
nn.Linear(n_out_features, n_out_features),
nn.ReLU(),
nn.Linear(n_out_features, output_dim)
)
def forward(self, graph, node_feats, edge_feats, basis=None):
feats = self.transformer(graph, node_feats, edge_feats, basis).squeeze(-1)
y = self.mlp(feats).squeeze(-1)
return y
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("Model architecture")
SE3Transformer.add_argparse_args(parser)
parser.add_argument('--num_degrees',
help='Number of degrees to use. Hidden features will have types [0, ..., num_degrees - 1]',
type=int, default=4)
parser.add_argument('--num_channels', help='Number of channels for the hidden features', type=int, default=32)
return parent_parser
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/transformer.py |
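A minimal usage sketch for `SE3Transformer`, assuming DGL is installed; the only structural requirement is that the graph carries per-edge relative positions in `edata['rel_pos']`:

import dgl
import torch
from se3_transformer.model import SE3Transformer, Fiber

model = SE3Transformer(
    num_layers=2,
    fiber_in=Fiber({0: 8}),
    fiber_hidden=Fiber.create(num_degrees=3, num_channels=16),
    fiber_out=Fiber({0: 8, 1: 4}),
    num_heads=4,
    channels_div=2,
)

# Toy fully connected graph over 10 nodes.
src = torch.arange(10).repeat_interleave(10)
dst = torch.arange(10).repeat(10)
keep = src != dst
graph = dgl.graph((src[keep], dst[keep]))
coords = torch.randn(10, 3)
u, v = graph.edges()
graph.edata['rel_pos'] = coords[v] - coords[u]

node_feats = {'0': torch.randn(10, 8, 1)}   # (num_nodes, channels, 2*0 + 1)
out = model(graph, node_feats)              # dict of per-degree node features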
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from collections import namedtuple
from itertools import product
from typing import Dict
import torch
from torch import Tensor
from se3_transformer.runtime.utils import degree_to_dim
FiberEl = namedtuple('FiberEl', ['degree', 'channels'])
class Fiber(dict):
"""
Describes the structure of some set of features.
Features are split into types (0, 1, 2, 3, ...). A feature of type k has a dimension of 2k+1.
Type-0 features: invariant scalars
Type-1 features: equivariant 3D vectors
Type-2 features: equivariant symmetric traceless matrices
...
    As inputs to an SE(3) layer, there can be many features of the same type, and many features of different types.
The 'multiplicity' or 'number of channels' is the number of features of a given type.
This class puts together all the degrees and their multiplicities in order to describe
the inputs, outputs or hidden features of SE3 layers.
"""
def __init__(self, structure):
if isinstance(structure, dict):
structure = [FiberEl(int(d), int(m)) for d, m in sorted(structure.items(), key=lambda x: x[1])]
elif not isinstance(structure[0], FiberEl):
structure = list(map(lambda t: FiberEl(*t), sorted(structure, key=lambda x: x[1])))
self.structure = structure
super().__init__({d: m for d, m in self.structure})
@property
def degrees(self):
return sorted([t.degree for t in self.structure])
@property
def channels(self):
return [self[d] for d in self.degrees]
@property
def num_features(self):
""" Size of the resulting tensor if all features were concatenated together """
return sum(t.channels * degree_to_dim(t.degree) for t in self.structure)
@staticmethod
def create(num_degrees: int, num_channels: int):
""" Create a Fiber with degrees 0..num_degrees-1, all with the same multiplicity """
return Fiber([(degree, num_channels) for degree in range(num_degrees)])
@staticmethod
def from_features(feats: Dict[str, Tensor]):
""" Infer the Fiber structure from a feature dict """
structure = {}
for k, v in feats.items():
degree = int(k)
assert len(v.shape) == 3, 'Feature shape should be (N, C, 2D+1)'
assert v.shape[-1] == degree_to_dim(degree)
structure[degree] = v.shape[-2]
return Fiber(structure)
def __getitem__(self, degree: int):
""" fiber[degree] returns the multiplicity for this degree """
return dict(self.structure).get(degree, 0)
def __iter__(self):
""" Iterate over namedtuples (degree, channels) """
return iter(self.structure)
def __mul__(self, other):
"""
        If other is an int, multiplies all the multiplicities by other.
If other is a fiber, returns the cartesian product.
"""
if isinstance(other, Fiber):
return product(self.structure, other.structure)
elif isinstance(other, int):
return Fiber({t.degree: t.channels * other for t in self.structure})
def __add__(self, other):
"""
        If other is an int, adds other to all the multiplicities.
If other is a fiber, add the multiplicities of the fibers together.
"""
if isinstance(other, Fiber):
return Fiber({t.degree: t.channels + other[t.degree] for t in self.structure})
elif isinstance(other, int):
return Fiber({t.degree: t.channels + other for t in self.structure})
def __repr__(self):
return str(self.structure)
@staticmethod
def combine_max(f1, f2):
""" Combine two fiber by taking the maximum multiplicity for each degree in both fibers """
new_dict = dict(f1.structure)
for k, m in f2.structure:
new_dict[k] = max(new_dict.get(k, 0), m)
return Fiber(list(new_dict.items()))
@staticmethod
def combine_selectively(f1, f2):
""" Combine two fiber by taking the sum of multiplicities for each degree in the first fiber """
# only use orders which occur in fiber f1
new_dict = dict(f1.structure)
for k in f1.degrees:
if k in f2.degrees:
new_dict[k] += f2[k]
return Fiber(list(new_dict.items()))
def to_attention_heads(self, tensors: Dict[str, Tensor], num_heads: int):
# dict(N, num_channels, 2d+1) -> (N, num_heads, -1)
fibers = [tensors[str(degree)].reshape(*tensors[str(degree)].shape[:-2], num_heads, -1) for degree in
self.degrees]
fibers = torch.cat(fibers, -1)
return fibers
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/fiber.py |
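The `Fiber` bookkeeping is easiest to see on a toy example; `num_features` sums `channels * (2*degree + 1)` over all degrees:

from se3_transformer.model.fiber import Fiber

f = Fiber({0: 32, 1: 16})
print(f.degrees)                 # [0, 1]
print(f.channels)                # [32, 16]
print(f.num_features)            # 32*1 + 16*3 = 80
print((f * 2)[1])                # 32: all multiplicities doubled
print((f + Fiber({1: 4}))[1])    # 20: per-degree sum of multiplicities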
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import dgl
import numpy as np
import torch
import torch.nn as nn
from dgl import DGLGraph
from dgl.ops import edge_softmax
from torch import Tensor
from typing import Dict, Optional, Union
from se3_transformer.model.fiber import Fiber
from se3_transformer.model.layers.convolution import ConvSE3, ConvSE3FuseLevel
from se3_transformer.model.layers.linear import LinearSE3
from se3_transformer.runtime.utils import degree_to_dim, aggregate_residual, unfuse_features
from torch.cuda.nvtx import range as nvtx_range
class AttentionSE3(nn.Module):
""" Multi-headed sparse graph self-attention (SE(3)-equivariant) """
def __init__(
self,
num_heads: int,
key_fiber: Fiber,
value_fiber: Fiber
):
"""
:param num_heads: Number of attention heads
:param key_fiber: Fiber for the keys (and also for the queries)
:param value_fiber: Fiber for the values
"""
super().__init__()
self.num_heads = num_heads
self.key_fiber = key_fiber
self.value_fiber = value_fiber
def forward(
self,
value: Union[Tensor, Dict[str, Tensor]], # edge features (may be fused)
key: Union[Tensor, Dict[str, Tensor]], # edge features (may be fused)
query: Dict[str, Tensor], # node features
graph: DGLGraph
):
with nvtx_range('AttentionSE3'):
with nvtx_range('reshape keys and queries'):
if isinstance(key, Tensor):
# case where features of all types are fused
key = key.reshape(key.shape[0], self.num_heads, -1)
# need to reshape queries that way to keep the same layout as keys
out = torch.cat([query[str(d)] for d in self.key_fiber.degrees], dim=-1)
query = out.reshape(list(query.values())[0].shape[0], self.num_heads, -1)
else:
# features are not fused, need to fuse and reshape them
key = self.key_fiber.to_attention_heads(key, self.num_heads)
query = self.key_fiber.to_attention_heads(query, self.num_heads)
with nvtx_range('attention dot product + softmax'):
# Compute attention weights (softmax of inner product between key and query)
edge_weights = dgl.ops.e_dot_v(graph, key, query).squeeze(-1)
edge_weights /= np.sqrt(self.key_fiber.num_features)
edge_weights = edge_softmax(graph, edge_weights)
edge_weights = edge_weights[..., None, None]
with nvtx_range('weighted sum'):
if isinstance(value, Tensor):
# features of all types are fused
v = value.view(value.shape[0], self.num_heads, -1, value.shape[-1])
weights = edge_weights * v
feat_out = dgl.ops.copy_e_sum(graph, weights)
feat_out = feat_out.view(feat_out.shape[0], -1, feat_out.shape[-1]) # merge heads
out = unfuse_features(feat_out, self.value_fiber.degrees)
else:
out = {}
for degree, channels in self.value_fiber:
v = value[str(degree)].view(-1, self.num_heads, channels // self.num_heads,
degree_to_dim(degree))
weights = edge_weights * v
res = dgl.ops.copy_e_sum(graph, weights)
out[str(degree)] = res.view(-1, channels, degree_to_dim(degree)) # merge heads
return out
class AttentionBlockSE3(nn.Module):
""" Multi-headed sparse graph self-attention block with skip connection, linear projection (SE(3)-equivariant) """
def __init__(
self,
fiber_in: Fiber,
fiber_out: Fiber,
fiber_edge: Optional[Fiber] = None,
num_heads: int = 4,
channels_div: int = 2,
use_layer_norm: bool = False,
            max_degree: int = 4,
fuse_level: ConvSE3FuseLevel = ConvSE3FuseLevel.FULL,
**kwargs
):
"""
:param fiber_in: Fiber describing the input features
:param fiber_out: Fiber describing the output features
:param fiber_edge: Fiber describing the edge features (node distances excluded)
:param num_heads: Number of attention heads
:param channels_div: Divide the channels by this integer for computing values
:param use_layer_norm: Apply layer normalization between MLP layers
:param max_degree: Maximum degree used in the bases computation
:param fuse_level: Maximum fuse level to use in TFN convolutions
"""
super().__init__()
if fiber_edge is None:
fiber_edge = Fiber({})
self.fiber_in = fiber_in
# value_fiber has same structure as fiber_out but #channels divided by 'channels_div'
value_fiber = Fiber([(degree, channels // channels_div) for degree, channels in fiber_out])
# key_query_fiber has the same structure as fiber_out, but only degrees which are in in_fiber
# (queries are merely projected, hence degrees have to match input)
key_query_fiber = Fiber([(fe.degree, fe.channels) for fe in value_fiber if fe.degree in fiber_in.degrees])
self.to_key_value = ConvSE3(fiber_in, value_fiber + key_query_fiber, pool=False, fiber_edge=fiber_edge,
use_layer_norm=use_layer_norm, max_degree=max_degree, fuse_level=fuse_level,
allow_fused_output=True)
self.to_query = LinearSE3(fiber_in, key_query_fiber)
self.attention = AttentionSE3(num_heads, key_query_fiber, value_fiber)
self.project = LinearSE3(value_fiber + fiber_in, fiber_out)
def forward(
self,
node_features: Dict[str, Tensor],
edge_features: Dict[str, Tensor],
graph: DGLGraph,
basis: Dict[str, Tensor]
):
with nvtx_range('AttentionBlockSE3'):
with nvtx_range('keys / values'):
fused_key_value = self.to_key_value(node_features, edge_features, graph, basis)
key, value = self._get_key_value_from_fused(fused_key_value)
with nvtx_range('queries'):
query = self.to_query(node_features)
z = self.attention(value, key, query, graph)
z_concat = aggregate_residual(node_features, z, 'cat')
return self.project(z_concat)
def _get_key_value_from_fused(self, fused_key_value):
        # Extract keys and values from fused features
if isinstance(fused_key_value, Tensor):
# Previous layer was a fully fused convolution
value, key = torch.chunk(fused_key_value, chunks=2, dim=-2)
else:
key, value = {}, {}
for degree, feat in fused_key_value.items():
if int(degree) in self.fiber_in.degrees:
value[degree], key[degree] = torch.chunk(feat, chunks=2, dim=-2)
else:
value[degree] = feat
return key, value
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/attention.py |
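The fused key/query path in `AttentionSE3` leans on `Fiber.to_attention_heads`, which reshapes each degree to `(N, num_heads, -1)` and concatenates along the last axis. A quick shape check:

import torch
from se3_transformer.model.fiber import Fiber

fiber = Fiber({0: 8, 1: 8})
feats = {'0': torch.randn(5, 8, 1), '1': torch.randn(5, 8, 3)}
heads = fiber.to_attention_heads(feats, num_heads=2)
print(heads.shape)  # (5, 2, 16): per head, 8//2 * 1 + 8//2 * 3 = 16 features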
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from enum import Enum
from itertools import product
from typing import Dict
import dgl
import numpy as np
import torch
import torch.nn as nn
from dgl import DGLGraph
from torch import Tensor
from torch.cuda.nvtx import range as nvtx_range
from se3_transformer.model.fiber import Fiber
from se3_transformer.runtime.utils import degree_to_dim, unfuse_features
class ConvSE3FuseLevel(Enum):
"""
Enum to select a maximum level of fusing optimizations that will be applied when certain conditions are met.
    If a desired level L is picked but cannot be applied to a given layer, fused ops of level < L are considered instead.
A higher level means faster training, but also more memory usage.
If you are tight on memory and want to feed large inputs to the network, choose a low value.
If you want to train fast, choose a high value.
Recommended value is FULL with AMP.
Fully fused TFN convolutions requirements:
- all input channels are the same
- all output channels are the same
- input degrees span the range [0, ..., max_degree]
- output degrees span the range [0, ..., max_degree]
Partially fused TFN convolutions requirements:
* For fusing by output degree:
- all input channels are the same
- input degrees span the range [0, ..., max_degree]
* For fusing by input degree:
- all output channels are the same
- output degrees span the range [0, ..., max_degree]
Original TFN pairwise convolutions: no requirements
"""
FULL = 2
PARTIAL = 1
NONE = 0
class RadialProfile(nn.Module):
"""
Radial profile function.
Outputs weights used to weigh basis matrices in order to get convolution kernels.
In TFN notation: $R^{l,k}$
In SE(3)-Transformer notation: $\phi^{l,k}$
Note:
In the original papers, this function only depends on relative node distances ||x||.
Here, we allow this function to also take as input additional invariant edge features.
This does not break equivariance and adds expressive power to the model.
Diagram:
invariant edge features (node distances included) ───> MLP layer (shared across edges) ───> radial weights
"""
def __init__(
self,
num_freq: int,
channels_in: int,
channels_out: int,
edge_dim: int = 1,
mid_dim: int = 32,
use_layer_norm: bool = False
):
"""
:param num_freq: Number of frequencies
:param channels_in: Number of input channels
:param channels_out: Number of output channels
:param edge_dim: Number of invariant edge features (input to the radial function)
:param mid_dim: Size of the hidden MLP layers
:param use_layer_norm: Apply layer normalization between MLP layers
"""
super().__init__()
modules = [
nn.Linear(edge_dim, mid_dim),
nn.LayerNorm(mid_dim) if use_layer_norm else None,
nn.ReLU(),
nn.Linear(mid_dim, mid_dim),
nn.LayerNorm(mid_dim) if use_layer_norm else None,
nn.ReLU(),
nn.Linear(mid_dim, num_freq * channels_in * channels_out, bias=False)
]
self.net = nn.Sequential(*[m for m in modules if m is not None])
def forward(self, features: Tensor) -> Tensor:
return self.net(features)
class VersatileConvSE3(nn.Module):
"""
Building block for TFN convolutions.
This single module can be used for fully fused convolutions, partially fused convolutions, or pairwise convolutions.
"""
def __init__(self,
freq_sum: int,
channels_in: int,
channels_out: int,
edge_dim: int,
use_layer_norm: bool,
fuse_level: ConvSE3FuseLevel):
super().__init__()
self.freq_sum = freq_sum
self.channels_out = channels_out
self.channels_in = channels_in
self.fuse_level = fuse_level
self.radial_func = RadialProfile(num_freq=freq_sum,
channels_in=channels_in,
channels_out=channels_out,
edge_dim=edge_dim,
use_layer_norm=use_layer_norm)
def forward(self, features: Tensor, invariant_edge_feats: Tensor, basis: Tensor):
        with nvtx_range('VersatileConvSE3'):
num_edges = features.shape[0]
in_dim = features.shape[2]
            with nvtx_range('RadialProfile'):
radial_weights = self.radial_func(invariant_edge_feats) \
.view(-1, self.channels_out, self.channels_in * self.freq_sum)
if basis is not None:
# This block performs the einsum n i l, n o i f, n l f k -> n o k
out_dim = basis.shape[-1]
if self.fuse_level != ConvSE3FuseLevel.FULL:
out_dim += out_dim % 2 - 1 # Account for padded basis
basis_view = basis.view(num_edges, in_dim, -1)
tmp = (features @ basis_view).view(num_edges, -1, basis.shape[-1])
return (radial_weights @ tmp)[:, :, :out_dim]
else:
# k = l = 0 non-fused case
return radial_weights @ features
class ConvSE3(nn.Module):
"""
SE(3)-equivariant graph convolution (Tensor Field Network convolution).
This convolution can map an arbitrary input Fiber to an arbitrary output Fiber, while preserving equivariance.
Features of different degrees interact together to produce output features.
Note 1:
The option is given to not pool the output. This means that the convolution sum over neighbors will not be
done, and the returned features will be edge features instead of node features.
Note 2:
    Unlike the original paper and implementation, this convolution can handle edge features of degree greater than 0.
Input edge features are concatenated with input source node features before the kernel is applied.
"""
def __init__(
self,
fiber_in: Fiber,
fiber_out: Fiber,
fiber_edge: Fiber,
pool: bool = True,
use_layer_norm: bool = False,
self_interaction: bool = False,
max_degree: int = 4,
fuse_level: ConvSE3FuseLevel = ConvSE3FuseLevel.FULL,
allow_fused_output: bool = False
):
"""
:param fiber_in: Fiber describing the input features
:param fiber_out: Fiber describing the output features
:param fiber_edge: Fiber describing the edge features (node distances excluded)
:param pool: If True, compute final node features by averaging incoming edge features
:param use_layer_norm: Apply layer normalization between MLP layers
:param self_interaction: Apply self-interaction of nodes
:param max_degree: Maximum degree used in the bases computation
:param fuse_level: Maximum fuse level to use in TFN convolutions
:param allow_fused_output: Allow the module to output a fused representation of features
"""
super().__init__()
self.pool = pool
self.fiber_in = fiber_in
self.fiber_out = fiber_out
self.self_interaction = self_interaction
self.max_degree = max_degree
self.allow_fused_output = allow_fused_output
# channels_in: account for the concatenation of edge features
channels_in_set = set([f.channels + fiber_edge[f.degree] * (f.degree > 0) for f in self.fiber_in])
channels_out_set = set([f.channels for f in self.fiber_out])
unique_channels_in = (len(channels_in_set) == 1)
unique_channels_out = (len(channels_out_set) == 1)
degrees_up_to_max = list(range(max_degree + 1))
common_args = dict(edge_dim=fiber_edge[0] + 1, use_layer_norm=use_layer_norm)
if fuse_level.value >= ConvSE3FuseLevel.FULL.value and \
unique_channels_in and fiber_in.degrees == degrees_up_to_max and \
unique_channels_out and fiber_out.degrees == degrees_up_to_max:
# Single fused convolution
self.used_fuse_level = ConvSE3FuseLevel.FULL
sum_freq = sum([
degree_to_dim(min(d_in, d_out))
for d_in, d_out in product(degrees_up_to_max, degrees_up_to_max)
])
self.conv = VersatileConvSE3(sum_freq, list(channels_in_set)[0], list(channels_out_set)[0],
fuse_level=self.used_fuse_level, **common_args)
elif fuse_level.value >= ConvSE3FuseLevel.PARTIAL.value and \
unique_channels_in and fiber_in.degrees == degrees_up_to_max:
# Convolutions fused per output degree
self.used_fuse_level = ConvSE3FuseLevel.PARTIAL
self.conv_out = nn.ModuleDict()
for d_out, c_out in fiber_out:
sum_freq = sum([degree_to_dim(min(d_out, d)) for d in fiber_in.degrees])
self.conv_out[str(d_out)] = VersatileConvSE3(sum_freq, list(channels_in_set)[0], c_out,
fuse_level=self.used_fuse_level, **common_args)
elif fuse_level.value >= ConvSE3FuseLevel.PARTIAL.value and \
unique_channels_out and fiber_out.degrees == degrees_up_to_max:
# Convolutions fused per input degree
self.used_fuse_level = ConvSE3FuseLevel.PARTIAL
self.conv_in = nn.ModuleDict()
for d_in, c_in in fiber_in:
sum_freq = sum([degree_to_dim(min(d_in, d)) for d in fiber_out.degrees])
self.conv_in[str(d_in)] = VersatileConvSE3(sum_freq, c_in, list(channels_out_set)[0],
fuse_level=ConvSE3FuseLevel.FULL, **common_args)
#fuse_level=self.used_fuse_level, **common_args)
else:
# Use pairwise TFN convolutions
self.used_fuse_level = ConvSE3FuseLevel.NONE
self.conv = nn.ModuleDict()
for (degree_in, channels_in), (degree_out, channels_out) in (self.fiber_in * self.fiber_out):
dict_key = f'{degree_in},{degree_out}'
channels_in_new = channels_in + fiber_edge[degree_in] * (degree_in > 0)
sum_freq = degree_to_dim(min(degree_in, degree_out))
self.conv[dict_key] = VersatileConvSE3(sum_freq, channels_in_new, channels_out,
fuse_level=self.used_fuse_level, **common_args)
if self_interaction:
self.to_kernel_self = nn.ParameterDict()
for degree_out, channels_out in fiber_out:
if fiber_in[degree_out]:
self.to_kernel_self[str(degree_out)] = nn.Parameter(
torch.randn(channels_out, fiber_in[degree_out]) / np.sqrt(fiber_in[degree_out]))
def forward(
self,
node_feats: Dict[str, Tensor],
edge_feats: Dict[str, Tensor],
graph: DGLGraph,
basis: Dict[str, Tensor]
):
        with nvtx_range('ConvSE3'):
invariant_edge_feats = edge_feats['0'].squeeze(-1)
src, dst = graph.edges()
out = {}
in_features = []
# Fetch all input features from edge and node features
for degree_in in self.fiber_in.degrees:
src_node_features = node_feats[str(degree_in)][src]
if degree_in > 0 and str(degree_in) in edge_feats:
# Handle edge features of any type by concatenating them to node features
src_node_features = torch.cat([src_node_features, edge_feats[str(degree_in)]], dim=1)
in_features.append(src_node_features)
if self.used_fuse_level == ConvSE3FuseLevel.FULL:
in_features_fused = torch.cat(in_features, dim=-1)
out = self.conv(in_features_fused, invariant_edge_feats, basis['fully_fused'])
if not self.allow_fused_output or self.self_interaction or self.pool:
out = unfuse_features(out, self.fiber_out.degrees)
elif self.used_fuse_level == ConvSE3FuseLevel.PARTIAL and hasattr(self, 'conv_out'):
in_features_fused = torch.cat(in_features, dim=-1)
for degree_out in self.fiber_out.degrees:
out[str(degree_out)] = self.conv_out[str(degree_out)](in_features_fused, invariant_edge_feats,
basis[f'out{degree_out}_fused'])
elif self.used_fuse_level == ConvSE3FuseLevel.PARTIAL and hasattr(self, 'conv_in'):
out = 0
for degree_in, feature in zip(self.fiber_in.degrees, in_features):
out += self.conv_in[str(degree_in)](feature, invariant_edge_feats,
basis[f'in{degree_in}_fused'])
if not self.allow_fused_output or self.self_interaction or self.pool:
out = unfuse_features(out, self.fiber_out.degrees)
else:
# Fallback to pairwise TFN convolutions
for degree_out in self.fiber_out.degrees:
out_feature = 0
for degree_in, feature in zip(self.fiber_in.degrees, in_features):
dict_key = f'{degree_in},{degree_out}'
out_feature = out_feature + self.conv[dict_key](feature, invariant_edge_feats,
basis.get(dict_key, None))
out[str(degree_out)] = out_feature
for degree_out in self.fiber_out.degrees:
if self.self_interaction and str(degree_out) in self.to_kernel_self:
                    with nvtx_range('self interaction'):
dst_features = node_feats[str(degree_out)][dst]
kernel_self = self.to_kernel_self[str(degree_out)]
out[str(degree_out)] += kernel_self @ dst_features
if self.pool:
                    with nvtx_range('pooling'):
if isinstance(out, dict):
out[str(degree_out)] = dgl.ops.copy_e_sum(graph, out[str(degree_out)])
else:
out = dgl.ops.copy_e_sum(graph, out)
return out
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/convolution.py |
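The radial MLP above emits a flat block of kernel weights that `VersatileConvSE3` then views as `(edges, channels_out, channels_in * freq_sum)`. A small input/output sketch:

import torch
from se3_transformer.model.layers.convolution import RadialProfile

rp = RadialProfile(num_freq=3, channels_in=4, channels_out=8, edge_dim=2)
edge_feats = torch.randn(100, 2)   # e.g. edge length plus one extra invariant
weights = rp(edge_feats)
print(weights.shape)               # (100, 96) == (100, 3 * 4 * 8)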
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import Dict
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from se3_transformer.model.fiber import Fiber
class LinearSE3(nn.Module):
"""
Graph Linear SE(3)-equivariant layer, equivalent to a 1x1 convolution.
Maps a fiber to a fiber with the same degrees (channels may be different).
No interaction between degrees, but interaction between channels.
type-0 features (C_0 channels) ────> Linear(bias=False) ────> type-0 features (C'_0 channels)
type-1 features (C_1 channels) ────> Linear(bias=False) ────> type-1 features (C'_1 channels)
:
type-k features (C_k channels) ────> Linear(bias=False) ────> type-k features (C'_k channels)
"""
def __init__(self, fiber_in: Fiber, fiber_out: Fiber):
super().__init__()
self.weights = nn.ParameterDict({
str(degree_out): nn.Parameter(
torch.randn(channels_out, fiber_in[degree_out]) / np.sqrt(fiber_in[degree_out]))
for degree_out, channels_out in fiber_out
})
def forward(self, features: Dict[str, Tensor], *args, **kwargs) -> Dict[str, Tensor]:
return {
            degree: weight @ features[degree]
            for degree, weight in self.weights.items()
}
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/linear.py |
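`LinearSE3` mixes channels within each degree but never the `2d+1` components, which is what keeps it equivariant. A quick shape check:

import torch
from se3_transformer.model.fiber import Fiber
from se3_transformer.model.layers.linear import LinearSE3

lin = LinearSE3(Fiber({0: 16, 1: 8}), Fiber({0: 4, 1: 2}))
feats = {'0': torch.randn(10, 16, 1), '1': torch.randn(10, 8, 3)}
out = lin(feats)
print(out['0'].shape, out['1'].shape)  # (10, 4, 1) (10, 2, 3)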
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import Dict
import torch
import torch.nn as nn
from torch import Tensor
from torch.cuda.nvtx import range as nvtx_range
from se3_transformer.model.fiber import Fiber
class NormSE3(nn.Module):
"""
Norm-based SE(3)-equivariant nonlinearity.
┌──> feature_norm ──> LayerNorm() ──> ReLU() ──┐
feature_in ──┤ * ──> feature_out
└──> feature_phase ────────────────────────────┘
"""
NORM_CLAMP = 2 ** -24 # Minimum positive subnormal for FP16
def __init__(self, fiber: Fiber, nonlinearity: nn.Module = nn.ReLU()):
super().__init__()
self.fiber = fiber
self.nonlinearity = nonlinearity
if len(set(fiber.channels)) == 1:
# Fuse all the layer normalizations into a group normalization
self.group_norm = nn.GroupNorm(num_groups=len(fiber.degrees), num_channels=sum(fiber.channels))
else:
# Use multiple layer normalizations
self.layer_norms = nn.ModuleDict({
str(degree): nn.LayerNorm(channels)
for degree, channels in fiber
})
def forward(self, features: Dict[str, Tensor], *args, **kwargs) -> Dict[str, Tensor]:
with nvtx_range('NormSE3'):
output = {}
if hasattr(self, 'group_norm'):
# Compute per-degree norms of features
norms = [features[str(d)].norm(dim=-1, keepdim=True).clamp(min=self.NORM_CLAMP)
for d in self.fiber.degrees]
fused_norms = torch.cat(norms, dim=-2)
# Transform the norms only
new_norms = self.nonlinearity(self.group_norm(fused_norms.squeeze(-1))).unsqueeze(-1)
new_norms = torch.chunk(new_norms, chunks=len(self.fiber.degrees), dim=-2)
# Scale features to the new norms
for norm, new_norm, d in zip(norms, new_norms, self.fiber.degrees):
output[str(d)] = features[str(d)] / norm * new_norm
else:
for degree, feat in features.items():
norm = feat.norm(dim=-1, keepdim=True).clamp(min=self.NORM_CLAMP)
new_norm = self.nonlinearity(self.layer_norms[degree](norm.squeeze(-1)).unsqueeze(-1))
output[degree] = new_norm * feat / norm
return output
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/norm.py |
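Since `NormSE3` transforms only the per-channel norms, every output vector is a nonnegative rescaling of its input, so directions are preserved. A small sketch:

import torch
from se3_transformer.model.fiber import Fiber
from se3_transformer.model.layers.norm import NormSE3

norm_layer = NormSE3(Fiber({0: 8, 1: 8}))
feats = {'0': torch.randn(10, 8, 1), '1': torch.randn(10, 8, 3)}
out = norm_layer(feats)
# Same shapes in and out; only the norms have been transformed.
print(out['0'].shape, out['1'].shape)  # (10, 8, 1) (10, 8, 3)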
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import Dict, Literal
import torch.nn as nn
from dgl import DGLGraph
from dgl.nn.pytorch import AvgPooling, MaxPooling
from torch import Tensor
class GPooling(nn.Module):
"""
Graph max/average pooling on a given feature type.
The average can be taken for any feature type, and equivariance will be maintained.
The maximum can only be taken for invariant features (type 0).
If you want max-pooling for type > 0 features, look into Vector Neurons.
"""
def __init__(self, feat_type: int = 0, pool: Literal['max', 'avg'] = 'max'):
"""
:param feat_type: Feature type to pool
:param pool: Type of pooling: max or avg
"""
super().__init__()
assert pool in ['max', 'avg'], f'Unknown pooling: {pool}'
assert feat_type == 0 or pool == 'avg', 'Max pooling on type > 0 features will break equivariance'
self.feat_type = feat_type
self.pool = MaxPooling() if pool == 'max' else AvgPooling()
def forward(self, features: Dict[str, Tensor], graph: DGLGraph, **kwargs) -> Tensor:
pooled = self.pool(graph, features[str(self.feat_type)])
return pooled.squeeze(dim=-1)
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/pooling.py |
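A quick sketch of `GPooling` on a batched graph; average pooling preserves equivariance for any degree, and here it reduces per-node type-0 features to one vector per graph:

import dgl
import torch
from se3_transformer.model.layers.pooling import GPooling

g = dgl.batch([dgl.rand_graph(5, 10), dgl.rand_graph(7, 14)])
feats = {'0': torch.randn(12, 16, 1)}   # 12 nodes across both graphs
pool = GPooling(feat_type=0, pool='avg')
print(pool(feats, graph=g).shape)       # (2, 16): one pooled vector per graph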
from .linear import LinearSE3
from .norm import NormSE3
from .pooling import GPooling
from .convolution import ConvSE3
from .attention import AttentionBlockSE3
| RFdiffusion-main | env/SE3Transformer/se3_transformer/model/layers/__init__.py |