python_code (stringlengths 0 to 1.02M) | repo_name (stringlengths 9 to 48) | file_path (stringlengths 5 to 114) |
---|---|---|
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import Adam
from einops import rearrange, repeat
import sidechainnet as scn
from egnn_pytorch.egnn_pytorch import EGNN_Network
torch.set_default_dtype(torch.float64)
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY = 16
def cycle(loader, len_thres = 200):
while True:
for data in loader:
if data.seqs.shape[1] > len_thres:
continue
yield data
net = EGNN_Network(
num_tokens = 21,
num_positions = 200 * 3, # maximum number of positions - absolute positional embedding since there is inherent order in the sequence
depth = 5,
dim = 8,
num_nearest_neighbors = 16,
fourier_features = 2,
norm_coors = True,
coor_weights_clamp_value = 2.
).cuda()
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = BATCH_SIZE,
dynamic_batching = False
)
dl = cycle(data['train'])
optim = Adam(net.parameters(), lr=1e-3)
for _ in range(10000):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
batch = next(dl)
seqs, coords, masks = batch.seqs, batch.crds, batch.msks
seqs = seqs.cuda().argmax(dim = -1)
coords = coords.cuda().type(torch.float64)
masks = masks.cuda().bool()
l = seqs.shape[1]
coords = rearrange(coords, 'b (l s) c -> b l s c', s = 14)
# Keeping only the backbone coordinates
coords = coords[:, :, 0:3, :]
coords = rearrange(coords, 'b l s c -> b (l s) c')
seq = repeat(seqs, 'b n -> b (n c)', c = 3)
masks = repeat(masks, 'b n -> b (n c)', c = 3)
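        # band adjacency over the flattened backbone atoms: node i is linked to
        # nodes i - 1, i and i + 1 along the chain, which the EGNN below uses as a
        # sparse neighborhood on top of its nearest-neighbor selection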
i = torch.arange(seq.shape[-1], device = seq.device)
adj_mat = (i[:, None] >= (i[None, :] - 1)) & (i[:, None] <= (i[None, :] + 1))
noised_coords = coords + torch.randn_like(coords)
feats, denoised_coords = net(seq, noised_coords, adj_mat = adj_mat, mask = masks)
loss = F.mse_loss(denoised_coords[masks], coords[masks])
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print('loss:', loss.item())
optim.step()
optim.zero_grad()
| egnn-pytorch-main | denoise_sparse.py |
from setuptools import setup, find_packages
setup(
name = 'egnn-pytorch',
packages = find_packages(),
version = '0.2.6',
license='MIT',
description = 'E(n)-Equivariant Graph Neural Network - Pytorch',
author = 'Phil Wang, Eric Alcaide',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/egnn-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'equivariance',
'graph neural network'
],
install_requires=[
'einops>=0.3',
'numba',
'numpy',
'torch>=1.6'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| egnn-pytorch-main | setup.py |
import torch
from torch import nn, einsum, broadcast_tensors
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helper functions
def exists(val):
return val is not None
def safe_div(num, den, eps = 1e-8):
res = num.div(den.clamp(min = eps))
res.masked_fill_(den == 0, 0.)
return res
def batched_index_select(values, indices, dim = 1):
value_dims = values.shape[(dim + 1):]
values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices))
indices = indices[(..., *((None,) * len(value_dims)))]
indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims)
value_expand_len = len(indices_shape) - (dim + 1)
values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)]
value_expand_shape = [-1] * len(values.shape)
expand_slice = slice(dim, (dim + value_expand_len))
value_expand_shape[expand_slice] = indices.shape[expand_slice]
values = values.expand(*value_expand_shape)
dim += value_expand_len
return values.gather(dim, indices)
def fourier_encode_dist(x, num_encodings = 4, include_self = True):
x = x.unsqueeze(-1)
device, dtype, orig_x = x.device, x.dtype, x
scales = 2 ** torch.arange(num_encodings, device = device, dtype = dtype)
x = x / scales
x = torch.cat([x.sin(), x.cos()], dim=-1)
x = torch.cat((x, orig_x), dim = -1) if include_self else x
return x
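# note on fourier_encode_dist: for a squared-distance tensor of shape (b, i, j, 1) and
# num_encodings = 4, the output has shape (b, i, j, 1, 2 * 4 + 1): sin and cos features
# at 4 geometric scales plus (when include_self = True) the raw value, which is where the
# `(fourier_features * 2) + 1` portion of `edge_input_dim` in EGNN below comes from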
def embedd_token(x, dims, layers):
stop_concat = -len(dims)
to_embedd = x[:, stop_concat:].long()
for i,emb_layer in enumerate(layers):
# the portion corresponding to `to_embedd` part gets dropped
x = torch.cat([ x[:, :stop_concat],
emb_layer( to_embedd[:, i] )
], dim=-1)
stop_concat = x.shape[-1]
return x
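# note on embedd_token: the trailing len(dims) columns of `x` hold integer ids; they are
# dropped and replaced by their embeddings (one embedding layer per id column, applied in
# order), concatenated onto the remaining continuous features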
# swish activation fallback
class Swish_(nn.Module):
def forward(self, x):
return x * x.sigmoid()
SiLU = nn.SiLU if hasattr(nn, 'SiLU') else Swish_
# helper classes
# this follows the same strategy for normalization as done in SE3 Transformers
# https://github.com/lucidrains/se3-transformer-pytorch/blob/main/se3_transformer_pytorch/se3_transformer_pytorch.py#L95
class CoorsNorm(nn.Module):
def __init__(self, eps = 1e-8, scale_init = 1.):
super().__init__()
self.eps = eps
scale = torch.zeros(1).fill_(scale_init)
self.scale = nn.Parameter(scale)
def forward(self, coors):
norm = coors.norm(dim = -1, keepdim = True)
normed_coors = coors / norm.clamp(min = self.eps)
return normed_coors * self.scale
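# a small usage sketch for CoorsNorm (shapes illustrative): each relative coordinate
# vector is rescaled to unit length and multiplied by a single learned scale, which is
# initialized small (norm_coors_scale_init, e.g. 1e-2) so coordinate updates start gentle
# while rotation and translation equivariance are preserved
#
#   norm = CoorsNorm(scale_init = 1e-2)
#   rel_coors = torch.randn(1, 16, 16, 3)
#   out = norm(rel_coors)  # same shape; every vector now has norm ~ 1e-2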
# global linear attention
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64):
super().__init__()
inner_dim = heads * dim_head
self.heads = heads
self.scale = dim_head ** -0.5
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, x, context, mask = None):
h = self.heads
q = self.to_q(x)
kv = self.to_kv(context).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, *kv))
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
if exists(mask):
mask_value = -torch.finfo(dots.dtype).max
mask = rearrange(mask, 'b n -> b () () n')
dots.masked_fill_(~mask, mask_value)
attn = dots.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)', h = h)
return self.to_out(out)
class GlobalLinearAttention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64
):
super().__init__()
self.norm_seq = nn.LayerNorm(dim)
self.norm_queries = nn.LayerNorm(dim)
self.attn1 = Attention(dim, heads, dim_head)
self.attn2 = Attention(dim, heads, dim_head)
self.ff = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, dim * 4),
nn.GELU(),
nn.Linear(dim * 4, dim)
)
def forward(self, x, queries, mask = None):
res_x, res_queries = x, queries
x, queries = self.norm_seq(x), self.norm_queries(queries)
induced = self.attn1(queries, x, mask = mask)
out = self.attn2(x, induced)
x = out + res_x
queries = induced + res_queries
x = self.ff(x) + x
return x, queries
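# note: GlobalLinearAttention routes information through a small set of learned global
# tokens: attn1 lets the global tokens attend over the (masked) sequence, attn2 lets
# every sequence position attend back to those induced tokens, so the cost stays linear
# in sequence length for a fixed number of global tokens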
# classes
class EGNN(nn.Module):
def __init__(
self,
dim,
edge_dim = 0,
m_dim = 16,
fourier_features = 0,
num_nearest_neighbors = 0,
dropout = 0.0,
init_eps = 1e-3,
norm_feats = False,
norm_coors = False,
norm_coors_scale_init = 1e-2,
update_feats = True,
update_coors = True,
only_sparse_neighbors = False,
valid_radius = float('inf'),
m_pool_method = 'sum',
soft_edges = False,
coor_weights_clamp_value = None
):
super().__init__()
assert m_pool_method in {'sum', 'mean'}, 'pool method must be either sum or mean'
assert update_feats or update_coors, 'you must update either features, coordinates, or both'
self.fourier_features = fourier_features
edge_input_dim = (fourier_features * 2) + (dim * 2) + edge_dim + 1
dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.edge_mlp = nn.Sequential(
nn.Linear(edge_input_dim, edge_input_dim * 2),
dropout,
SiLU(),
nn.Linear(edge_input_dim * 2, m_dim),
SiLU()
)
self.edge_gate = nn.Sequential(
nn.Linear(m_dim, 1),
nn.Sigmoid()
) if soft_edges else None
self.node_norm = nn.LayerNorm(dim) if norm_feats else nn.Identity()
self.coors_norm = CoorsNorm(scale_init = norm_coors_scale_init) if norm_coors else nn.Identity()
self.m_pool_method = m_pool_method
self.node_mlp = nn.Sequential(
nn.Linear(dim + m_dim, dim * 2),
dropout,
SiLU(),
nn.Linear(dim * 2, dim),
) if update_feats else None
self.coors_mlp = nn.Sequential(
nn.Linear(m_dim, m_dim * 4),
dropout,
SiLU(),
nn.Linear(m_dim * 4, 1)
) if update_coors else None
self.num_nearest_neighbors = num_nearest_neighbors
self.only_sparse_neighbors = only_sparse_neighbors
self.valid_radius = valid_radius
self.coor_weights_clamp_value = coor_weights_clamp_value
self.init_eps = init_eps
self.apply(self.init_)
def init_(self, module):
if type(module) in {nn.Linear}:
# seems to be needed to keep the network from exploding to NaN with greater depths
nn.init.normal_(module.weight, std = self.init_eps)
def forward(self, feats, coors, edges = None, mask = None, adj_mat = None):
b, n, d, device, fourier_features, num_nearest, valid_radius, only_sparse_neighbors = *feats.shape, feats.device, self.fourier_features, self.num_nearest_neighbors, self.valid_radius, self.only_sparse_neighbors
if exists(mask):
num_nodes = mask.sum(dim = -1)
use_nearest = num_nearest > 0 or only_sparse_neighbors
rel_coors = rearrange(coors, 'b i d -> b i () d') - rearrange(coors, 'b j d -> b () j d')
rel_dist = (rel_coors ** 2).sum(dim = -1, keepdim = True)
i = j = n
if use_nearest:
ranking = rel_dist[..., 0].clone()
if exists(mask):
rank_mask = mask[:, :, None] * mask[:, None, :]
ranking.masked_fill_(~rank_mask, 1e5)
if exists(adj_mat):
if len(adj_mat.shape) == 2:
adj_mat = repeat(adj_mat.clone(), 'i j -> b i j', b = b)
if only_sparse_neighbors:
num_nearest = int(adj_mat.float().sum(dim = -1).max().item())
valid_radius = 0
self_mask = rearrange(torch.eye(n, device = device, dtype = torch.bool), 'i j -> () i j')
adj_mat = adj_mat.masked_fill(self_mask, False)
ranking.masked_fill_(self_mask, -1.)
ranking.masked_fill_(adj_mat, 0.)
nbhd_ranking, nbhd_indices = ranking.topk(num_nearest, dim = -1, largest = False)
nbhd_mask = nbhd_ranking <= valid_radius
rel_coors = batched_index_select(rel_coors, nbhd_indices, dim = 2)
rel_dist = batched_index_select(rel_dist, nbhd_indices, dim = 2)
if exists(edges):
edges = batched_index_select(edges, nbhd_indices, dim = 2)
j = num_nearest
if fourier_features > 0:
rel_dist = fourier_encode_dist(rel_dist, num_encodings = fourier_features)
rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')
if use_nearest:
feats_j = batched_index_select(feats, nbhd_indices, dim = 1)
else:
feats_j = rearrange(feats, 'b j d -> b () j d')
feats_i = rearrange(feats, 'b i d -> b i () d')
feats_i, feats_j = broadcast_tensors(feats_i, feats_j)
edge_input = torch.cat((feats_i, feats_j, rel_dist), dim = -1)
if exists(edges):
edge_input = torch.cat((edge_input, edges), dim = -1)
m_ij = self.edge_mlp(edge_input)
if exists(self.edge_gate):
m_ij = m_ij * self.edge_gate(m_ij)
if exists(mask):
mask_i = rearrange(mask, 'b i -> b i ()')
if use_nearest:
mask_j = batched_index_select(mask, nbhd_indices, dim = 1)
mask = (mask_i * mask_j) & nbhd_mask
else:
mask_j = rearrange(mask, 'b j -> b () j')
mask = mask_i * mask_j
if exists(self.coors_mlp):
coor_weights = self.coors_mlp(m_ij)
coor_weights = rearrange(coor_weights, 'b i j () -> b i j')
rel_coors = self.coors_norm(rel_coors)
if exists(mask):
coor_weights.masked_fill_(~mask, 0.)
if exists(self.coor_weights_clamp_value):
clamp_value = self.coor_weights_clamp_value
coor_weights.clamp_(min = -clamp_value, max = clamp_value)
coors_out = einsum('b i j, b i j c -> b i c', coor_weights, rel_coors) + coors
else:
coors_out = coors
if exists(self.node_mlp):
if exists(mask):
m_ij_mask = rearrange(mask, '... -> ... ()')
m_ij = m_ij.masked_fill(~m_ij_mask, 0.)
if self.m_pool_method == 'mean':
if exists(mask):
# masked mean
mask_sum = m_ij_mask.sum(dim = -2)
m_i = safe_div(m_ij.sum(dim = -2), mask_sum)
else:
m_i = m_ij.mean(dim = -2)
elif self.m_pool_method == 'sum':
m_i = m_ij.sum(dim = -2)
normed_feats = self.node_norm(feats)
node_mlp_input = torch.cat((normed_feats, m_i), dim = -1)
node_out = self.node_mlp(node_mlp_input) + feats
else:
node_out = feats
return node_out, coors_out
class EGNN_Network(nn.Module):
def __init__(
self,
*,
depth,
dim,
num_tokens = None,
num_edge_tokens = None,
num_positions = None,
edge_dim = 0,
num_adj_degrees = None,
adj_dim = 0,
global_linear_attn_every = 0,
global_linear_attn_heads = 8,
global_linear_attn_dim_head = 64,
num_global_tokens = 4,
**kwargs
):
super().__init__()
        assert not (exists(num_adj_degrees) and num_adj_degrees < 1), 'make sure adjacent degrees is at least 1'
self.num_positions = num_positions
self.token_emb = nn.Embedding(num_tokens, dim) if exists(num_tokens) else None
self.pos_emb = nn.Embedding(num_positions, dim) if exists(num_positions) else None
self.edge_emb = nn.Embedding(num_edge_tokens, edge_dim) if exists(num_edge_tokens) else None
self.has_edges = edge_dim > 0
self.num_adj_degrees = num_adj_degrees
self.adj_emb = nn.Embedding(num_adj_degrees + 1, adj_dim) if exists(num_adj_degrees) and adj_dim > 0 else None
edge_dim = edge_dim if self.has_edges else 0
adj_dim = adj_dim if exists(num_adj_degrees) else 0
has_global_attn = global_linear_attn_every > 0
self.global_tokens = None
if has_global_attn:
self.global_tokens = nn.Parameter(torch.randn(num_global_tokens, dim))
self.layers = nn.ModuleList([])
for ind in range(depth):
is_global_layer = has_global_attn and (ind % global_linear_attn_every) == 0
self.layers.append(nn.ModuleList([
GlobalLinearAttention(dim = dim, heads = global_linear_attn_heads, dim_head = global_linear_attn_dim_head) if is_global_layer else None,
EGNN(dim = dim, edge_dim = (edge_dim + adj_dim), norm_feats = True, **kwargs),
]))
def forward(
self,
feats,
coors,
adj_mat = None,
edges = None,
mask = None,
return_coor_changes = False
):
b, device = feats.shape[0], feats.device
if exists(self.token_emb):
feats = self.token_emb(feats)
if exists(self.pos_emb):
n = feats.shape[1]
            assert n <= self.num_positions, f'given sequence length {n} must be at most the number of positions {self.num_positions} set at init'
pos_emb = self.pos_emb(torch.arange(n, device = device))
feats += rearrange(pos_emb, 'n d -> () n d')
if exists(edges) and exists(self.edge_emb):
edges = self.edge_emb(edges)
# create N-degrees adjacent matrix from 1st degree connections
if exists(self.num_adj_degrees):
assert exists(adj_mat), 'adjacency matrix must be passed in (keyword argument adj_mat)'
if len(adj_mat.shape) == 2:
adj_mat = repeat(adj_mat.clone(), 'i j -> b i j', b = b)
adj_indices = adj_mat.clone().long()
for ind in range(self.num_adj_degrees - 1):
degree = ind + 2
next_degree_adj_mat = (adj_mat.float() @ adj_mat.float()) > 0
next_degree_mask = (next_degree_adj_mat.float() - adj_mat.float()).bool()
adj_indices.masked_fill_(next_degree_mask, degree)
adj_mat = next_degree_adj_mat.clone()
if exists(self.adj_emb):
adj_emb = self.adj_emb(adj_indices)
edges = torch.cat((edges, adj_emb), dim = -1) if exists(edges) else adj_emb
# setup global attention
global_tokens = None
if exists(self.global_tokens):
global_tokens = repeat(self.global_tokens, 'n d -> b n d', b = b)
# go through layers
coor_changes = [coors]
for global_attn, egnn in self.layers:
if exists(global_attn):
feats, global_tokens = global_attn(feats, global_tokens, mask = mask)
feats, coors = egnn(feats, coors, adj_mat = adj_mat, edges = edges, mask = mask)
coor_changes.append(coors)
if return_coor_changes:
return feats, coors, coor_changes
return feats, coors
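# a minimal, self-contained usage sketch (random data, CPU); the hyperparameters here are
# illustrative only and mirror how denoise_sparse.py and the tests drive the network
if __name__ == '__main__':
    net = EGNN_Network(
        num_tokens = 21,
        num_positions = 1024,
        dim = 32,
        depth = 3,
        num_nearest_neighbors = 8,
        coor_weights_clamp_value = 2.
    )

    feats = torch.randint(0, 21, (1, 1024))  # integer token ids per node
    coors = torch.randn(1, 1024, 3)          # 3d coordinates per node
    mask = torch.ones(1, 1024).bool()

    feats_out, coors_out = net(feats, coors, mask = mask)  # (1, 1024, 32), (1, 1024, 3)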
| egnn-pytorch-main | egnn_pytorch/egnn_pytorch.py |
from egnn_pytorch.egnn_pytorch import EGNN, EGNN_Network
from egnn_pytorch.egnn_pytorch_geometric import EGNN_Sparse, EGNN_Sparse_Network
| egnn-pytorch-main | egnn_pytorch/__init__.py |
import torch
from torch import sin, cos, atan2, acos
def rot_z(gamma):
return torch.tensor([
[cos(gamma), -sin(gamma), 0],
[sin(gamma), cos(gamma), 0],
[0, 0, 1]
], dtype=gamma.dtype)
def rot_y(beta):
return torch.tensor([
[cos(beta), 0, sin(beta)],
[0, 1, 0],
[-sin(beta), 0, cos(beta)]
], dtype=beta.dtype)
def rot(alpha, beta, gamma):
return rot_z(alpha) @ rot_y(beta) @ rot_z(gamma)
| egnn-pytorch-main | egnn_pytorch/utils.py |
import torch
from torch import nn, einsum, broadcast_tensors
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# types
from typing import Optional, List, Union
# pytorch geometric
try:
    import torch_geometric
    from torch_geometric.nn import MessagePassing
    from torch_geometric.typing import Adj, Size, OptTensor, Tensor
    PYG_AVAILABLE = True
except ImportError:
    PYG_AVAILABLE = False
    # fall back to plain objects so the type annotations below don't throw errors
    Tensor = OptTensor = Adj = MessagePassing = Size = object
from .egnn_pytorch import *
# global linear attention
class Attention_Sparse(Attention):
def __init__(self, **kwargs):
""" Wraps the attention class to operate with pytorch-geometric inputs. """
super(Attention_Sparse, self).__init__(**kwargs)
def sparse_forward(self, x, context, batch=None, batch_uniques=None, mask=None):
assert batch is not None or batch_uniques is not None, "Batch/(uniques) must be passed for block_sparse_attn"
if batch_uniques is None:
batch_uniques = torch.unique(batch, return_counts=True)
# only one example in batch - do dense - faster
if batch_uniques[0].shape[0] == 1:
x, context = map(lambda t: rearrange(t, 'h d -> () h d'), (x, context))
return self.forward(x, context, mask=None).squeeze() # get rid of batch dim
# multiple examples in batch - do block-sparse by dense loop
else:
x_list = []
aux_count = 0
            for bi, n_idxs in zip(*batch_uniques):
                x_list.append(
                    self.sparse_forward(
                        x[aux_count:aux_count + n_idxs],
                        context[aux_count:aux_count + n_idxs],
                        batch_uniques = (bi.unsqueeze(-1), n_idxs.unsqueeze(-1))
                    )
                )
                aux_count = aux_count + n_idxs
            return torch.cat(x_list, dim=0)
class GlobalLinearAttention_Sparse(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64
):
super().__init__()
        self.norm_seq = torch_geometric.nn.norm.LayerNorm(dim)
        self.norm_queries = torch_geometric.nn.norm.LayerNorm(dim)
        self.attn1 = Attention_Sparse(dim = dim, heads = heads, dim_head = dim_head)
        self.attn2 = Attention_Sparse(dim = dim, heads = heads, dim_head = dim_head)
        # can't concat pyg norms with torch sequentials
        self.ff_norm = torch_geometric.nn.norm.LayerNorm(dim)
self.ff = nn.Sequential(
nn.Linear(dim, dim * 4),
nn.GELU(),
nn.Linear(dim * 4, dim)
)
def forward(self, x, queries, batch=None, batch_uniques=None, mask = None):
res_x, res_queries = x, queries
x, queries = self.norm_seq(x, batch=batch), self.norm_queries(queries, batch=batch)
induced = self.attn1.sparse_forward(queries, x, batch=batch, batch_uniques=batch_uniques, mask = mask)
out = self.attn2.sparse_forward(x, induced, batch=batch, batch_uniques=batch_uniques)
x = out + res_x
queries = induced + res_queries
x_norm = self.ff_norm(x, batch=batch)
x = self.ff(x_norm) + x_norm
return x, queries
# define pytorch-geometric equivalents
class EGNN_Sparse(MessagePassing):
""" Different from the above since it separates the edge assignment
from the computation (this allows for great reduction in time and
computations when the graph is locally or sparse connected).
* aggr: one of ["add", "mean", "max"]
"""
def __init__(
self,
feats_dim,
pos_dim=3,
edge_attr_dim = 0,
m_dim = 16,
fourier_features = 0,
soft_edge = 0,
norm_feats = False,
norm_coors = False,
norm_coors_scale_init = 1e-2,
update_feats = True,
update_coors = True,
dropout = 0.,
coor_weights_clamp_value = None,
aggr = "add",
**kwargs
):
assert aggr in {'add', 'sum', 'max', 'mean'}, 'pool method must be a valid option'
assert update_feats or update_coors, 'you must update either features, coordinates, or both'
kwargs.setdefault('aggr', aggr)
super(EGNN_Sparse, self).__init__(**kwargs)
# model params
self.fourier_features = fourier_features
self.feats_dim = feats_dim
self.pos_dim = pos_dim
self.m_dim = m_dim
self.soft_edge = soft_edge
self.norm_feats = norm_feats
self.norm_coors = norm_coors
self.update_coors = update_coors
self.update_feats = update_feats
        self.coor_weights_clamp_value = coor_weights_clamp_value
self.edge_input_dim = (fourier_features * 2) + edge_attr_dim + 1 + (feats_dim * 2)
self.dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
# EDGES
self.edge_mlp = nn.Sequential(
nn.Linear(self.edge_input_dim, self.edge_input_dim * 2),
self.dropout,
SiLU(),
nn.Linear(self.edge_input_dim * 2, m_dim),
SiLU()
)
self.edge_weight = nn.Sequential(nn.Linear(m_dim, 1),
nn.Sigmoid()
) if soft_edge else None
# NODES - can't do identity in node_norm bc pyg expects 2 inputs, but identity expects 1.
self.node_norm = torch_geometric.nn.norm.LayerNorm(feats_dim) if norm_feats else None
self.coors_norm = CoorsNorm(scale_init = norm_coors_scale_init) if norm_coors else nn.Identity()
self.node_mlp = nn.Sequential(
nn.Linear(feats_dim + m_dim, feats_dim * 2),
self.dropout,
SiLU(),
nn.Linear(feats_dim * 2, feats_dim),
) if update_feats else None
# COORS
self.coors_mlp = nn.Sequential(
nn.Linear(m_dim, m_dim * 4),
self.dropout,
SiLU(),
nn.Linear(self.m_dim * 4, 1)
) if update_coors else None
self.apply(self.init_)
def init_(self, module):
if type(module) in {nn.Linear}:
# seems to be needed to keep the network from exploding to NaN with greater depths
nn.init.xavier_normal_(module.weight)
nn.init.zeros_(module.bias)
def forward(self, x: Tensor, edge_index: Adj,
edge_attr: OptTensor = None, batch: Adj = None,
angle_data: List = None, size: Size = None) -> Tensor:
""" Inputs:
* x: (n_points, d) where d is pos_dims + feat_dims
* edge_index: (2, n_edges)
* edge_attr: tensor (n_edges, n_feats) excluding basic distance feats.
            * batch: (n_points,) long tensor. specifies point cloud membership for each point
* angle_data: list of tensors (levels, n_edges_i, n_length_path) long tensor.
* size: None
"""
coors, feats = x[:, :self.pos_dim], x[:, self.pos_dim:]
rel_coors = coors[edge_index[0]] - coors[edge_index[1]]
rel_dist = (rel_coors ** 2).sum(dim=-1, keepdim=True)
if self.fourier_features > 0:
rel_dist = fourier_encode_dist(rel_dist, num_encodings = self.fourier_features)
rel_dist = rearrange(rel_dist, 'n () d -> n d')
if exists(edge_attr):
edge_attr_feats = torch.cat([edge_attr, rel_dist], dim=-1)
else:
edge_attr_feats = rel_dist
hidden_out, coors_out = self.propagate(edge_index, x=feats, edge_attr=edge_attr_feats,
coors=coors, rel_coors=rel_coors,
batch=batch)
return torch.cat([coors_out, hidden_out], dim=-1)
def message(self, x_i, x_j, edge_attr) -> Tensor:
m_ij = self.edge_mlp( torch.cat([x_i, x_j, edge_attr], dim=-1) )
return m_ij
def propagate(self, edge_index: Adj, size: Size = None, **kwargs):
"""The initial call to start propagating messages.
Args:
`edge_index` holds the indices of a general (sparse)
assignment matrix of shape :obj:`[N, M]`.
size (tuple, optional) if none, the size will be inferred
and assumed to be quadratic.
**kwargs: Any additional data which is needed to construct and
aggregate messages, and to update node embeddings.
"""
size = self.__check_input__(edge_index, size)
coll_dict = self.__collect__(self.__user_args__,
edge_index, size, kwargs)
msg_kwargs = self.inspector.distribute('message', coll_dict)
aggr_kwargs = self.inspector.distribute('aggregate', coll_dict)
update_kwargs = self.inspector.distribute('update', coll_dict)
# get messages
m_ij = self.message(**msg_kwargs)
# update coors if specified
if self.update_coors:
coor_wij = self.coors_mlp(m_ij)
# clamp if arg is set
if self.coor_weights_clamp_value:
                clamp_value = self.coor_weights_clamp_value
                coor_wij.clamp_(min = -clamp_value, max = clamp_value)
# normalize if needed
kwargs["rel_coors"] = self.coors_norm(kwargs["rel_coors"])
mhat_i = self.aggregate(coor_wij * kwargs["rel_coors"], **aggr_kwargs)
coors_out = kwargs["coors"] + mhat_i
else:
coors_out = kwargs["coors"]
# update feats if specified
if self.update_feats:
# weight the edges if arg is passed
if self.soft_edge:
m_ij = m_ij * self.edge_weight(m_ij)
m_i = self.aggregate(m_ij, **aggr_kwargs)
hidden_feats = self.node_norm(kwargs["x"], kwargs["batch"]) if self.node_norm else kwargs["x"]
hidden_out = self.node_mlp( torch.cat([hidden_feats, m_i], dim = -1) )
hidden_out = kwargs["x"] + hidden_out
else:
hidden_out = kwargs["x"]
# return tuple
return self.update((hidden_out, coors_out), **update_kwargs)
def __repr__(self):
dict_print = {}
return "E(n)-GNN Layer for Graphs " + str(self.__dict__)
class EGNN_Sparse_Network(nn.Module):
r"""Sample GNN model architecture that uses the EGNN-Sparse
message passing layer to learn over point clouds.
Main MPNN layer introduced in https://arxiv.org/abs/2102.09844v1
Inputs will be standard GNN: x, edge_index, edge_attr, batch, ...
Args:
* n_layers: int. number of MPNN layers
* ... : same interpretation as the base layer.
* embedding_nums: list. number of unique keys to embedd. for points
1 entry per embedding needed.
* embedding_dims: list. point - number of dimensions of
the resulting embedding. 1 entry per embedding needed.
* edge_embedding_nums: list. number of unique keys to embedd. for edges.
1 entry per embedding needed.
* edge_embedding_dims: list. point - number of dimensions of
the resulting embedding. 1 entry per embedding needed.
* recalc: int. Recalculate edge feats every `recalc` MPNN layers. 0 for no recalc
* verbose: bool. verbosity level.
-----
Diff with normal layer: one has to do preprocessing before (radius, global token, ...)
"""
def __init__(self, n_layers, feats_dim,
pos_dim = 3,
edge_attr_dim = 0,
m_dim = 16,
fourier_features = 0,
soft_edge = 0,
embedding_nums=[],
embedding_dims=[],
edge_embedding_nums=[],
edge_embedding_dims=[],
update_coors=True,
update_feats=True,
norm_feats=True,
norm_coors=False,
norm_coors_scale_init = 1e-2,
dropout=0.,
coor_weights_clamp_value=None,
aggr="add",
global_linear_attn_every = 0,
global_linear_attn_heads = 8,
global_linear_attn_dim_head = 64,
num_global_tokens = 4,
recalc=0 ,):
super().__init__()
self.n_layers = n_layers
# Embeddings? solve here
self.embedding_nums = embedding_nums
self.embedding_dims = embedding_dims
self.emb_layers = nn.ModuleList()
self.edge_embedding_nums = edge_embedding_nums
self.edge_embedding_dims = edge_embedding_dims
self.edge_emb_layers = nn.ModuleList()
# instantiate point and edge embedding layers
for i in range( len(self.embedding_dims) ):
self.emb_layers.append(nn.Embedding(num_embeddings = embedding_nums[i],
embedding_dim = embedding_dims[i]))
feats_dim += embedding_dims[i] - 1
for i in range( len(self.edge_embedding_dims) ):
self.edge_emb_layers.append(nn.Embedding(num_embeddings = edge_embedding_nums[i],
embedding_dim = edge_embedding_dims[i]))
edge_attr_dim += edge_embedding_dims[i] - 1
# rest
self.mpnn_layers = nn.ModuleList()
self.feats_dim = feats_dim
self.pos_dim = pos_dim
self.edge_attr_dim = edge_attr_dim
self.m_dim = m_dim
self.fourier_features = fourier_features
self.soft_edge = soft_edge
self.norm_feats = norm_feats
self.norm_coors = norm_coors
self.norm_coors_scale_init = norm_coors_scale_init
self.update_feats = update_feats
self.update_coors = update_coors
self.dropout = dropout
self.coor_weights_clamp_value = coor_weights_clamp_value
self.recalc = recalc
self.has_global_attn = global_linear_attn_every > 0
self.global_tokens = None
self.global_linear_attn_every = global_linear_attn_every
if self.has_global_attn:
            self.global_tokens = nn.Parameter(torch.randn(num_global_tokens, feats_dim))
# instantiate layers
for i in range(n_layers):
layer = EGNN_Sparse(feats_dim = feats_dim,
pos_dim = pos_dim,
edge_attr_dim = edge_attr_dim,
m_dim = m_dim,
fourier_features = fourier_features,
soft_edge = soft_edge,
norm_feats = norm_feats,
norm_coors = norm_coors,
norm_coors_scale_init = norm_coors_scale_init,
update_feats = update_feats,
update_coors = update_coors,
dropout = dropout,
coor_weights_clamp_value = coor_weights_clamp_value)
# global attention case
is_global_layer = self.has_global_attn and (i % self.global_linear_attn_every) == 0
if is_global_layer:
                attn_layer = GlobalLinearAttention_Sparse(dim = self.feats_dim,
                                                          heads = global_linear_attn_heads,
                                                          dim_head = global_linear_attn_dim_head)
self.mpnn_layers.append(nn.ModuleList([layer, attn_layer]))
# normal case
else:
self.mpnn_layers.append(layer)
def forward(self, x, edge_index, batch, edge_attr,
bsize=None, recalc_edge=None, verbose=0):
""" Recalculate edge features every `self.recalc_edge` with the
`recalc_edge` function if self.recalc_edge is set.
* x: (N, pos_dim+feats_dim) will be unpacked into coors, feats.
"""
# NODES - Embedd each dim to its target dimensions:
x = embedd_token(x, self.embedding_dims, self.emb_layers)
        # regulates whether to embed edges at each layer
edges_need_embedding = True
for i,layer in enumerate(self.mpnn_layers):
# EDGES - Embedd each dim to its target dimensions:
if edges_need_embedding:
edge_attr = embedd_token(edge_attr, self.edge_embedding_dims, self.edge_emb_layers)
edges_need_embedding = False
# attn tokens
global_tokens = None
if exists(self.global_tokens):
                unique, amounts = torch.unique(batch, return_counts = True)
num_idxs = torch.cat([torch.arange(num_idxs_i) for num_idxs_i in amounts], dim=-1)
global_tokens = self.global_tokens[num_idxs]
# pass layers
is_global_layer = self.has_global_attn and (i % self.global_linear_attn_every) == 0
if not is_global_layer:
x = layer(x, edge_index, edge_attr, batch=batch, size=bsize)
else:
# only pass feats to the attn layer
                x_attn, global_tokens = layer[-1](x[:, self.pos_dim:], global_tokens, batch=batch)
                # merge attn-ed feats and coords
                x = torch.cat( (x[:, :self.pos_dim], x_attn), dim=-1)
                x = layer[0](x, edge_index, edge_attr, batch=batch, size=bsize)
# recalculate edge info - not needed if last layer
if self.recalc and ((i%self.recalc == 0) and not (i == len(self.mpnn_layers)-1)) :
edge_index, edge_attr, _ = recalc_edge(x) # returns attr, idx, any_other_info
edges_need_embedding = True
return x
def __repr__(self):
        return 'EGNN_Sparse_Network of: {0} layers'.format(len(self.mpnn_layers))
| egnn-pytorch-main | egnn_pytorch/egnn_pytorch_geometric.py |
import torch
from egnn_pytorch import EGNN, EGNN_Sparse
from egnn_pytorch.utils import rot
torch.set_default_dtype(torch.float64)
def test_egnn_equivariance():
layer = EGNN(dim=512, edge_dim=4)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 16, 512)
coors = torch.randn(1, 16, 3)
edges = torch.randn(1, 16, 16, 4)
mask = torch.ones(1, 16).bool()
# Cache first two nodes' features
node1 = feats[:, 0, :]
node2 = feats[:, 1, :]
# Switch first and second nodes' positions
feats_permuted_row_wise = feats.clone().detach()
feats_permuted_row_wise[:, 0, :] = node2
feats_permuted_row_wise[:, 1, :] = node1
feats1, coors1 = layer(feats, coors @ R + T, edges, mask=mask)
feats2, coors2 = layer(feats, coors, edges, mask=mask)
feats3, coors3 = layer(feats_permuted_row_wise, coors, edges, mask=mask)
assert torch.allclose(feats1, feats2, atol=1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol=1e-6), 'type 1 features are equivariant'
assert not torch.allclose(feats1, feats3, atol=1e-6), 'layer must be equivariant to permutations of node order'
def test_higher_dimension():
layer = EGNN(dim=512, edge_dim=4)
feats = torch.randn(1, 16, 512)
coors = torch.randn(1, 16, 5)
edges = torch.randn(1, 16, 16, 4)
mask = torch.ones(1, 16).bool()
feats, coors = layer(feats, coors, edges, mask = mask)
assert True
def test_egnn_equivariance_with_nearest_neighbors():
layer = EGNN(dim=512, edge_dim=1, num_nearest_neighbors=8)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 256, 512)
coors = torch.randn(1, 256, 3)
edges = torch.randn(1, 256, 256, 1)
mask = torch.ones(1, 256).bool()
# Cache first two nodes' features
node1 = feats[:, 0, :]
node2 = feats[:, 1, :]
# Switch first and second nodes' positions
feats_permuted_row_wise = feats.clone().detach()
feats_permuted_row_wise[:, 0, :] = node2
feats_permuted_row_wise[:, 1, :] = node1
feats1, coors1 = layer(feats, coors @ R + T, edges, mask=mask)
feats2, coors2 = layer(feats, coors, edges, mask=mask)
feats3, coors3 = layer(feats_permuted_row_wise, coors, edges, mask=mask)
assert torch.allclose(feats1, feats2, atol=1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol=1e-6), 'type 1 features are equivariant'
assert not torch.allclose(feats1, feats3, atol=1e-6), 'layer must be equivariant to permutations of node order'
def test_egnn_equivariance_with_coord_norm():
layer = EGNN(dim=512, edge_dim=1, num_nearest_neighbors=8, norm_coors=True)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
feats = torch.randn(1, 256, 512)
coors = torch.randn(1, 256, 3)
edges = torch.randn(1, 256, 256, 1)
mask = torch.ones(1, 256).bool()
# Cache first two nodes' features
node1 = feats[:, 0, :]
node2 = feats[:, 1, :]
# Switch first and second nodes' positions
feats_permuted_row_wise = feats.clone().detach()
feats_permuted_row_wise[:, 0, :] = node2
feats_permuted_row_wise[:, 1, :] = node1
feats1, coors1 = layer(feats, coors @ R + T, edges, mask=mask)
feats2, coors2 = layer(feats, coors, edges, mask=mask)
feats3, coors3 = layer(feats_permuted_row_wise, coors, edges, mask=mask)
assert torch.allclose(feats1, feats2, atol=1e-6), 'type 0 features are invariant'
assert torch.allclose(coors1, (coors2 @ R + T), atol=1e-6), 'type 1 features are equivariant'
assert not torch.allclose(feats1, feats3, atol=1e-6), 'layer must be equivariant to permutations of node order'
def test_egnn_sparse_equivariance():
layer = EGNN_Sparse(feats_dim=1,
m_dim=16,
fourier_features=4)
R = rot(*torch.rand(3))
T = torch.randn(1, 1, 3)
apply_action = lambda t: (t @ R + T).squeeze()
feats = torch.randn(16, 1)
coors = torch.randn(16, 3)
edge_idxs = (torch.rand(2, 20) * 16).long()
# Cache first two nodes' features
node1 = feats[0, :]
node2 = feats[1, :]
# Switch first and second nodes' positions
feats_permuted_row_wise = feats.clone().detach()
feats_permuted_row_wise[0, :] = node2
feats_permuted_row_wise[1, :] = node1
x1 = torch.cat([coors, feats], dim=-1)
x2 = torch.cat([apply_action(coors), feats], dim=-1)
x3 = torch.cat([apply_action(coors), feats_permuted_row_wise], dim=-1)
out1 = layer(x=x1, edge_index=edge_idxs)
out2 = layer(x=x2, edge_index=edge_idxs)
out3 = layer(x=x3, edge_index=edge_idxs)
feats1, coors1 = out1[:, 3:], out1[:, :3]
feats2, coors2 = out2[:, 3:], out2[:, :3]
feats3, coors3 = out3[:, 3:], out3[:, :3]
print(feats1 - feats2)
print(apply_action(coors1) - coors2)
assert torch.allclose(feats1, feats2), 'features must be invariant'
assert torch.allclose(apply_action(coors1), coors2), 'coordinates must be equivariant'
assert not torch.allclose(feats1, feats3, atol=1e-6), 'layer must be equivariant to permutations of node order'
def test_geom_equivalence():
layer = EGNN_Sparse(feats_dim=128,
edge_attr_dim=4,
m_dim=16,
fourier_features=4)
feats = torch.randn(16, 128)
coors = torch.randn(16, 3)
x = torch.cat([coors, feats], dim=-1)
edge_idxs = (torch.rand(2, 20) * 16).long()
edges_attrs = torch.randn(16, 16, 4)
edges_attrs = edges_attrs[edge_idxs[0], edge_idxs[1]]
assert layer.forward(x, edge_idxs, edge_attr=edges_attrs).shape == x.shape
| egnn-pytorch-main | tests/test_equivariance.py |
from setuptools import setup, find_packages
setup(
name = 'token-shift-gpt',
packages = find_packages(),
version = '0.0.3',
license='MIT',
description = 'Token Shift GPT - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/token-shift-gpt',
keywords = [
'artificial intelligence',
'deep learning',
'autoregressive language modeling'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| token-shift-gpt-main | setup.py |
from token_shift_gpt import TokenShiftGPT
from token_shift_gpt.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 768
SEQ_LEN = 768
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = TokenShiftGPT(
num_tokens = 256,
max_seq_len = SEQ_LEN,
dim = 512,
depth = 8,
ff_mult = 8
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
    if i != 0 and i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| token-shift-gpt-main | train.py |
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
device = start_tokens.device
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
            filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
return out
def forward(self, x, **kwargs):
xi, xo = x[:, :-1], x[:, 1:]
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
| token-shift-gpt-main | token_shift_gpt/autoregressive_wrapper.py |
from token_shift_gpt.token_shift_gpt import TokenShiftGPT
| token-shift-gpt-main | token_shift_gpt/__init__.py |
from math import log2, ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# helper functions
def exists(val):
return val is not None
def shift(x, amt, dim = -1):
return F.pad(x, (*((0, 0) * (-dim - 1)), amt, -amt), value = 0.)
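# shift_tokens splits the feature dimension into (amt + 1) chunks; chunk k is replaced by
# the average of its own values over a past window of 2 ** k positions ending 2 ** k tokens
# back (a causal windowed mean built from the cumulative sum), while the final chunk is
# passed through unchanged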
def shift_tokens(x, amt, eps = 1e-5):
n, device = x.shape[1], x.device
cumsum = x.cumsum(dim = 1)
*x, x_pass = x.chunk(amt + 1, dim = -1)
*x_cumsum, _ = cumsum.chunk(amt + 1, dim = -1)
amts = 2 ** torch.arange(amt)
amts = amts.tolist()
shifts = []
denom = torch.arange(n, device = device)
for x_chunk, x_cumsum_chunk, amt in zip(x, x_cumsum, amts):
shifted_chunk = shift(x_cumsum_chunk, amt, dim = -2) - shift(x_cumsum_chunk, 2 * amt, dim = -2)
shifted_denom = shift(denom, amt, dim = -1) - shift(denom, 2 * amt, dim = -1)
shifted_denom = rearrange(shifted_denom, 'n -> () n ()')
normed_shifted_x = shifted_chunk / (shifted_denom + eps)
shifts.append(normed_shifted_x)
return torch.cat((*shifts, x_pass), dim = -1)
def discounted_cumsum(t, gamma):
try:
from torch_discounted_cumsum import discounted_cumsum_left
except ImportError:
        print('unable to import torch_discounted_cumsum - please run `pip install torch-discounted-cumsum`')
        raise
b, n, d = t.shape
t = rearrange(t, 'b n d -> (b d) n')
t = discounted_cumsum_left(t, gamma)
t = rearrange(t, '(b d) n -> b n d', b = b)
return t
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class FeedForward(nn.Module):
def __init__(
self,
*,
dim,
max_seq_len,
num_shifts,
mult = 4,
eps = 1e-3,
use_discounted_cumsum = False,
discount_gamma = 0.9
):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.project_in = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU()
)
self.num_shifts = num_shifts
hidden_dim = dim * mult // 2
self.gate_norm = nn.LayerNorm(hidden_dim)
self.to_gate = nn.Linear(hidden_dim, hidden_dim)
nn.init.constant_(self.to_gate.weight, eps)
nn.init.constant_(self.to_gate.bias, 1.)
self.project_out = nn.Linear(hidden_dim, dim)
# for using discounted cumsum approach
self.use_discounted_cumsum = use_discounted_cumsum
self.discount_gamma = discount_gamma
def forward(self, x):
x = self.norm(x)
x = self.project_in(x)
x, gate = x.chunk(2, dim = -1)
gate = self.gate_norm(gate)
if self.use_discounted_cumsum:
gate = shift(gate, 1, dim = -2)
gate = discounted_cumsum(gate, self.discount_gamma)
else:
gate = shift_tokens(gate, self.num_shifts)
x = x * self.to_gate(gate)
return self.project_out(x)
# classes
class TokenShiftGPT(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
max_seq_len,
depth,
ff_mult = 4,
use_discounted_cumsum = False,
discount_gamma = 0.9
):
super().__init__()
self.seq_len = max_seq_len
num_shifts = ceil(log2(max_seq_len)) - 1
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.net = nn.Sequential(
*[Residual(FeedForward(dim = dim, num_shifts = num_shifts, mult = ff_mult, max_seq_len = max_seq_len, use_discounted_cumsum = use_discounted_cumsum, discount_gamma = discount_gamma)) for _ in range(depth)],
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x):
x = self.token_emb(x)
pos_emb = self.pos_emb(torch.arange(x.shape[1], device = x.device))
x = x + rearrange(pos_emb, 'n d -> () n d')
return self.net(x)
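# a minimal usage sketch (random token ids, CPU); the sizes below are illustrative and
# smaller than the enwik8 settings used in train.py
if __name__ == '__main__':
    model = TokenShiftGPT(
        num_tokens = 256,
        max_seq_len = 256,
        dim = 256,
        depth = 2,
        ff_mult = 4
    )

    x = torch.randint(0, 256, (1, 256))  # (batch, seq) of token ids
    logits = model(x)                    # (1, 256, 256) next-token logits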
| token-shift-gpt-main | token_shift_gpt/token_shift_gpt.py |
from setuptools import setup, find_packages
setup(
name = 'magic3d-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Magic3D - Nvidia\'s Text-to-3D',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/magic3d-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'text-to-3d',
'denoising diffusion',
'progressive refinement'
],
install_requires=[
'einops>=0.6',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| magic3d-pytorch-main | setup.py |
| magic3d-pytorch-main | magic3d_pytorch/magic3d_pytorch.py |
| magic3d-pytorch-main | magic3d_pytorch/__init__.py |
import gdb
import re
import sys
import traceback
# some feedback that the nim runtime support is loading, isn't a bad
# thing at all.
gdb.write("Loading Nim Runtime support.\n", gdb.STDERR)
# When error occure they occur regularly. This 'caches' known errors
# and prevents them from being reprinted over and over again.
errorSet = set()
def printErrorOnce(id, message):
global errorSet
if id not in errorSet:
errorSet.add(id)
gdb.write("printErrorOnce: " + message, gdb.STDERR)
################################################################################
##### Type pretty printers
################################################################################
type_hash_regex = re.compile("^([A-Za-z0-9]*)_([A-Za-z0-9]*)_+([A-Za-z0-9]*)$")
def getNimRti(type_name):
""" Return a ``gdb.Value`` object for the Nim Runtime Information of ``type_name``. """
# Get static const TNimType variable. This should be available for
# every non trivial Nim type.
  m = type_hash_regex.match(type_name)
  if m:
    lookups = [
      "NTI" + m.group(2).lower() + "__" + m.group(3) + "_",
      "NTI" + "__" + m.group(3) + "_",
      "NTI" + m.group(2).replace("colon", "58").lower() + "__" + m.group(3) + "_"
    ]
    for l in lookups:
      try:
        return gdb.parse_and_eval(l)
      except:
        pass
  return None
def getNameFromNimRti(rti):
""" Return name (or None) given a Nim RTI ``gdb.Value`` """
try:
# sometimes there isn't a name field -- example enums
return rti['name'].string(encoding="utf-8", errors="ignore")
except:
return None
class NimTypeRecognizer:
# this type map maps from types that are generated in the C files to
# how they are called in nim. To not mix up the name ``int`` from
# system.nim with the name ``int`` that could still appear in
# generated code, ``NI`` is mapped to ``system.int`` and not just
# ``int``.
type_map_static = {
'NI': 'system.int', 'NI8': 'int8', 'NI16': 'int16', 'NI32': 'int32',
'NI64': 'int64',
'NU': 'uint', 'NU8': 'uint8','NU16': 'uint16', 'NU32': 'uint32',
'NU64': 'uint64',
'NF': 'float', 'NF32': 'float32', 'NF64': 'float64',
'NIM_BOOL': 'bool',
'NIM_CHAR': 'char', 'NCSTRING': 'cstring', 'NimStringDesc': 'string'
}
# object_type_pattern = re.compile("^(\w*):ObjectType$")
def recognize(self, type_obj):
# skip things we can't handle like functions
if type_obj.code in [gdb.TYPE_CODE_FUNC, gdb.TYPE_CODE_VOID]:
return None
tname = None
if type_obj.tag is not None:
tname = type_obj.tag
elif type_obj.name is not None:
tname = type_obj.name
# handle pointer types
if not tname:
target_type = type_obj
if type_obj.code in [gdb.TYPE_CODE_PTR]:
target_type = type_obj.target()
if target_type.name:
# visualize 'string' as non pointer type (unpack pointer type).
if target_type.name == "NimStringDesc":
tname = target_type.name # could also just return 'string'
else:
rti = getNimRti(target_type.name)
if rti:
return getNameFromNimRti(rti)
if tname:
result = self.type_map_static.get(tname, None)
if result:
return result
rti = getNimRti(tname)
if rti:
return getNameFromNimRti(rti)
return None
class NimTypePrinter:
"""Nim type printer. One printer for all Nim types."""
# enabling and disabling of type printers can be done with the
# following gdb commands:
#
# enable type-printer NimTypePrinter
# disable type-printer NimTypePrinter
# relevant docs: https://sourceware.org/gdb/onlinedocs/gdb/Type-Printing-API.html
name = "NimTypePrinter"
def __init__(self):
self.enabled = True
def instantiate(self):
return NimTypeRecognizer()
################################################################################
##### GDB Function, equivalent of Nim's $ operator
################################################################################
class DollarPrintFunction (gdb.Function):
"Nim's equivalent of $ operator as a gdb function, available in expressions `print $dollar(myvalue)"
dollar_functions = re.findall(
    r'NimStringDesc \*(dollar__[A-Za-z0-9_]+?)\(([^,)]*)\);',
gdb.execute("info functions dollar__", True, True)
)
def __init__ (self):
super (DollarPrintFunction, self).__init__("dollar")
@staticmethod
def invoke_static(arg):
if arg.type.code == gdb.TYPE_CODE_PTR and arg.type.target().name == "NimStringDesc":
return arg
argTypeName = str(arg.type)
for func, arg_typ in DollarPrintFunction.dollar_functions:
# this way of overload resolution cannot deal with type aliases,
# therefore it won't find all overloads.
if arg_typ == argTypeName:
func_value = gdb.lookup_global_symbol(func, gdb.SYMBOL_FUNCTIONS_DOMAIN).value()
return func_value(arg)
elif arg_typ == argTypeName + " *":
func_value = gdb.lookup_global_symbol(func, gdb.SYMBOL_FUNCTIONS_DOMAIN).value()
return func_value(arg.address)
printErrorOnce(argTypeName, "No suitable Nim $ operator found for type: " + argTypeName + "\n")
return None
def invoke(self, arg):
return self.invoke_static(arg)
DollarPrintFunction()
################################################################################
##### GDB Function, Nim string comparison
################################################################################
class NimStringEqFunction (gdb.Function):
"""Compare Nim strings for example in conditionals for breakpoints."""
def __init__ (self):
super (NimStringEqFunction, self).__init__("nimstreq")
@staticmethod
def invoke_static(arg1,arg2):
if arg1.type.code == gdb.TYPE_CODE_PTR and arg1.type.target().name == "NimStringDesc":
str1 = NimStringPrinter(arg1).to_string()
else:
str1 = arg1.string()
if arg2.type.code == gdb.TYPE_CODE_PTR and arg2.type.target().name == "NimStringDesc":
      str2 = NimStringPrinter(arg2).to_string()
else:
str2 = arg2.string()
return str1 == str2
def invoke(self, arg1, arg2):
return self.invoke_static(arg1, arg2)
NimStringEqFunction()
################################################################################
##### GDB Command, equivalent of Nim's $ operator
################################################################################
class DollarPrintCmd (gdb.Command):
"""Dollar print command for Nim, `$ expr` will invoke Nim's $ operator and print the result."""
def __init__ (self):
super (DollarPrintCmd, self).__init__ ("$", gdb.COMMAND_DATA, gdb.COMPLETE_EXPRESSION)
def invoke(self, arg, from_tty):
param = gdb.parse_and_eval(arg)
strValue = DollarPrintFunction.invoke_static(param)
if strValue:
gdb.write(
NimStringPrinter(strValue).to_string() + "\n",
gdb.STDOUT
)
# could not find a suitable dollar overload. This here is the
# fallback to get sensible output of basic types anyway.
elif param.type.code == gdb.TYPE_CODE_ARRAY and param.type.target().name == "char":
gdb.write(param.string("utf-8", "ignore") + "\n", gdb.STDOUT)
elif param.type.code == gdb.TYPE_CODE_INT:
gdb.write(str(int(param)) + "\n", gdb.STDOUT)
elif param.type.name == "NIM_BOOL":
if int(param) != 0:
gdb.write("true\n", gdb.STDOUT)
else:
gdb.write("false\n", gdb.STDOUT)
DollarPrintCmd()
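# example use from a gdb session, assuming a Nim program compiled with debug info
# (the variable names below are hypothetical):
#
#   (gdb) $ myNimString          # run Nim's $ on a value and print the result
#   (gdb) print $dollar(mySeq)   # same conversion, usable inside larger expressions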
################################################################################
##### GDB Commands to invoke common nim tools.
################################################################################
import subprocess, os
class KochCmd (gdb.Command):
"""Command that invokes ``koch'', the build tool for the compiler."""
def __init__ (self):
super (KochCmd, self).__init__ ("koch",
gdb.COMMAND_USER, gdb.COMPLETE_FILENAME)
self.binary = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "koch")
def invoke(self, argument, from_tty):
import os
subprocess.run([self.binary] + gdb.string_to_argv(argument))
KochCmd()
class NimCmd (gdb.Command):
"""Command that invokes ``nim'', the nim compiler."""
def __init__ (self):
super (NimCmd, self).__init__ ("nim",
gdb.COMMAND_USER, gdb.COMPLETE_FILENAME)
self.binary = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "bin/nim")
def invoke(self, argument, from_tty):
subprocess.run([self.binary] + gdb.string_to_argv(argument))
NimCmd()
class NimbleCmd (gdb.Command):
"""Command that invokes ``nimble'', the nim package manager and build tool."""
def __init__ (self):
super (NimbleCmd, self).__init__ ("nimble",
gdb.COMMAND_USER, gdb.COMPLETE_FILENAME)
self.binary = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "bin/nimble")
def invoke(self, argument, from_tty):
subprocess.run([self.binary] + gdb.string_to_argv(argument))
NimbleCmd()
################################################################################
##### Value pretty printers
################################################################################
class NimBoolPrinter:
pattern = re.compile(r'^NIM_BOOL$')
def __init__(self, val):
self.val = val
def to_string(self):
if self.val == 0:
return "false"
else:
return "true"
################################################################################
class NimStringPrinter:
pattern = re.compile(r'^NimStringDesc \*$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
if self.val:
l = int(self.val['Sup']['len'])
return self.val['data'].lazy_string(encoding="utf-8", length=l)
else:
return ""
class NimRopePrinter:
pattern = re.compile(r'^tyObject_RopeObj__([A-Za-z0-9]*) \*$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
if self.val:
left = NimRopePrinter(self.val["left"]).to_string()
data = NimStringPrinter(self.val["data"]).to_string()
right = NimRopePrinter(self.val["right"]).to_string()
return left + data + right
else:
return ""
################################################################################
# proc reprEnum(e: int, typ: PNimType): string {.compilerRtl.} =
# ## Return string representation for enumeration values
# var n = typ.node
# if ntfEnumHole notin typ.flags:
# let o = e - n.sons[0].offset
# if o >= 0 and o <% typ.node.len:
# return $n.sons[o].name
# else:
# # ugh we need a slow linear search:
# var s = n.sons
# for i in 0 .. n.len-1:
# if s[i].offset == e:
# return $s[i].name
# result = $e & " (invalid data!)"
def reprEnum(e, typ):
""" this is a port of the nim runtime function `reprEnum` to python """
e = int(e)
n = typ["node"]
flags = int(typ["flags"])
# 1 << 6 is {ntfEnumHole}
if ((1 << 6) & flags) == 0:
o = e - int(n["sons"][0]["offset"])
    if o >= 0 and o < int(n["len"]):
return n["sons"][o]["name"].string("utf-8", "ignore")
else:
# ugh we need a slow linear search:
s = n["sons"]
for i in range(0, int(n["len"])):
if int(s[i]["offset"]) == e:
return s[i]["name"].string("utf-8", "ignore")
return str(e) + " (invalid data!)"
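# for example, given a hypothetical Nim enum `type Color = enum red, green, blue`
# (no holes), reprEnum(1, <RTI value for Color>) would return "green"; when the
# ntfEnumHole flag is set, the slower linear search over node offsets is used instead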
def enumNti(typeNimName, idString):
typeInfoName = "NTI" + typeNimName.lower() + "__" + idString + "_"
nti = gdb.lookup_global_symbol(typeInfoName)
if nti is None:
typeInfoName = "NTI" + "__" + idString + "_"
nti = gdb.lookup_global_symbol(typeInfoName)
return (typeInfoName, nti)
class NimEnumPrinter:
pattern = re.compile(r'^tyEnum_([A-Za-z0-9]+)__([A-Za-z0-9]*)$')
def __init__(self, val):
self.val = val
typeName = self.val.type.name
match = self.pattern.match(typeName)
self.typeNimName = match.group(1)
typeInfoName, self.nti = enumNti(self.typeNimName, match.group(2))
if self.nti is None:
printErrorOnce(typeInfoName, f"NimEnumPrinter: lookup global symbol: '{typeInfoName}' failed for {typeName}.\n")
def to_string(self):
if self.nti:
arg0 = self.val
arg1 = self.nti.value(gdb.newest_frame())
return reprEnum(arg0, arg1)
else:
return self.typeNimName + "(" + str(int(self.val)) + ")"
################################################################################
class NimSetPrinter:
## the set printer is limited to sets that fit in an integer. Other
## sets are compiled to `NU8 *` (ptr uint8) and are invisible to
## gdb (currently).
pattern = re.compile(r'^tySet_tyEnum_([A-Za-z0-9]+)__([A-Za-z0-9]*)$')
def __init__(self, val):
self.val = val
typeName = self.val.type.name
match = self.pattern.match(typeName)
self.typeNimName = match.group(1)
typeInfoName, self.nti = enumNti(self.typeNimName, match.group(2))
if self.nti is None:
printErrorOnce(typeInfoName, f"NimSetPrinter: lookup global symbol: '{typeInfoName}' failed for {typeName}.\n")
def to_string(self):
if self.nti:
nti = self.nti.value(gdb.newest_frame())
enumStrings = []
val = int(self.val)
i = 0
while val > 0:
if (val & 1) == 1:
enumStrings.append(reprEnum(i, nti))
val = val >> 1
i += 1
return '{' + ', '.join(enumStrings) + '}'
else:
return str(int(self.val))
################################################################################
class NimHashSetPrinter:
pattern = re.compile(r'^tyObject_(HashSet)__([A-Za-z0-9]*)$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
counter = 0
capacity = 0
if self.val:
counter = int(self.val['counter'])
if self.val['data']:
capacity = int(self.val['data']['Sup']['len'])
return 'HashSet({0}, {1})'.format(counter, capacity)
def children(self):
if self.val:
data = NimSeqPrinter(self.val['data'])
for idxStr, entry in data.children():
if int(entry['Field0']) > 0:
yield ("data." + idxStr + ".Field1", str(entry['Field1']))
################################################################################
class NimSeqPrinter:
# the pointer is explicitly part of the type. So it is part of
# ``pattern``.
pattern = re.compile(r'^tySequence_\w* \*$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
len = 0
cap = 0
if self.val:
len = int(self.val['Sup']['len'])
cap = int(self.val['Sup']['reserved'])
return 'seq({0}, {1})'.format(len, cap)
def children(self):
if self.val:
val = self.val
valType = val.type
length = int(val['Sup']['len'])
if length <= 0:
return
dataType = valType['data'].type
data = val['data']
if self.val.type.name is None:
dataType = valType['data'].type.target().pointer()
data = val['data'].cast(dataType)
inaccessible = False
for i in range(length):
if inaccessible:
return
try:
str(data[i])
yield "data[{0}]".format(i), data[i]
except RuntimeError:
inaccessible = True
yield "data[{0}]".format(i), "inaccessible"
################################################################################
class NimArrayPrinter:
pattern = re.compile(r'^tyArray_\w*$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return 'array'
def children(self):
length = self.val.type.sizeof // self.val[0].type.sizeof
align = len(str(length-1))
for i in range(length):
yield ("[{0:>{1}}]".format(i, align), self.val[i])
################################################################################
class NimStringTablePrinter:
pattern = re.compile(r'^tyObject_(StringTableObj)__([A-Za-z0-9]*)(:? \*)?$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'map'
def to_string(self):
counter = 0
capacity = 0
if self.val:
counter = int(self.val['counter'])
if self.val['data']:
capacity = int(self.val['data']['Sup']['len'])
return 'StringTableObj({0}, {1})'.format(counter, capacity)
def children(self):
if self.val:
data = NimSeqPrinter(self.val['data'].referenced_value())
for idxStr, entry in data.children():
if int(entry['Field0']) != 0:
yield (idxStr + ".Field0", entry['Field0'])
yield (idxStr + ".Field1", entry['Field1'])
################################################################
class NimTablePrinter:
pattern = re.compile(r'^tyObject_(Table)__([A-Za-z0-9]*)(:? \*)?$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'map'
def to_string(self):
counter = 0
capacity = 0
if self.val:
counter = int(self.val['counter'])
if self.val['data']:
capacity = int(self.val['data']['Sup']['len'])
return 'Table({0}, {1})'.format(counter, capacity)
def children(self):
if self.val:
data = NimSeqPrinter(self.val['data'])
for idxStr, entry in data.children():
if int(entry['Field0']) != 0:
yield (idxStr + '.Field1', entry['Field1'])
yield (idxStr + '.Field2', entry['Field2'])
################################################################
# this is untested, therefore disabled
# class NimObjectPrinter:
# pattern = re.compile(r'^tyObject_([A-Za-z0-9]+)__(_?[A-Za-z0-9]*)(:? \*)?$')
# def __init__(self, val):
# self.val = val
# self.valType = None
# self.valTypeNimName = None
# def display_hint(self):
# return 'object'
# def _determineValType(self):
# if self.valType is None:
# vt = self.val.type
# if vt.name is None:
# target = vt.target()
# self.valType = target.pointer()
# self.fields = target.fields()
# self.valTypeName = target.name
# self.isPointer = True
# else:
# self.valType = vt
# self.fields = vt.fields()
# self.valTypeName = vt.name
# self.isPointer = False
# def to_string(self):
# if self.valTypeNimName is None:
# self._determineValType()
# match = self.pattern.match(self.valTypeName)
# self.valTypeNimName = match.group(1)
# return self.valTypeNimName
# def children(self):
# self._determineValType()
# if self.isPointer and int(self.val) == 0:
# return
# self.baseVal = self.val.referenced_value() if self.isPointer else self.val
# for c in self.handleFields(self.baseVal, getNimRti(self.valTypeName)):
# yield c
# def handleFields(self, currVal, rti, fields = None):
# rtiSons = None
# discField = (0, None)
# seenSup = False
# if fields is None:
# fields = self.fields
# try: # XXX: remove try after finished debugging this method
# for (i, field) in enumerate(fields):
# if field.name == "Sup": # inherited data
# seenSup = True
# baseRef = rti['base']
# if baseRef:
# baseRti = baseRef.referenced_value()
# baseVal = currVal['Sup']
# baseValType = baseVal.type
# if baseValType.name is None:
# baseValType = baseValType.target().pointer()
# baseValFields = baseValType.target().fields()
# else:
# baseValFields = baseValType.fields()
# for c in self.handleFields(baseVal, baseRti, baseValFields):
# yield c
# else:
# if field.type.code == gdb.TYPE_CODE_UNION:
# # if not rtiSons:
# rtiNode = rti['node'].referenced_value()
# rtiSons = rtiNode['sons']
# if not rtiSons and int(rtiNode['len']) == 0 and str(rtiNode['name']) != "0x0":
# rtiSons = [rti['node']] # sons are dereferenced by the consumer
# if not rtiSons:
# printErrorOnce(self.valTypeName, f"NimObjectPrinter: UNION field can't be displayed without RTI {self.valTypeName}, using fallback.\n")
# # yield (field.name, self.baseVal[field]) # XXX: this fallback seems wrong
# return # XXX: this should probably continue instead?
# if int(rtiNode['len']) != 0 and str(rtiNode['name']) != "0x0":
# gdb.write(f"wtf IT HAPPENED {self.valTypeName}\n", gdb.STDERR)
# discNode = rtiSons[discField[0]].referenced_value()
# if not discNode:
# raise ValueError("Can't find union discriminant field in object RTI")
# discNodeLen = int(discNode['len'])
# discFieldVal = int(currVal[discField[1].name])
# unionNodeRef = None
# if discFieldVal < discNodeLen:
# unionNodeRef = discNode['sons'][discFieldVal]
# if not unionNodeRef:
# unionNodeRef = discNode['sons'][discNodeLen]
# if not unionNodeRef:
# printErrorOnce(self.valTypeName + "no union node", f"wtf is up with sons {self.valTypeName} {unionNodeRef} {rtiNode['offset']} {discNode} {discFieldVal} {discNodeLen} {discField[1].name} {field.name} {field.type}\n")
# continue
# unionNode = unionNodeRef.referenced_value()
# fieldName = "" if field.name == None else field.name.lower()
# unionNodeName = "" if not unionNode['name'] else unionNode['name'].string("utf-8", "ignore")
# if not unionNodeName or unionNodeName.lower() != fieldName:
# unionFieldName = f"_{discField[1].name.lower()}_{int(rti['node'].referenced_value()['len'])}"
# gdb.write(f"wtf i: {i} union: {unionFieldName} field: {fieldName} type: {field.type.name} tag: {field.type.tag}\n", gdb.STDERR)
# else:
# unionFieldName = unionNodeName
# if discNodeLen == 0:
# yield (unionFieldName, currVal[unionFieldName])
# else:
# unionNodeLen = int(unionNode['len'])
# if unionNodeLen > 0:
# for u in range(unionNodeLen):
# un = unionNode['sons'][u].referenced_value()['name'].string("utf-8", "ignore")
# yield (un, currVal[unionFieldName][un])
# else:
# yield(unionNodeName, currVal[unionFieldName])
# else:
# discIndex = i - 1 if seenSup else i
# discField = (discIndex, field) # discriminant field is the last normal field
# yield (field.name, currVal[field.name])
# except GeneratorExit:
# raise
# except:
# gdb.write(f"wtf {self.valTypeName} {i} fn: {field.name} df: {discField} rti: {rti} rtiNode: {rti['node'].referenced_value()} rtiSons: {rtiSons} {sys.exc_info()} {traceback.format_tb(sys.exc_info()[2], limit = 10)}\n", gdb.STDERR)
# gdb.write(f"wtf {self.valTypeName} {i} {field.name}\n", gdb.STDERR)
# # seenSup = False
# # for (i, field) in enumerate(fields):
# # # if field.name:
# # # val = currVal[field.name]
# # # else:
# # # val = None
# # rtiNode = rti['node'].referenced_value()
# # rtiLen = int(rtiNode['len'])
# # if int(rtiNode['len']) > 0:
# # sons = rtiNode['sons']
# # elif int(rti['len']) == 0 and str(rti['name']) != "0x0":
# # sons = [rti['node']] # sons are dereferenced by the consumer
# # sonsIdx = i - 1 if seenSup else i
# # s = sons[sonsIdx].referenced_value()
# # addr = int(currVal.address)
# # off = addr + int(rtiNode['offset'])
# # seenSup = seenSup or field.name == "Sup"
# # gdb.write(f"wtf: i: {i} sonsIdx: {sonsIdx} field: {field.name} rtiLen: {rtiLen} rti: {rti} rtiNode: {rtiNode} isUnion: {field.type.code == gdb.TYPE_CODE_UNION} s: {s}\n", gdb.STDERR)
# raise
################################################################################
class NimFrameFilter:
def __init__(self):
self.name = "nim-frame-filter"
self.enabled = True
self.priority = 100
self.hidden = {"NimMainInner","NimMain", "main"}
def filter(self, iterator):
for framedecorator in iterator:
if framedecorator.function() not in self.hidden:
yield framedecorator
################################################################################
def makematcher(klass):
def matcher(val):
typeName = str(val.type)
try:
if hasattr(klass, 'pattern') and hasattr(klass, '__name__'):
# print(typeName + " <> " + klass.__name__)
if klass.pattern.match(typeName):
return klass(val)
except Exception as e:
print(klass)
printErrorOnce(typeName, "No matcher for type '" + typeName + "': " + str(e) + "\n")
return matcher
def register_nim_pretty_printers_for_object(objfile):
nimMainSym = gdb.lookup_global_symbol("NimMain", gdb.SYMBOL_FUNCTIONS_DOMAIN)
if nimMainSym and nimMainSym.symtab.objfile == objfile:
print("set Nim pretty printers for ", objfile.filename)
gdb.types.register_type_printer(objfile, NimTypePrinter())
objfile.pretty_printers = [makematcher(var) for var in list(globals().values()) if hasattr(var, 'pattern')]
# Register pretty printers for all objfiles that are already loaded.
for old_objfile in gdb.objfiles():
register_nim_pretty_printers_for_object(old_objfile)
# Register an event handler to register nim pretty printers for all future objfiles.
def new_object_handler(event):
register_nim_pretty_printers_for_object(event.new_objfile)
gdb.events.new_objfile.connect(new_object_handler)
gdb.frame_filters = {"nim-frame-filter": NimFrameFilter()}
| Nim-devel | tools/nim-gdb.py |
import gdb
# this test should test the gdb pretty printers of the nim
# library. But be aware this test is not complete. It only tests the
# command line version of gdb. It does not test anything for the
# machine interface of gdb. This means that even if this test passes, gdb
# frontends might still be broken.
gdb.execute("source ../../../tools/nim-gdb.py")
# debug all instances of the generic function `myDebug`, should be 14
gdb.execute("rbreak myDebug")
gdb.execute("run")
outputs = [
'meTwo',
'""',
'"meTwo"',
'{meOne, meThree}',
'MyOtherEnum(1)',
'5',
'array = {1, 2, 3, 4, 5}',
'seq(0, 0)',
'seq(0, 10)',
'array = {"one", "two"}',
'seq(3, 3) = {1, 2, 3}',
'seq(3, 3) = {"one", "two", "three"}',
'Table(3, 64) = {[4] = "four", [5] = "five", [6] = "six"}',
'Table(3, 8) = {["two"] = 2, ["three"] = 3, ["one"] = 1}',
'{a = 1, b = "some string"}'
]
for i, expected in enumerate(outputs):
gdb.write(f"{i+1}) expecting: {expected}: ", gdb.STDLOG)
gdb.flush()
functionSymbol = gdb.selected_frame().block().function
assert functionSymbol.line == 41, str(functionSymbol.line)
if i == 6:
# myArray is passed as pointer to int to myDebug. I look up myArray up in the stack
gdb.execute("up")
raw = gdb.parse_and_eval("myArray")
elif i == 9:
# myOtherArray is passed as pointer to int to myDebug. I look up myOtherArray up in the stack
gdb.execute("up")
raw = gdb.parse_and_eval("myOtherArray")
else:
raw = gdb.parse_and_eval("arg")
output = str(raw)
assert output == expected, "{0} : output: ({1}) != expected: ({2})".format(i, output, expected)
gdb.write(f"passed\n", gdb.STDLOG)
gdb.execute("continue")
| Nim-devel | tests/untestable/gdb/gdb_pretty_printer_test.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Generates the unidecode.dat module
# (c) 2010 Andreas Rumpf
from unidecode import unidecode
try:
import warnings
warnings.simplefilter("ignore")
except ImportError:
pass
def main():
f = open("unidecode.dat", "wb+")
for x in range(128, 0xffff + 1):
u = eval("u'\\u%04x'" % x)
val = unidecode(u)
# f.write("%x | " % x)
if x == 0x2028: # U+2028 = LINE SEPARATOR
val = ""
elif x == 0x2029: # U+2029 = PARAGRAPH SEPARATOR
val = ""
f.write("%s\n" % val)
f.close()
main()
| Nim-devel | lib/pure/unidecode/gen.py |
from setuptools import setup, find_packages
setup(
name = 'flash-attention-jax',
packages = find_packages(exclude=[]),
version = '0.3.1',
license='MIT',
description = 'Flash Attention - in Jax',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/flash-attention-jax',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'jax'
],
install_requires=[
'einops',
'jax>=0.2.20'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| flash-attention-jax-main | setup.py |
import jax
from jax import nn
from jax import jit, numpy as jnp
from jax.numpy import einsum
from einops import rearrange
EPSILON = 1e-10
MASK_VALUE = -1e10
COSINE_SIM_SCALE = 10
@jit
def attention(q, k, v, key_mask):
dim, k_len = q.shape[-1], k.shape[-2]
scale = 1 / jnp.sqrt(dim)
q = q * scale
sim = einsum('... i d, ... j d -> ... i j', q, k)
key_mask = rearrange(key_mask, 'b j -> b 1 1 j')
sim = jnp.where(key_mask, sim, MASK_VALUE)
attn = nn.softmax(sim, axis = -1)
return attn @ v
@jit
def causal_attention(q, k, v):
q_len, dim, k_len = *q.shape[-2:], k.shape[-2]
scale = 1 / jnp.sqrt(dim)
q = q * scale
sim = einsum('... i d, ... j d -> ... i j', q, k)
causal_mask = jnp.triu(jnp.ones((q_len, k_len)), k_len - q_len + 1)
sim = jnp.where(causal_mask, MASK_VALUE, sim)
attn = nn.softmax(sim, axis = -1)
return einsum('... i j, ... j d -> ... i d', attn, v)
# cosine sim attention
@jit
def l2norm(t):
return t / (jnp.linalg.norm(t, axis = -1, keepdims = True) + EPSILON)
@jit
def cosine_sim_attention(q, k, v, key_mask):
dim, k_len = q.shape[-1], k.shape[-2]
q, k = map(l2norm, (q, k))
sim = einsum('... i d, ... j d -> ... i j', q, k) * COSINE_SIM_SCALE
key_mask = rearrange(key_mask, 'b j -> b 1 1 j')
sim = jnp.where(key_mask, sim, MASK_VALUE)
attn = nn.softmax(sim, axis = -1)
return einsum('... i j, ... j d -> ... i d', attn, v)
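# Illustrative usage sketch (not part of the original module). The shapes below are
# arbitrary assumptions: tensors follow the (batch, heads, seq_len, dim) layout and the
# key mask is a boolean array of shape (batch, k_len).
if __name__ == '__main__':
    rng = jax.random.PRNGKey(0)
    q = jax.random.normal(rng, (1, 2, 16, 32))   # (batch, heads, q_len, dim)
    k = jax.random.normal(rng, (1, 2, 24, 32))   # (batch, heads, k_len, dim)
    v = jax.random.normal(rng, (1, 2, 24, 32))
    key_mask = jnp.ones((1, 24), dtype = bool)

    out = attention(q, k, v, key_mask)                    # (1, 2, 16, 32)
    causal_out = causal_attention(q, k, v)                # (1, 2, 16, 32)
    cosine_out = cosine_sim_attention(q, k, v, key_mask)  # (1, 2, 16, 32)
    print(out.shape, causal_out.shape, cosine_out.shape)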
| flash-attention-jax-main | flash_attention_jax/attention.py |
import math
from functools import partial
import jax
from jax import lax, numpy as jnp, jit
# constants
HIGHEST_PRECISION = jax.lax.Precision.HIGHEST
einsum = partial(jnp.einsum, precision = HIGHEST_PRECISION)
# Figure 1 from https://arxiv.org/abs/2112.05682
# cleaned up
def _query_chunk_attention(q, k, v, k_chunk_size = 4096):
q_len, k_len, dim, v_dim = q.shape[-2], *k.shape, v.shape[-1]
k_chunk_size = min(k_chunk_size, k_len)
q = q / jnp.sqrt(dim)
@partial(jax.checkpoint, prevent_cse = False)
def summarize_chunk(q, k, v):
attn_weights = einsum('qd, kd -> qk', q, k)
max_score = jnp.max(attn_weights, axis = -1, keepdims = True)
max_score = jax.lax.stop_gradient(max_score)
exp_weights = jnp.exp(attn_weights - max_score)
exp_values = einsum('vf, qv -> qf', v, exp_weights)
return (exp_values, exp_weights.sum(axis = -1), max_score.reshape((q_len,)))
def chunk_scanner(chunk_idx):
k_chunk = lax.dynamic_slice(k, (chunk_idx, 0), slice_sizes=(k_chunk_size, dim))
v_chunk = lax.dynamic_slice(v, (chunk_idx, 0), slice_sizes=(k_chunk_size, v_dim))
return summarize_chunk(q, k_chunk, v_chunk)
chunk_values, chunk_weights, chunk_max = jax.lax.map(chunk_scanner, xs = jnp.arange(0, k_len, k_chunk_size))
global_max = jnp.max(chunk_max, axis = 0, keepdims = True)
max_diffs = jnp.exp(chunk_max - global_max)
chunk_values *= jnp.expand_dims(max_diffs, axis=-1)
chunk_weights *= max_diffs
all_values = chunk_values.sum(axis = 0)
all_weights = jnp.expand_dims(chunk_weights, -1).sum(axis = 0)
return all_values / all_weights
@jit
def rabe_attention(q, k, v, q_chunk_size = 1024, k_chunk_size = 4096):
q_len, dim, v_dim = *q.shape, v.shape[-1]
def chunk_scanner(chunk_idx, _):
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (min(q_chunk_size, q_len), dim))
return (chunk_idx + q_chunk_size, _query_chunk_attention(q_chunk, k, v, k_chunk_size = k_chunk_size))
_, res = jax.lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / q_chunk_size))
return res.reshape(q_len, v_dim)
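# Illustrative usage sketch (not part of the original module). rabe_attention operates on
# single-head 2d tensors of shape (seq_len, dim); the sequence lengths below are arbitrary
# assumptions and the chunk sizes stay at their (static) defaults under jit.
if __name__ == '__main__':
    q = jnp.ones((2048, 64))
    k = jnp.ones((4096, 64))
    v = jnp.ones((4096, 64))
    out = rabe_attention(q, k, v)
    print(out.shape)  # (2048, 64)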
| flash-attention-jax-main | flash_attention_jax/rabe_attention.py |
from flash_attention_jax.flash_attention import flash_attention
from flash_attention_jax.cosine_sim_flash_attention import cosine_sim_flash_attention
from flash_attention_jax.causal_flash_attention import causal_flash_attention
from flash_attention_jax.rabe_attention import rabe_attention
from flash_attention_jax.attention import attention, causal_attention, cosine_sim_attention
from flash_attention_jax.utils import value_and_grad_difference, PRNGKeyGenerator
plain_attention = attention
| flash-attention-jax-main | flash_attention_jax/__init__.py |
import math
import jax
from functools import partial
from jax import nn
from jax import custom_vjp
from jax import numpy as jnp, lax, jit
# constants
EPSILON = 1e-10
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
COSINE_SIM_SCALE = 10 # this may need to be a function of log(sequence length), but 16 was sufficient for 2048 and 4096 in my tests
# flash attention
def _query_chunk_flash_attention(chunk_idx, q, k, v, key_mask):
q_len, k_len, dim, v_dim = q.shape[-2], *k.shape, v.shape[-1]
def chunk_scanner(carries, _):
chunk_idx, out, row_sum = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, dim))
v_chunk = lax.dynamic_slice(v, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, v_dim))
key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx,), slice_sizes=(k_chunk_sizes,))
attn_weights = (q @ k_chunk.transpose() * COSINE_SIM_SCALE) - COSINE_SIM_SCALE # the output of this will range from [-2 * scale, 0], and the row sums are now bounded by key/value sequence length - you can also shift this more if you wish to tailor the normalization constant (in the case of extreme sequence lengths)
attn_weights = jnp.where(key_mask_chunk, attn_weights, MASK_VALUE)
exp_weights = jnp.exp(attn_weights)
exp_weights = jnp.where(key_mask_chunk, exp_weights, 0.)
block_row_sum = jnp.sum(exp_weights, axis = -1, keepdims = True)
exp_values = exp_weights @ v_chunk
chunk_out = exp_values / k_len
return (chunk_idx + k_chunk_sizes, out + chunk_out, row_sum + block_row_sum), None
out = jnp.zeros((q_len, dim))
row_sum = jnp.zeros((q_len, 1))
(_, out, row_sum), _ = lax.scan(chunk_scanner, init = (0, out, row_sum), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE))
out = out * (k_len / (row_sum + EPSILON)) # renormalize after acquiring all the correct row sums
out = out.reshape(q_len, v_dim)
row_sum = row_sum.reshape(q_len)
return out, row_sum
@jit
def l2norm(t):
return t / (jnp.linalg.norm(t, axis = -1, keepdims = True) + EPSILON)
@jit
def cosine_sim_flash_attention(q, k, v, key_mask):
q, k = map(l2norm, (q, k))
return cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask)
def _cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask):
q_len, dim, v_dim = *q.shape, v.shape[-1]
def chunk_scanner(chunk_idx, _):
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (chunk_sizes, dim))
return (chunk_idx + chunk_sizes, _query_chunk_flash_attention(chunk_idx, q_chunk, k, v, key_mask))
_, (out, row_sum) = lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE))
out = out.reshape(q_len, v_dim)
row_sum = row_sum.reshape(q_len)
return out, (row_sum,)
@custom_vjp
def cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask):
out, _ = _cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask)
return out
@jit
def flash_attention_forward(q, k, v, key_mask):
out, (row_sum,) = _cosine_sim_flash_attention_after_l2norm(q, k, v, key_mask)
return out, (q, k, v, key_mask, out, row_sum)
def _query_chunk_flash_attention_backward(q, k, v, key_mask,o, do, l):
q_len, dim, k_len, v_dim = *q.shape, *v.shape
def chunk_scanner(carries, _):
chunk_idx, dq = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, dim))
v_chunk = lax.dynamic_slice(v, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, v_dim))
key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx,), slice_sizes=(k_chunk_sizes,))
attn_weights = q @ k_chunk.transpose() * COSINE_SIM_SCALE - COSINE_SIM_SCALE
exp_attn_weights = jnp.exp(attn_weights)
exp_attn_weights = jnp.where(key_mask_chunk, exp_attn_weights, 0.)
p = exp_attn_weights / (l + EPSILON)
dv_chunk = p.transpose() @ do
dp = do @ v_chunk.transpose()
D = jnp.sum(do * o, axis = -1, keepdims = True)
ds = p * COSINE_SIM_SCALE * (dp - D)
dq_chunk = ds @ k_chunk
dk_chunk = ds.transpose() @ q
return (chunk_idx + k_chunk_sizes, dq + dq_chunk), (dk_chunk, dv_chunk)
dq = jnp.zeros_like(q)
(_, dq), (dk, dv) = lax.scan(chunk_scanner, init = (0, dq), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE))
dq = dq.reshape(q_len, dim)
dk = dk.reshape(k_len, v_dim)
dv = dv.reshape(k_len, v_dim)
return dq, dk, dv
@jit
def flash_attention_backward(res, do):
q, k, v, key_mask, o, l = res
q_len, dim = q.shape
dk = jnp.zeros_like(k)
dv = jnp.zeros_like(v)
l = l.reshape(q_len, 1)
def chunk_scanner(carries, _):
chunk_idx, dk, dv = carries
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0), slice_sizes = (chunk_sizes, q.shape[-1]))
l_chunk = lax.dynamic_slice(l, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1))
o_chunk = lax.dynamic_slice(o, (chunk_idx, 0), slice_sizes = (chunk_sizes, o.shape[-1]))
do_chunk = lax.dynamic_slice(do, (chunk_idx, 0), slice_sizes = (chunk_sizes, do.shape[-1]))
dq_chunk, dk_chunk, dv_chunk = _query_chunk_flash_attention_backward(q_chunk, k, v, key_mask, o_chunk, do_chunk, l_chunk)
return (chunk_idx + chunk_sizes, dk + dk_chunk, dv + dv_chunk), dq_chunk
(_, dk, dv), dq = lax.scan(chunk_scanner, init = (0, dk, dv), xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE))
dq = dq.reshape(q_len, dim)
return dq, dk, dv, None
cosine_sim_flash_attention_after_l2norm.defvjp(flash_attention_forward, flash_attention_backward)
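# Illustrative usage sketch (not part of the original module). This variant expects
# single-head 2d tensors of shape (seq_len, dim) and a 1d boolean key mask; the sizes
# below are arbitrary assumptions.
if __name__ == '__main__':
    q = jnp.ones((1024, 64))
    k = jnp.ones((2048, 64))
    v = jnp.ones((2048, 64))
    key_mask = jnp.ones((2048,), dtype = bool)
    out = cosine_sim_flash_attention(q, k, v, key_mask)
    print(out.shape)  # (1024, 64)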
| flash-attention-jax-main | flash_attention_jax/cosine_sim_flash_attention.py |
import math
import jax
from functools import partial
from jax import nn
from jax import custom_vjp
from jax import numpy as jnp, lax, jit
from jax.numpy import einsum
from einops import rearrange
# constants
EPSILON = 1e-10
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
# flash attention
def _query_chunk_flash_attention(q_range_chunk, k_range, q, k, v):
q_len, k_len, bh, dim, v_dim = q.shape[0], *k.shape, v.shape[-1]
scale = 1 / jnp.sqrt(dim)
q_scaled = q * scale
def chunk_scanner(carries, _):
key_chunk_idx, out, row_sum, row_max = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (key_chunk_idx, 0, 0), slice_sizes=(k_chunk_sizes, bh, dim))
v_chunk = lax.dynamic_slice(v, (key_chunk_idx, 0, 0), slice_sizes=(k_chunk_sizes, bh, v_dim))
k_range_chunk = lax.dynamic_slice(k_range, (0, key_chunk_idx), slice_sizes=(1, k_chunk_sizes))
causal_mask = q_range_chunk < k_range_chunk
attn_weights = einsum('i ... d, j ... d -> i ... j', q_scaled, k_chunk)
causal_mask = rearrange(causal_mask, 'i j -> i 1 j')
attn_weights = jnp.where(causal_mask, MASK_VALUE, attn_weights)
block_row_max = jnp.max(attn_weights, axis = -1, keepdims = True)
exp_weights = jnp.exp(attn_weights - block_row_max)
exp_weights = jnp.where(causal_mask, 0., exp_weights)
block_row_sum = jnp.sum(exp_weights, axis = -1, keepdims = True) + EPSILON
exp_values = einsum('i ... j, j ... d -> i ... d', exp_weights, v_chunk)
new_row_max = jnp.maximum(block_row_max, row_max)
exp_row_max_diff = jnp.exp(row_max - new_row_max)
exp_block_row_max_diff = jnp.exp(block_row_max - new_row_max)
new_row_sum = exp_row_max_diff * row_sum + exp_block_row_max_diff * block_row_sum
out = (row_sum / new_row_sum) * exp_row_max_diff * out + \
(exp_block_row_max_diff / new_row_sum) * exp_values
return (key_chunk_idx + k_chunk_sizes, out, new_row_sum, new_row_max), None
out = jnp.zeros((q_len, bh, dim))
row_sum = jnp.zeros((q_len, bh, 1))
row_max = jnp.ones((q_len, bh, 1)) * -1e6
(_, out, row_sum, row_max), _ = lax.scan(chunk_scanner, init = (0, out, row_sum, row_max), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE))
out = out.reshape(q_len, bh, v_dim)
row_sum = row_sum.reshape(q_len, bh)
row_max = row_max.reshape(q_len, bh)
return out, row_sum, row_max
def _causal_flash_attention(q, k, v):
batch, heads, q_len, dim, k_len, v_dim = *q.shape, *v.shape[-2:]
bh = batch * heads
q, k, v = map(lambda t: rearrange(t, 'b h n d -> n (b h) d'), (q, k, v))
q_range = jnp.arange(q_len).reshape(q_len, 1) + (k_len - q_len)
k_range = jnp.arange(k_len).reshape(1, k_len)
def chunk_scanner(chunk_idx, _):
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0, 0), slice_sizes = (chunk_sizes, bh, dim))
q_range_chunk = lax.dynamic_slice(q_range, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1))
return (chunk_idx + chunk_sizes, _query_chunk_flash_attention(q_range_chunk, k_range, q_chunk, k, v))
_, (out, row_sum, row_max) = lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE))
out = out.reshape(q_len, bh, v_dim)
row_sum = row_sum.reshape(q_len, bh)
row_max = row_max.reshape(q_len, bh)
out = rearrange(out, 'n (b h) d -> b h n d', b = batch)
return out, (row_sum, row_max)
@custom_vjp
@jit
def causal_flash_attention(q, k, v):
out, _ = _causal_flash_attention(q, k, v)
return out
@jit
def flash_attention_forward(q, k, v):
out, (row_sum, row_max) = _causal_flash_attention(q, k, v)
return out, (q, k, v, out, row_sum, row_max)
def _query_chunk_flash_attention_backward(query_range_chunk, key_range, q, k, v, o, do, l, m):
q_len, bh, dim, k_len, _, v_dim = *q.shape, *v.shape
scale = 1 / jnp.sqrt(dim)
q_scaled = q * scale
def chunk_scanner(carries, _):
key_chunk_idx, dq = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (key_chunk_idx, 0, 0), slice_sizes=(k_chunk_sizes, bh, dim))
v_chunk = lax.dynamic_slice(v, (key_chunk_idx, 0, 0), slice_sizes=(k_chunk_sizes, bh, v_dim))
key_range_chunk = lax.dynamic_slice(key_range, (0, key_chunk_idx), slice_sizes=(1, k_chunk_sizes))
causal_mask = query_range_chunk < key_range_chunk
attn_weights = einsum('i ... d, j ... d -> i ... j', q_scaled, k_chunk)
causal_mask = rearrange(causal_mask, 'i j -> i 1 j')
attn_weights = jnp.where(causal_mask, MASK_VALUE, attn_weights)
exp_attn_weights = jnp.exp(attn_weights - m)
exp_attn_weights = jnp.where(causal_mask, 0., exp_attn_weights)
p = exp_attn_weights / l
dv_chunk = einsum('i ... j, i ... d -> j ... d', p, do)
dp = einsum('i ... d, j ... d -> i ... j', do, v_chunk)
D = jnp.sum(do * o, axis = -1, keepdims = True)
ds = p * scale * (dp - D)
dq_chunk = einsum('i ... j, j ... d -> i ... d', ds, k_chunk)
dk_chunk = einsum('i ... j, i ... d -> j ... d', ds, q)
return (key_chunk_idx + k_chunk_sizes, dq + dq_chunk), (dk_chunk, dv_chunk)
dq = jnp.zeros_like(q)
(_, dq), (dk, dv) = lax.scan(chunk_scanner, init = (0, dq), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE))
dq = dq.reshape(q_len, bh, dim)
dk = dk.reshape(k_len, bh, v_dim)
dv = dv.reshape(k_len, bh, v_dim)
return dq, dk, dv
@jit
def flash_attention_backward(res, do):
q, k, v, o, l, m = res
batch, heads, q_len, dim, k_len, v_dim = *q.shape, *v.shape[-2:]
bh = batch * heads
m = m.reshape(q_len, bh, 1)
l = l.reshape(q_len, bh, 1)
q, k, v, o, do = map(lambda t: rearrange(t, 'b h n d -> n (b h) d'), (q, k, v, o, do))
dk = jnp.zeros_like(k)
dv = jnp.zeros_like(v)
q_range = jnp.arange(q_len).reshape(q_len, 1) + (k_len - q_len)
k_range = jnp.arange(k_len).reshape(1, k_len)
def chunk_scanner(carries, _):
chunk_idx, dk, dv = carries
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0, 0), slice_sizes = (chunk_sizes, bh, q.shape[-1]))
q_range_chunk = lax.dynamic_slice(q_range, (chunk_idx, 0), slice_sizes = (chunk_sizes, 1))
m_chunk = lax.dynamic_slice(m, (chunk_idx, 0, 0), slice_sizes = (chunk_sizes, bh, 1))
l_chunk = lax.dynamic_slice(l, (chunk_idx, 0, 0), slice_sizes = (chunk_sizes, bh, 1))
o_chunk = lax.dynamic_slice(o, (chunk_idx, 0, 0), slice_sizes = (chunk_sizes, bh, o.shape[-1]))
do_chunk = lax.dynamic_slice(do, (chunk_idx, 0, 0), slice_sizes = (chunk_sizes, bh, do.shape[-1]))
dq_chunk, dk_chunk, dv_chunk = _query_chunk_flash_attention_backward(q_range_chunk, k_range, q_chunk, k, v, o_chunk, do_chunk, l_chunk, m_chunk)
return (chunk_idx + chunk_sizes, dk + dk_chunk, dv + dv_chunk), dq_chunk
(_, dk, dv), dq = lax.scan(chunk_scanner, init = (0, dk, dv), xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE))
dq = dq.reshape(q_len, bh, dim)
dq, dk, dv = map(lambda t: rearrange(t, 'n (b h) d -> b h n d', b = batch), (dq, dk, dv))
return dq, dk, dv
causal_flash_attention.defvjp(flash_attention_forward, flash_attention_backward)
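# Illustrative usage sketch (not part of the original module). causal_flash_attention
# expects (batch, heads, seq_len, dim) tensors; gradients flow through the custom vjp
# defined above. The sizes are arbitrary assumptions.
if __name__ == '__main__':
    q = jnp.ones((1, 2, 2048, 64))
    k = jnp.ones((1, 2, 2048, 64))
    v = jnp.ones((1, 2, 2048, 64))
    out = causal_flash_attention(q, k, v)
    print(out.shape)  # (1, 2, 2048, 64)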
| flash-attention-jax-main | flash_attention_jax/causal_flash_attention.py |
import jax
from functools import partial
import jax.numpy as jnp
from jax import random
from jax import value_and_grad
def value_and_grad_wrapper(fn, **kwargs):
@partial(value_and_grad, **kwargs)
def inner(*args, **kwargs):
return jnp.sum(fn(*args, **kwargs))
return inner
def diff(t1, t2):
return jnp.max(jnp.abs(t1 - t2))
def PRNGKeyGenerator(seed = 42):
key = random.PRNGKey(seed)
while True:
sub_key, key = random.split(key)
yield sub_key
def value_and_grad_difference(
fn1,
fn2,
seed = 42,
batch = 2,
heads = 4,
q_seq_len = 4096,
k_seq_len = 8192,
add_key_mask = True,
dim = 512
):
key_gen = PRNGKeyGenerator(seed)
q = random.normal(next(key_gen), (batch, heads, q_seq_len, dim))
k = random.normal(next(key_gen), (batch, heads, k_seq_len, dim))
v = random.normal(next(key_gen), (batch, heads, k_seq_len, dim))
key_mask = random.randint(next(key_gen), (batch, k_seq_len), 0, 2) == 1
fn1_value_and_grad, fn2_value_and_grad = map(partial(value_and_grad_wrapper, argnums = (0, 1, 2)), (fn1, fn2))
args = (q, k, v)
if add_key_mask:
args = (*args, key_mask)
o1, grads1 = fn1_value_and_grad(*args)
o2, grads2 = fn2_value_and_grad(*args)
return diff(o1, o2), [diff(*args) for args in zip(grads1, grads2)]
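# Illustrative usage sketch (not part of the original module): compare two numerically
# equivalent toy attention implementations with value_and_grad_difference. The helper
# names (attn_a, attn_b) and the small shapes are assumptions made only for this example.
if __name__ == '__main__':
    from jax import nn

    def attn_a(q, k, v, key_mask):
        sim = jnp.einsum('b h i d, b h j d -> b h i j', q, k) / jnp.sqrt(q.shape[-1])
        sim = jnp.where(key_mask[:, None, None, :], sim, -1e10)
        return jnp.einsum('b h i j, b h j d -> b h i d', nn.softmax(sim, axis = -1), v)

    def attn_b(q, k, v, key_mask):
        sim = (q / jnp.sqrt(q.shape[-1])) @ jnp.swapaxes(k, -1, -2)
        sim = jnp.where(key_mask[:, None, None, :], sim, -1e10)
        return nn.softmax(sim, axis = -1) @ v

    out_diff, grad_diffs = value_and_grad_difference(
        attn_a, attn_b,
        batch = 1, heads = 2, q_seq_len = 16, k_seq_len = 32, dim = 8
    )
    print(out_diff, grad_diffs)  # all differences should be ~0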
| flash-attention-jax-main | flash_attention_jax/utils.py |
import math
import jax
from functools import partial
from jax import nn
from jax import custom_vjp
from jax import numpy as jnp, lax, jit
from jax.numpy import einsum
from einops import rearrange
# constants
EPSILON = 1e-10
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
# flash attention
def _query_chunk_flash_attention(chunk_idx, q, k, v, key_mask):
q_len, batch, heads, dim, k_len, v_dim = *q.shape, k.shape[0], v.shape[-1]
scale = 1 / jnp.sqrt(dim)
q_scaled = q * scale
def chunk_scanner(carries, _):
chunk_idx, out, row_sum, row_max = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (chunk_idx, 0, 0, 0), slice_sizes=(k_chunk_sizes, batch, heads, dim))
v_chunk = lax.dynamic_slice(v, (chunk_idx, 0, 0, 0), slice_sizes=(k_chunk_sizes, batch, heads, v_dim))
key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx, 0), slice_sizes=(k_chunk_sizes, batch))
attn_weights = einsum('i ... d, j ... d -> i ... j', q_scaled, k_chunk)
key_mask_chunk = rearrange(key_mask_chunk, 'j b -> 1 b 1 j')
attn_weights = jnp.where(key_mask_chunk, attn_weights, MASK_VALUE)
block_row_max = jnp.max(attn_weights, axis = -1, keepdims = True)
new_row_max = jnp.maximum(block_row_max, row_max)
exp_weights = jnp.exp(attn_weights - new_row_max)
exp_weights = jnp.where(key_mask_chunk, exp_weights, 0.)
block_row_sum = jnp.sum(exp_weights, axis = -1, keepdims = True) + EPSILON
exp_values = einsum('i ... j, j ... d -> i ... d', exp_weights, v_chunk)
exp_row_max_diff = jnp.exp(row_max - new_row_max)
new_row_sum = exp_row_max_diff * row_sum + block_row_sum
out = (row_sum / new_row_sum) * exp_row_max_diff * out + \
(1. / new_row_sum) * exp_values
return (chunk_idx + k_chunk_sizes, out, new_row_sum, new_row_max), None
out = jnp.zeros((q_len, batch, heads, dim))
row_sum = jnp.zeros((q_len, batch, heads, 1))
row_max = jnp.ones((q_len, batch, heads, 1)) * -1e6
(_, out, row_sum, row_max), _ = lax.scan(chunk_scanner, init = (0, out, row_sum, row_max), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE))
row_sum = rearrange(row_sum, 'n ... 1 -> n ...')
row_max = rearrange(row_max, 'n ... 1 -> n ...')
lse = jnp.log(row_sum) + row_max
return out, lse
def _flash_attention(q, k, v, key_mask):
batch, heads, q_len, dim, v_dim = *q.shape, v.shape[-1]
def chunk_scanner(chunk_idx, _):
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, 0, 0, 0), slice_sizes = (chunk_sizes, batch, heads, dim))
return (chunk_idx + chunk_sizes, _query_chunk_flash_attention(chunk_idx, q_chunk, k, v, key_mask))
q, k, v = map(lambda t: rearrange(t, 'b h n d -> n b h d'), (q, k, v))
key_mask = rearrange(key_mask, 'b j -> j b')
_, (out, lse) = lax.scan(chunk_scanner, init = 0, xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE))
out = rearrange(out, 'c n b h d -> b h (c n) d')
lse = rearrange(lse, 'c n b h -> b h (c n)')
return out, lse
@custom_vjp
@jit
def flash_attention(q, k, v, key_mask):
out, _ = _flash_attention(q, k, v, key_mask)
return out
@jit
def flash_attention_forward(q, k, v, key_mask):
out, lse = _flash_attention(q, k, v, key_mask)
return out, (q, k, v, key_mask, out, lse)
def _query_chunk_flash_attention_backward(q, k, v, key_mask, o, do, lse):
q_len, batch, heads, dim, k_len, v_dim = *q.shape, v.shape[0], v.shape[-1]
scale = 1 / jnp.sqrt(dim)
q_scaled = q * scale
def chunk_scanner(carries, _):
chunk_idx, dq = carries
k_chunk_sizes = min(K_CHUNK_SIZE, k_len)
k_chunk = lax.dynamic_slice(k, (chunk_idx, batch, heads, 0), slice_sizes=(k_chunk_sizes, batch, heads, dim))
v_chunk = lax.dynamic_slice(v, (chunk_idx, batch, heads, 0), slice_sizes=(k_chunk_sizes, batch, heads, v_dim))
key_mask_chunk = lax.dynamic_slice(key_mask, (chunk_idx, batch), slice_sizes=(k_chunk_sizes, batch))
attn_weights = einsum('i ... d, j ... d -> i ... j', q_scaled, k_chunk)
p = jnp.exp(attn_weights - lse)
key_mask_chunk = rearrange(key_mask_chunk, 'j b -> 1 b 1 j')
p = jnp.where(key_mask_chunk, p, 0.)
dv_chunk = einsum('i ... j, i ... d -> j ... d', p, do)
dp = einsum('i ... d, j ... d -> i ... j', do, v_chunk)
D = jnp.sum(do * o, axis = -1, keepdims = True)
ds = p * scale * (dp - D)
dq_chunk = einsum('i ... j, j ... d -> i ... d', ds, k_chunk)
dk_chunk = einsum('i ... j, i ... d -> j ... d', ds, q)
return (chunk_idx + k_chunk_sizes, dq + dq_chunk), (dk_chunk, dv_chunk)
dq = jnp.zeros_like(q)
(_, dq), (dk, dv) = lax.scan(chunk_scanner, init = (0, dq), xs = None, length = math.ceil(k_len / K_CHUNK_SIZE))
dk = rearrange(dk, 'c n ... -> (c n) ...')
dv = rearrange(dv, 'c n ... -> (c n) ...')
return dq, dk, dv
@jit
def flash_attention_backward(res, do):
q, k, v, key_mask, o, lse = res
batch, heads, q_len, dim = q.shape
lse = rearrange(lse, 'b h n -> n b h 1')
q, k, v, o, do = map(lambda t: rearrange(t, 'b h n d -> n b h d'), (q, k, v, o, do))
key_mask = rearrange(key_mask, 'b j -> j b')
dk = jnp.zeros_like(k)
dv = jnp.zeros_like(v)
def chunk_scanner(carries, _):
chunk_idx, dk, dv = carries
chunk_sizes = min(Q_CHUNK_SIZE, q_len)
q_chunk = lax.dynamic_slice(q, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, q.shape[-1]))
lse_chunk = lax.dynamic_slice(lse, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, 1))
o_chunk = lax.dynamic_slice(o, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, o.shape[-1]))
do_chunk = lax.dynamic_slice(do, (chunk_idx, batch, heads, 0), slice_sizes = (chunk_sizes, batch, heads, do.shape[-1]))
dq_chunk, dk_chunk, dv_chunk = _query_chunk_flash_attention_backward(q_chunk, k, v, key_mask, o_chunk, do_chunk, lse_chunk)
return (chunk_idx + chunk_sizes, dk + dk_chunk, dv + dv_chunk), dq_chunk
(_, dk, dv), dq = lax.scan(chunk_scanner, init = (0, dk, dv), xs = None, length = math.ceil(q_len / Q_CHUNK_SIZE))
dq = rearrange(dq, 'c n b h d -> b h (c n) d')
dk, dv = map(lambda t: rearrange(t, 'n b h d -> b h n d'), (dk, dv))
return dq, dk, dv, None
flash_attention.defvjp(flash_attention_forward, flash_attention_backward)
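# Illustrative usage sketch (not part of the original module). flash_attention expects
# (batch, heads, seq_len, dim) tensors plus a boolean key mask of shape (batch, k_len);
# the sizes below are arbitrary assumptions.
if __name__ == '__main__':
    q = jnp.ones((1, 2, 2048, 64))
    k = jnp.ones((1, 2, 4096, 64))
    v = jnp.ones((1, 2, 4096, 64))
    key_mask = jnp.ones((1, 4096), dtype = bool)
    out = flash_attention(q, k, v, key_mask)
    print(out.shape)  # (1, 2, 2048, 64)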
| flash-attention-jax-main | flash_attention_jax/flash_attention.py |
from setuptools import setup, find_packages
setup(
name = 'hamburger-pytorch',
packages = find_packages(),
version = '0.0.3',
license='MIT',
description = 'Hamburger - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/hamburger-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'matrix factorization'
],
install_requires=[
'torch',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | hamburger-pytorch-main | setup.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from contextlib import contextmanager
from einops import repeat, rearrange
# helper fn
@contextmanager
def null_context():
yield
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# classes
class NMF(nn.Module):
def __init__(
self,
dim,
n,
ratio = 8,
K = 6,
eps = 2e-8
):
super().__init__()
r = dim // ratio
D = torch.zeros(dim, r).uniform_(0, 1)
C = torch.zeros(r, n).uniform_(0, 1)
self.K = K
self.D = nn.Parameter(D)
self.C = nn.Parameter(C)
self.eps = eps
def forward(self, x):
b, D, C, eps = x.shape[0], self.D, self.C, self.eps
# x is made non-negative with relu, as proposed in the paper
x = F.relu(x)
D = repeat(D, 'd r -> b d r', b = b)
C = repeat(C, 'r n -> b r n', b = b)
# transpose
t = lambda tensor: rearrange(tensor, 'b i j -> b j i')
for k in reversed(range(self.K)):
# only calculate gradients on the last step, per the 'One-step Gradient' proposed in the paper
context = null_context if k == 0 else torch.no_grad
with context():
C_new = C * ((t(D) @ x) / ((t(D) @ D @ C) + eps))
D_new = D * ((x @ t(C)) / ((D @ C @ t(C)) + eps))
C, D = C_new, D_new
return D @ C
class Hamburger(nn.Module):
def __init__(
self,
*,
dim,
n,
inner_dim = None,
ratio = 8,
K = 6
):
super().__init__()
inner_dim = default(inner_dim, dim)
self.lower_bread = nn.Conv1d(dim, inner_dim, 1, bias = False)
self.ham = NMF(inner_dim, n, ratio = ratio, K = K)
self.upper_bread = nn.Conv1d(inner_dim, dim, 1, bias = False)
def forward(self, x):
shape = x.shape
x = x.flatten(2)
x = self.lower_bread(x)
x = self.ham(x)
x = self.upper_bread(x)
return x.reshape(shape)
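# Illustrative usage sketch (not part of the original module). `n` must equal the number
# of flattened spatial positions of the incoming feature map; the 32 x 32 map and the
# hyperparameters below are arbitrary assumptions.
if __name__ == '__main__':
    hamburger = Hamburger(dim = 512, n = 32 * 32, ratio = 8, K = 6)
    x = torch.randn(1, 512, 32, 32)
    out = hamburger(x)
    print(out.shape)  # (1, 512, 32, 32)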
| hamburger-pytorch-main | hamburger_pytorch/hamburger_pytorch.py |
from hamburger_pytorch.hamburger_pytorch import Hamburger
| hamburger-pytorch-main | hamburger_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'ITTR-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.4',
license='MIT',
description = 'ITTR - Implementation of the Hybrid Perception Block and Dual-Pruned Self-Attention block',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/ITTR-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism'
],
install_requires=[
'einops>=0.4',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| ITTR-pytorch-main | setup.py |
from ITTR_pytorch.ITTR_pytorch import HPB, DPSA
| ITTR-pytorch-main | ITTR_pytorch/__init__.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, reduce, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def l2norm(t):
return F.normalize(t, dim = -1)
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class ChanLayerNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, x):
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (var + self.eps).sqrt() * self.g + self.b
# classes
class HPB(nn.Module):
""" Hybrid Perception Block """
def __init__(
self,
dim,
dim_head = 32,
heads = 8,
ff_mult = 4,
attn_height_top_k = 16,
attn_width_top_k = 16,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
self.attn = DPSA(
dim = dim,
heads = heads,
dim_head = dim_head,
height_top_k = attn_height_top_k,
width_top_k = attn_width_top_k,
dropout = attn_dropout
)
self.dwconv = nn.Conv2d(dim, dim, 3, padding = 1, groups = dim)
self.attn_parallel_combine_out = nn.Conv2d(dim * 2, dim, 1)
ff_inner_dim = dim * ff_mult
self.ff = nn.Sequential(
nn.Conv2d(dim, ff_inner_dim, 1),
nn.InstanceNorm2d(ff_inner_dim),
nn.GELU(),
nn.Dropout(ff_dropout),
Residual(nn.Sequential(
nn.Conv2d(ff_inner_dim, ff_inner_dim, 3, padding = 1, groups = ff_inner_dim),
nn.InstanceNorm2d(ff_inner_dim),
nn.GELU(),
nn.Dropout(ff_dropout)
)),
nn.Conv2d(ff_inner_dim, dim, 1),
nn.InstanceNorm2d(ff_inner_dim)
)
def forward(self, x):
attn_branch_out = self.attn(x)
conv_branch_out = self.dwconv(x)
concatted_branches = torch.cat((attn_branch_out, conv_branch_out), dim = 1)
attn_out = self.attn_parallel_combine_out(concatted_branches) + x
return self.ff(attn_out)
class DPSA(nn.Module):
""" Dual-pruned Self-attention Block """
def __init__(
self,
dim,
height_top_k = 16,
width_top_k = 16,
dim_head = 32,
heads = 8,
dropout = 0.
):
super().__init__()
self.heads = heads
self.dim_head = dim_head
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
self.norm = ChanLayerNorm(dim)
self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False)
self.height_top_k = height_top_k
self.width_top_k = width_top_k
self.dropout = nn.Dropout(dropout)
self.to_out = nn.Conv2d(inner_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = 1)
# fold out heads
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) x y c', h = self.heads), (q, k, v))
# they used l2 normalized queries and keys, cosine sim attention basically
q, k = map(l2norm, (q, k))
# calculate whether to select and rank along height and width
need_height_select_and_rank = self.height_top_k < h
need_width_select_and_rank = self.width_top_k < w
# select and rank keys / values, probing with query (reduced along height and width) and keys reduced along row and column respectively
if need_width_select_and_rank or need_height_select_and_rank:
q_probe = reduce(q, 'b h w d -> b d', 'sum')
# gather along height, then width
if need_height_select_and_rank:
k_height = reduce(k, 'b h w d -> b h d', 'sum')
top_h_indices = einsum('b d, b h d -> b h', q_probe, k_height).topk(k = self.height_top_k, dim = -1).indices
top_h_indices = repeat(top_h_indices, 'b h -> b h w d', d = self.dim_head, w = k.shape[-2])
k, v = map(lambda t: t.gather(1, top_h_indices), (k, v)) # first gather across height
if need_width_select_and_rank:
k_width = reduce(k, 'b h w d -> b w d', 'sum')
top_w_indices = einsum('b d, b w d -> b w', q_probe, k_width).topk(k = self.width_top_k, dim = -1).indices
top_w_indices = repeat(top_w_indices, 'b w -> b h w d', d = self.dim_head, h = k.shape[1])
k, v = map(lambda t: t.gather(2, top_w_indices), (k, v)) # then gather along width
# select the appropriate keys and values
q, k, v = map(lambda t: rearrange(t, 'b ... d -> b (...) d'), (q, k, v))
# cosine similarities
sim = einsum('b i d, b j d -> b i j', q, k)
# attention
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
# aggregate out
out = einsum('b i j, b j d -> b i d', attn, v)
# merge heads and combine out
out = rearrange(out, '(b h) (x y) d -> b (h d) x y', x = h, y = w, h = self.heads)
return self.to_out(out)
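# Illustrative usage sketch (not part of the original module). Both blocks take a
# (batch, dim, height, width) feature map; the top-k settings prune keys along the
# height and width axes. All sizes below are arbitrary assumptions.
if __name__ == '__main__':
    block = HPB(dim = 512, dim_head = 32, heads = 8, attn_height_top_k = 16, attn_width_top_k = 16)
    attn = DPSA(dim = 512, dim_head = 32, heads = 8, height_top_k = 8, width_top_k = 8)
    fmap = torch.randn(1, 512, 32, 32)
    print(block(fmap).shape, attn(fmap).shape)  # both (1, 512, 32, 32)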
| ITTR-pytorch-main | ITTR_pytorch/ITTR_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'make-a-video-pytorch',
packages = find_packages(exclude=[]),
version = '0.3.1',
license='MIT',
description = 'Make-A-Video - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/make-a-video-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'attention mechanism',
'text-to-video',
'axial convolutions'
],
install_requires=[
'classifier-free-guidance-pytorch',
'einops>=0.6',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| make-a-video-pytorch-main | setup.py |
from make_a_video_pytorch.make_a_video import PseudoConv3d, SpatioTemporalAttention
from make_a_video_pytorch.make_a_video import ResnetBlock, Downsample, Upsample
from make_a_video_pytorch.make_a_video import SpaceTimeUnet
| make-a-video-pytorch-main | make_a_video_pytorch/__init__.py |
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
AttentionConfig = namedtuple('AttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
flash = False,
causal = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = AttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = AttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = AttentionConfig(False, True, True)
def flash_attn(self, q, k, v):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
q, k, v = map(lambda t: t.contiguous(), (q, k, v))
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, bias = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
if self.flash:
assert not exists(bias)
return self.flash_attn(q, k, v)
scale = q.shape[-1] ** -0.5
# similarity
sim = einsum(f"b h i d, b h j d -> b h i j", q, k) * scale
# attn bias
if exists(bias):
sim = sim + bias
# causal
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, b h j d -> b h i d", attn, v)
return out
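# Illustrative usage sketch (not part of the original module). Inputs follow the
# (batch, heads, seq_len, dim_head) convention; flash = True additionally requires
# pytorch >= 2.0. The sizes below are arbitrary assumptions.
if __name__ == '__main__':
    attend = Attend(dropout = 0.1, flash = False, causal = True)
    q = torch.randn(1, 8, 32, 64)
    k = torch.randn(1, 8, 32, 64)
    v = torch.randn(1, 8, 32, 64)
    out = attend(q, k, v)
    print(out.shape)  # (1, 8, 32, 64)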
| make-a-video-pytorch-main | make_a_video_pytorch/attend.py |
import math
import functools
from operator import mul
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, pack, unpack
from einops.layers.torch import Rearrange
from make_a_video_pytorch.attend import Attend
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def mul_reduce(tup):
return functools.reduce(mul, tup)
def divisible_by(numer, denom):
return (numer % denom) == 0
mlist = nn.ModuleList
# for time conditioning
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
self.theta = theta
self.dim = dim
def forward(self, x):
dtype, device = x.dtype, x.device
assert dtype == torch.float, 'input to sinusoidal pos emb must be a float type'
half_dim = self.dim // 2
emb = math.log(self.theta) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device = device, dtype = dtype) * -emb)
emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')
return torch.cat((emb.sin(), emb.cos()), dim = -1).type(dtype)
# layernorm 3d
class RMSNorm(nn.Module):
def __init__(self, chan, dim = 1):
super().__init__()
self.dim = dim
self.gamma = nn.Parameter(torch.ones(chan))
def forward(self, x):
dim = self.dim
right_ones = (dim + 1) if dim < 0 else (x.ndim - 1 - dim)
gamma = self.gamma.reshape(-1, *((1,) * right_ones))
return F.normalize(x, dim = dim) * (x.shape[dim] ** 0.5) * gamma
# feedforward
def shift_token(t):
t, t_shift = t.chunk(2, dim = 1)
t_shift = F.pad(t_shift, (0, 0, 0, 0, 1, -1), value = 0.)
return torch.cat((t, t_shift), dim = 1)
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = 1)
return x * F.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4):
super().__init__()
inner_dim = int(dim * mult * 2 / 3)
self.proj_in = nn.Sequential(
nn.Conv3d(dim, inner_dim * 2, 1, bias = False),
GEGLU()
)
self.proj_out = nn.Sequential(
RMSNorm(inner_dim),
nn.Conv3d(inner_dim, dim, 1, bias = False)
)
def forward(self, x, enable_time = True):
is_video = x.ndim == 5
enable_time &= is_video
if not is_video:
x = rearrange(x, 'b c h w -> b c 1 h w')
x = self.proj_in(x)
if enable_time:
x = shift_token(x)
out = self.proj_out(x)
if not is_video:
out = rearrange(out, 'b c 1 h w -> b c h w')
return out
# best relative positional encoding
class ContinuousPositionBias(nn.Module):
""" from https://arxiv.org/abs/2111.09883 """
def __init__(
self,
*,
dim,
heads,
num_dims = 1,
layers = 2
):
super().__init__()
self.num_dims = num_dims
self.net = nn.ModuleList([])
self.net.append(nn.Sequential(nn.Linear(self.num_dims, dim), nn.SiLU()))
for _ in range(layers - 1):
self.net.append(nn.Sequential(nn.Linear(dim, dim), nn.SiLU()))
self.net.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, *dimensions):
device = self.device
shape = torch.tensor(dimensions, device = device)
rel_pos_shape = 2 * shape - 1
# calculate strides
strides = torch.flip(rel_pos_shape, (0,)).cumprod(dim = -1)
strides = torch.flip(F.pad(strides, (1, -1), value = 1), (0,))
# get all positions and calculate all the relative distances
positions = [torch.arange(d, device = device) for d in dimensions]
grid = torch.stack(torch.meshgrid(*positions, indexing = 'ij'), dim = -1)
grid = rearrange(grid, '... c -> (...) c')
rel_dist = rearrange(grid, 'i c -> i 1 c') - rearrange(grid, 'j c -> 1 j c')
# get all relative positions across all dimensions
rel_positions = [torch.arange(-d + 1, d, device = device) for d in dimensions]
rel_pos_grid = torch.stack(torch.meshgrid(*rel_positions, indexing = 'ij'), dim = -1)
rel_pos_grid = rearrange(rel_pos_grid, '... c -> (...) c')
# mlp input
bias = rel_pos_grid.float()
for layer in self.net:
bias = layer(bias)
# convert relative distances to indices of the bias
rel_dist += (shape - 1) # make sure all positive
rel_dist *= strides
rel_dist_indices = rel_dist.sum(dim = -1)
# now select the bias for each unique relative position combination
bias = bias[rel_dist_indices]
return rearrange(bias, 'i j h -> h i j')
# helper classes
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
flash = False,
causal = False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.attend = Attend(flash = flash, causal = causal)
self.norm = RMSNorm(dim, dim = -1)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
nn.init.zeros_(self.to_out.weight.data) # identity with skip connection
def forward(
self,
x,
rel_pos_bias = None
):
x = self.norm(x)
q, k, v = self.to_q(x), *self.to_kv(x).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
out = self.attend(q, k, v, bias = rel_pos_bias)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# main contribution - pseudo 3d conv
class PseudoConv3d(nn.Module):
def __init__(
self,
dim,
dim_out = None,
kernel_size = 3,
*,
temporal_kernel_size = None,
**kwargs
):
super().__init__()
dim_out = default(dim_out, dim)
temporal_kernel_size = default(temporal_kernel_size, kernel_size)
self.spatial_conv = nn.Conv2d(dim, dim_out, kernel_size = kernel_size, padding = kernel_size // 2)
self.temporal_conv = nn.Conv1d(dim_out, dim_out, kernel_size = temporal_kernel_size, padding = temporal_kernel_size // 2) if kernel_size > 1 else None
if exists(self.temporal_conv):
nn.init.dirac_(self.temporal_conv.weight.data) # initialized to be identity
nn.init.zeros_(self.temporal_conv.bias.data)
def forward(
self,
x,
enable_time = True
):
b, c, *_, h, w = x.shape
is_video = x.ndim == 5
enable_time &= is_video
if is_video:
x = rearrange(x, 'b c f h w -> (b f) c h w')
x = self.spatial_conv(x)
if is_video:
x = rearrange(x, '(b f) c h w -> b c f h w', b = b)
if not enable_time or not exists(self.temporal_conv):
return x
x = rearrange(x, 'b c f h w -> (b h w) c f')
x = self.temporal_conv(x)
x = rearrange(x, '(b h w) c f -> b c f h w', h = h, w = w)
return x
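# Illustrative usage sketch (not part of the original module, assumes being run as part
# of the installed package). The same module handles images (b, c, h, w) and video
# (b, c, f, h, w); the temporal conv is initialized to the identity. Sizes are arbitrary.
if __name__ == '__main__':
    conv = PseudoConv3d(dim = 256, kernel_size = 3)
    images = torch.randn(1, 256, 16, 16)
    video = torch.randn(1, 256, 8, 16, 16)
    print(conv(images).shape, conv(video).shape)  # (1, 256, 16, 16), (1, 256, 8, 16, 16)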
# factorized spatial temporal attention from Ho et al.
class SpatioTemporalAttention(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 64,
heads = 8,
add_feed_forward = True,
ff_mult = 4,
pos_bias = True,
flash = False,
causal_time_attn = False
):
super().__init__()
assert not (flash and pos_bias), 'learned positional attention bias is not compatible with flash attention'
self.spatial_attn = Attention(dim = dim, dim_head = dim_head, heads = heads, flash = flash)
self.spatial_rel_pos_bias = ContinuousPositionBias(dim = dim // 2, heads = heads, num_dims = 2) if pos_bias else None
self.temporal_attn = Attention(dim = dim, dim_head = dim_head, heads = heads, flash = flash, causal = causal_time_attn)
self.temporal_rel_pos_bias = ContinuousPositionBias(dim = dim // 2, heads = heads, num_dims = 1) if pos_bias else None
self.has_feed_forward = add_feed_forward
if not add_feed_forward:
return
self.ff = FeedForward(dim = dim, mult = ff_mult)
def forward(
self,
x,
enable_time = True
):
b, c, *_, h, w = x.shape
is_video = x.ndim == 5
enable_time &= is_video
if is_video:
x = rearrange(x, 'b c f h w -> (b f) (h w) c')
else:
x = rearrange(x, 'b c h w -> b (h w) c')
space_rel_pos_bias = self.spatial_rel_pos_bias(h, w) if exists(self.spatial_rel_pos_bias) else None
x = self.spatial_attn(x, rel_pos_bias = space_rel_pos_bias) + x
if is_video:
x = rearrange(x, '(b f) (h w) c -> b c f h w', b = b, h = h, w = w)
else:
x = rearrange(x, 'b (h w) c -> b c h w', h = h, w = w)
if enable_time:
x = rearrange(x, 'b c f h w -> (b h w) f c')
time_rel_pos_bias = self.temporal_rel_pos_bias(x.shape[1]) if exists(self.temporal_rel_pos_bias) else None
x = self.temporal_attn(x, rel_pos_bias = time_rel_pos_bias) + x
x = rearrange(x, '(b h w) f c -> b c f h w', w = w, h = h)
if self.has_feed_forward:
x = self.ff(x, enable_time = enable_time) + x
return x
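# Illustrative usage sketch (not part of the original module, assumes being run as part
# of the installed package). Spatial attention runs per frame, temporal attention per
# pixel position; plain images skip the temporal step. Sizes below are arbitrary assumptions.
if __name__ == '__main__':
    st_attn = SpatioTemporalAttention(dim = 64, dim_head = 32, heads = 4)
    images = torch.randn(1, 64, 8, 8)
    video = torch.randn(1, 64, 4, 8, 8)
    print(st_attn(images).shape, st_attn(video).shape)  # (1, 64, 8, 8), (1, 64, 4, 8, 8)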
# resnet block
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
kernel_size = 3,
temporal_kernel_size = None,
groups = 8
):
super().__init__()
        self.project = PseudoConv3d(dim, dim_out, kernel_size, temporal_kernel_size = temporal_kernel_size)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(
self,
x,
scale_shift = None,
enable_time = False
):
x = self.project(x, enable_time = enable_time)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
return self.act(x)
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
*,
timestep_cond_dim = None,
groups = 8
):
super().__init__()
self.timestep_mlp = None
if exists(timestep_cond_dim):
self.timestep_mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(timestep_cond_dim, dim_out * 2)
)
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = PseudoConv3d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(
self,
x,
timestep_emb = None,
enable_time = True
):
assert not (exists(timestep_emb) ^ exists(self.timestep_mlp))
scale_shift = None
if exists(self.timestep_mlp) and exists(timestep_emb):
time_emb = self.timestep_mlp(timestep_emb)
to_einsum_eq = 'b c 1 1 1' if x.ndim == 5 else 'b c 1 1'
time_emb = rearrange(time_emb, f'b c -> {to_einsum_eq}')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift, enable_time = enable_time)
h = self.block2(h, enable_time = enable_time)
return h + self.res_conv(x)
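# note (not in the original file): when a timestep embedding is given, the first block is
# modulated FiLM-style, i.e. x * (scale + 1) + shift, with scale and shift projected from
# the DDPM timestep embedding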
# pixelshuffle upsamples and downsamples
# where time dimension can be configured
class Downsample(nn.Module):
def __init__(
self,
dim,
downsample_space = True,
downsample_time = False,
nonlin = False
):
super().__init__()
assert downsample_space or downsample_time
self.down_space = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = 2, p2 = 2),
nn.Conv2d(dim * 4, dim, 1, bias = False),
nn.SiLU() if nonlin else nn.Identity()
) if downsample_space else None
self.down_time = nn.Sequential(
Rearrange('b c (f p) h w -> b (c p) f h w', p = 2),
nn.Conv3d(dim * 2, dim, 1, bias = False),
nn.SiLU() if nonlin else nn.Identity()
) if downsample_time else None
def forward(
self,
x,
enable_time = True
):
is_video = x.ndim == 5
if is_video:
x = rearrange(x, 'b c f h w -> b f c h w')
x, ps = pack([x], '* c h w')
if exists(self.down_space):
x = self.down_space(x)
if is_video:
x, = unpack(x, ps, '* c h w')
x = rearrange(x, 'b f c h w -> b c f h w')
if not is_video or not exists(self.down_time) or not enable_time:
return x
x = self.down_time(x)
return x
class Upsample(nn.Module):
def __init__(
self,
dim,
upsample_space = True,
upsample_time = False,
nonlin = False
):
super().__init__()
assert upsample_space or upsample_time
self.up_space = nn.Sequential(
nn.Conv2d(dim, dim * 4, 1),
nn.SiLU() if nonlin else nn.Identity(),
Rearrange('b (c p1 p2) h w -> b c (h p1) (w p2)', p1 = 2, p2 = 2)
) if upsample_space else None
self.up_time = nn.Sequential(
nn.Conv3d(dim, dim * 2, 1),
nn.SiLU() if nonlin else nn.Identity(),
Rearrange('b (c p) f h w -> b c (f p) h w', p = 2)
) if upsample_time else None
self.init_()
def init_(self):
if exists(self.up_space):
self.init_conv_(self.up_space[0], 4)
if exists(self.up_time):
self.init_conv_(self.up_time[0], 2)
def init_conv_(self, conv, factor):
o, *remain_dims = conv.weight.shape
conv_weight = torch.empty(o // factor, *remain_dims)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o r) ...', r = factor)
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(
self,
x,
enable_time = True
):
is_video = x.ndim == 5
if is_video:
x = rearrange(x, 'b c f h w -> b f c h w')
x, ps = pack([x], '* c h w')
if exists(self.up_space):
x = self.up_space(x)
if is_video:
x, = unpack(x, ps, '* c h w')
x = rearrange(x, 'b f c h w -> b c f h w')
if not is_video or not exists(self.up_time) or not enable_time:
return x
x = self.up_time(x)
return x
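# note (not in the original file): the 1x1 convs feeding the pixel-shuffle rearranges are
# initialized by repeating a kaiming-initialized kernel across the upsample factor, so at
# initialization the layer acts like nearest-neighbor upsampling (helps avoid checkerboard
# artifacts)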
# space time factorized 3d unet
class SpaceTimeUnet(nn.Module):
def __init__(
self,
*,
dim,
channels = 3,
dim_mult = (1, 2, 4, 8),
self_attns = (False, False, False, True),
temporal_compression = (False, True, True, True),
resnet_block_depths = (2, 2, 2, 2),
attn_dim_head = 64,
attn_heads = 8,
condition_on_timestep = True,
attn_pos_bias = True,
flash_attn = False,
causal_time_attn = False
):
super().__init__()
assert len(dim_mult) == len(self_attns) == len(temporal_compression) == len(resnet_block_depths)
num_layers = len(dim_mult)
dims = [dim, *map(lambda mult: mult * dim, dim_mult)]
dim_in_out = zip(dims[:-1], dims[1:])
# determine the valid multiples of the image size and frames of the video
self.frame_multiple = 2 ** sum(tuple(map(int, temporal_compression)))
self.image_size_multiple = 2 ** num_layers
# timestep conditioning for DDPM, not to be confused with the time dimension of the video
self.to_timestep_cond = None
timestep_cond_dim = (dim * 4) if condition_on_timestep else None
if condition_on_timestep:
self.to_timestep_cond = nn.Sequential(
SinusoidalPosEmb(dim),
nn.Linear(dim, timestep_cond_dim),
nn.SiLU()
)
# layers
self.downs = mlist([])
self.ups = mlist([])
attn_kwargs = dict(
dim_head = attn_dim_head,
heads = attn_heads,
pos_bias = attn_pos_bias,
flash = flash_attn,
causal_time_attn = causal_time_attn
)
mid_dim = dims[-1]
self.mid_block1 = ResnetBlock(mid_dim, mid_dim, timestep_cond_dim = timestep_cond_dim)
self.mid_attn = SpatioTemporalAttention(dim = mid_dim, **attn_kwargs)
self.mid_block2 = ResnetBlock(mid_dim, mid_dim, timestep_cond_dim = timestep_cond_dim)
for _, self_attend, (dim_in, dim_out), compress_time, resnet_block_depth in zip(range(num_layers), self_attns, dim_in_out, temporal_compression, resnet_block_depths):
assert resnet_block_depth >= 1
self.downs.append(mlist([
ResnetBlock(dim_in, dim_out, timestep_cond_dim = timestep_cond_dim),
mlist([ResnetBlock(dim_out, dim_out) for _ in range(resnet_block_depth)]),
SpatioTemporalAttention(dim = dim_out, **attn_kwargs) if self_attend else None,
Downsample(dim_out, downsample_time = compress_time)
]))
self.ups.append(mlist([
ResnetBlock(dim_out * 2, dim_in, timestep_cond_dim = timestep_cond_dim),
mlist([ResnetBlock(dim_in + (dim_out if ind == 0 else 0), dim_in) for ind in range(resnet_block_depth)]),
SpatioTemporalAttention(dim = dim_in, **attn_kwargs) if self_attend else None,
Upsample(dim_out, upsample_time = compress_time)
]))
self.skip_scale = 2 ** -0.5 # paper shows faster convergence
self.conv_in = PseudoConv3d(dim = channels, dim_out = dim, kernel_size = 7, temporal_kernel_size = 3)
self.conv_out = PseudoConv3d(dim = dim, dim_out = channels, kernel_size = 3, temporal_kernel_size = 3)
def forward(
self,
x,
timestep = None,
enable_time = True
):
# some asserts
assert not (exists(self.to_timestep_cond) ^ exists(timestep))
is_video = x.ndim == 5
if enable_time and is_video:
frames = x.shape[2]
assert divisible_by(frames, self.frame_multiple), f'number of frames on the video ({frames}) must be divisible by the frame multiple ({self.frame_multiple})'
height, width = x.shape[-2:]
assert divisible_by(height, self.image_size_multiple) and divisible_by(width, self.image_size_multiple), f'height and width of the image or video must be a multiple of {self.image_size_multiple}'
# main logic
t = self.to_timestep_cond(rearrange(timestep, '... -> (...)')) if exists(timestep) else None
x = self.conv_in(x, enable_time = enable_time)
hiddens = []
for init_block, blocks, maybe_attention, downsample in self.downs:
x = init_block(x, t, enable_time = enable_time)
hiddens.append(x.clone())
for block in blocks:
x = block(x, enable_time = enable_time)
if exists(maybe_attention):
x = maybe_attention(x, enable_time = enable_time)
hiddens.append(x.clone())
x = downsample(x, enable_time = enable_time)
x = self.mid_block1(x, t, enable_time = enable_time)
x = self.mid_attn(x, enable_time = enable_time)
x = self.mid_block2(x, t, enable_time = enable_time)
for init_block, blocks, maybe_attention, upsample in reversed(self.ups):
x = upsample(x, enable_time = enable_time)
x = torch.cat((hiddens.pop() * self.skip_scale, x), dim = 1)
x = init_block(x, t, enable_time = enable_time)
x = torch.cat((hiddens.pop() * self.skip_scale, x), dim = 1)
for block in blocks:
x = block(x, enable_time = enable_time)
if exists(maybe_attention):
x = maybe_attention(x, enable_time = enable_time)
x = self.conv_out(x, enable_time = enable_time)
return x
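# usage sketch (not part of the original file) - a minimal forward pass on random data with
# assumed (small) hyperparameters; images and videos share the same weights, the temporal
# layers simply being skipped for images or when enable_time = False
if __name__ == '__main__':
    unet = SpaceTimeUnet(
        dim = 16,
        channels = 3,
        dim_mult = (1, 2),
        self_attns = (False, True),
        temporal_compression = (False, True),
        resnet_block_depths = (1, 1)
    )
    timestep = torch.tensor([500.])
    images = torch.randn(1, 3, 16, 16)
    pred_images = unet(images, timestep)                     # (1, 3, 16, 16)
    video = torch.randn(1, 3, 4, 16, 16)
    pred_video = unet(video, timestep, enable_time = True)   # (1, 3, 4, 16, 16)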
| make-a-video-pytorch-main | make_a_video_pytorch/make_a_video.py |
from setuptools import setup, find_packages
exec(open('parti_pytorch/version.py').read())
setup(
name = 'parti-pytorch',
packages = find_packages(exclude=[]),
version = __version__,
license='MIT',
description = 'Parti - Pathways Autoregressive Text-to-Image Model - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/parti-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'text-to-image'
],
install_requires=[
'einops>=0.4',
'einops-exts',
'ema-pytorch',
'torch>=1.6',
'torchvision',
'transformers',
'vector-quantize-pytorch>=0.9.2'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| parti-pytorch-main | setup.py |
__version__ = '0.0.18'
| parti-pytorch-main | parti_pytorch/version.py |
import torch
import transformers
from transformers import T5Tokenizer, T5EncoderModel, T5Config
transformers.logging.set_verbosity_error()
def exists(val):
return val is not None
# config
MAX_LENGTH = 256
DEFAULT_T5_NAME = 'google/t5-v1_1-base'
T5_CONFIGS = {}
# singleton globals
def get_tokenizer(name):
tokenizer = T5Tokenizer.from_pretrained(name)
return tokenizer
def get_model(name):
model = T5EncoderModel.from_pretrained(name)
return model
def get_model_and_tokenizer(name):
global T5_CONFIGS
if name not in T5_CONFIGS:
T5_CONFIGS[name] = dict()
if "model" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["model"] = get_model(name)
if "tokenizer" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["tokenizer"] = get_tokenizer(name)
return T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer']
def get_encoded_dim(name):
if name not in T5_CONFIGS:
# avoids loading the model if we only want to get the dim
config = T5Config.from_pretrained(name)
T5_CONFIGS[name] = dict(config=config)
elif "config" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["config"]
elif "model" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["model"].config
else:
assert False
return config.d_model
# encoding text
def t5_encode_text(texts, name = DEFAULT_T5_NAME, output_device = None):
t5, tokenizer = get_model_and_tokenizer(name)
if torch.cuda.is_available():
t5 = t5.cuda()
device = next(t5.parameters()).device
encoded = tokenizer.batch_encode_plus(
texts,
return_tensors = "pt",
padding = 'longest',
max_length = MAX_LENGTH,
truncation = True
)
input_ids = encoded.input_ids.to(device)
attn_mask = encoded.attention_mask.to(device)
t5.eval()
with torch.no_grad():
output = t5(input_ids = input_ids, attention_mask = attn_mask)
encoded_text = output.last_hidden_state.detach()
attn_mask = attn_mask.bool()
if not exists(output_device):
return encoded_text, attn_mask
    encoded_text = encoded_text.to(output_device)
    attn_mask = attn_mask.to(output_device)
return encoded_text, attn_mask
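# usage sketch (not part of the original file), assuming the default checkpoint can be
# downloaded from the huggingface hub:
#   texts = ['a juggling fox', 'a dog playing the piano']
#   embeds, mask = t5_encode_text(texts)     # (2, n, 768) float embeddings, (2, n) bool mask
#   dim = get_encoded_dim(DEFAULT_T5_NAME)   # 768 for google/t5-v1_1-base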
| parti-pytorch-main | parti_pytorch/t5.py |
from math import sqrt
import copy
from random import choice
from pathlib import Path
from shutil import rmtree
from PIL import Image
import torch
from torch import nn
from torch.cuda.amp import autocast, GradScaler
from torch.utils.data import Dataset, DataLoader, random_split
import torchvision.transforms as T
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
from einops import rearrange
from parti_pytorch.vit_vqgan import VitVQGanVAE
from parti_pytorch.optimizer import get_optimizer
from ema_pytorch import EMA
# helpers
def exists(val):
return val is not None
def noop(*args, **kwargs):
pass
def cycle(dl):
while True:
for data in dl:
yield data
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def yes_or_no(question):
answer = input(f'{question} (y/n) ')
return answer.lower() in ('yes', 'y')
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
# classes
class ImageDataset(Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png']
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
print(f'{len(self.paths)} training samples found at {folder}')
self.transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.Resize(image_size),
T.RandomHorizontalFlip(),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# main trainer class
class VQGanVAETrainer(nn.Module):
def __init__(
self,
vae,
*,
num_train_steps,
batch_size,
folder,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
valid_frac = 0.05,
random_split_seed = 42,
ema_beta = 0.995,
ema_update_after_step = 500,
ema_update_every = 10,
apply_grad_penalty_every = 4,
amp = False
):
super().__init__()
assert isinstance(vae, VitVQGanVAE), 'vae must be instance of VitVQGanVAE'
image_size = vae.image_size
self.vae = vae
self.ema_vae = EMA(vae, update_after_step = ema_update_after_step, update_every = ema_update_every)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
all_parameters = set(vae.parameters())
discr_parameters = set(vae.discr.parameters())
vae_parameters = all_parameters - discr_parameters
self.optim = get_optimizer(vae_parameters, lr = lr, wd = wd)
self.discr_optim = get_optimizer(discr_parameters, lr = lr, wd = wd)
self.amp = amp
self.scaler = GradScaler(enabled = amp)
self.discr_scaler = GradScaler(enabled = amp)
# create dataset
self.ds = ImageDataset(folder, image_size = image_size)
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
            print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
print(f'training with shared training and valid dataset of {len(self.ds)} samples')
# dataloader
self.dl = cycle(DataLoader(
self.ds,
batch_size = batch_size,
shuffle = True
))
self.valid_dl = cycle(DataLoader(
self.valid_ds,
batch_size = batch_size,
shuffle = True
))
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.apply_grad_penalty_every = apply_grad_penalty_every
self.results_folder = Path(results_folder)
if len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
def train_step(self):
device = next(self.vae.parameters()).device
steps = int(self.steps.item())
apply_grad_penalty = not (steps % self.apply_grad_penalty_every)
self.vae.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
img = next(self.dl)
img = img.to(device)
with autocast(enabled = self.amp):
loss = self.vae(
img,
return_loss = True,
apply_grad_penalty = apply_grad_penalty
)
self.scaler.scale(loss / self.grad_accum_every).backward()
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
self.scaler.step(self.optim)
self.scaler.update()
self.optim.zero_grad()
# update discriminator
if exists(self.vae.discr):
self.discr_optim.zero_grad()
discr_loss = 0
for _ in range(self.grad_accum_every):
img = next(self.dl)
img = img.to(device)
with autocast(enabled = self.amp):
loss = self.vae(img, return_discr_loss = True)
self.discr_scaler.scale(loss / self.grad_accum_every).backward()
accum_log(logs, {'discr_loss': loss.item() / self.grad_accum_every})
self.discr_scaler.step(self.discr_optim)
self.discr_scaler.update()
# log
print(f"{steps}: vae loss: {logs['loss']} - discr loss: {logs['discr_loss']}")
# update exponential moving averaged generator
self.ema_vae.update()
# sample results every so often
if not (steps % self.save_results_every):
for model, filename in ((self.ema_vae.ema_model, f'{steps}.ema'), (self.vae, str(steps))):
model.eval()
imgs = next(self.dl)
imgs = imgs.to(device)
recons = model(imgs)
nrows = int(sqrt(self.batch_size))
imgs_and_recons = torch.stack((imgs, recons), dim = 0)
imgs_and_recons = rearrange(imgs_and_recons, 'r b ... -> (b r) ...')
imgs_and_recons = imgs_and_recons.detach().cpu().float().clamp(0., 1.)
grid = make_grid(imgs_and_recons, nrow = 2, normalize = True, value_range = (0, 1))
logs['reconstructions'] = grid
save_image(grid, str(self.results_folder / f'{filename}.png'))
print(f'{steps}: saving to {str(self.results_folder)}')
# save model every so often
if not (steps % self.save_model_every):
state_dict = self.vae.state_dict()
model_path = str(self.results_folder / f'vae.{steps}.pt')
torch.save(state_dict, model_path)
ema_state_dict = self.ema_vae.state_dict()
model_path = str(self.results_folder / f'vae.{steps}.ema.pt')
torch.save(ema_state_dict, model_path)
print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
device = next(self.vae.parameters()).device
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
print('training complete')
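# usage sketch (not part of the original file), assuming a folder of training images at the
# (hypothetical) path below:
#   vae = VitVQGanVAE(dim = 256, image_size = 256)
#   trainer = VQGanVAETrainer(
#       vae,
#       folder = '/path/to/images',
#       num_train_steps = 100_000,
#       batch_size = 4
#   )
#   trainer.train()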
| parti-pytorch-main | parti_pytorch/vit_vqgan_trainer.py |
from parti_pytorch.parti_pytorch import Parti
from parti_pytorch.vit_vqgan import VitVQGanVAE
from parti_pytorch.vit_vqgan_trainer import VQGanVAETrainer | parti-pytorch-main | parti_pytorch/__init__.py |
import copy
import math
from math import sqrt
from functools import partial, wraps
from vector_quantize_pytorch import VectorQuantize as VQ
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torchvision
from einops import rearrange, reduce, repeat
from einops_exts import rearrange_many
from einops.layers.torch import Rearrange
# constants
MList = nn.ModuleList
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# decorators
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def remove_vgg(fn):
@wraps(fn)
def inner(self, *args, **kwargs):
has_vgg = hasattr(self, 'vgg')
if has_vgg:
vgg = self.vgg
delattr(self, 'vgg')
out = fn(self, *args, **kwargs)
if has_vgg:
self.vgg = vgg
return out
return inner
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, string_input):
return string_input.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# tensor helper functions
def log(t, eps = 1e-10):
return torch.log(t + eps)
def gradient_penalty(images, output, weight = 10):
batch_size = images.shape[0]
gradients = torch_grad(outputs = output, inputs = images,
grad_outputs = torch.ones(output.size(), device = images.device),
create_graph = True, retain_graph = True, only_inputs = True)[0]
gradients = rearrange(gradients, 'b ... -> b (...)')
return weight * ((gradients.norm(2, dim = 1) - 1) ** 2).mean()
def l2norm(t):
return F.normalize(t, dim = -1)
def leaky_relu(p = 0.1):
return nn.LeakyReLU(0.1)
def safe_div(numer, denom, eps = 1e-8):
return numer / (denom + eps)
# gan losses
def hinge_discr_loss(fake, real):
return (F.relu(1 + fake) + F.relu(1 - real)).mean()
def hinge_gen_loss(fake):
return -fake.mean()
def bce_discr_loss(fake, real):
return (-log(1 - torch.sigmoid(fake)) - log(torch.sigmoid(real))).mean()
def bce_gen_loss(fake):
return -log(torch.sigmoid(fake)).mean()
def grad_layer_wrt_loss(loss, layer):
return torch_grad(
outputs = loss,
inputs = layer,
grad_outputs = torch.ones_like(loss),
retain_graph = True
)[0].detach()
# fourier
class SinusoidalPosEmb(nn.Module):
def __init__(
self,
dim,
height_or_width,
theta = 10000
):
super().__init__()
self.dim = dim
self.theta = theta
hw_range = torch.arange(height_or_width)
coors = torch.stack(torch.meshgrid(hw_range, hw_range, indexing = 'ij'), dim = -1)
coors = rearrange(coors, 'h w c -> h w c')
self.register_buffer('coors', coors, persistent = False)
def forward(self, x):
half_dim = self.dim // 2
emb = math.log(self.theta) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)
emb = rearrange(self.coors, 'h w c -> h w c 1') * rearrange(emb, 'j -> 1 1 1 j')
fourier = torch.cat((emb.sin(), emb.cos()), dim = -1)
fourier = repeat(fourier, 'h w c d -> b (c d) h w', b = x.shape[0])
return torch.cat((x, fourier), dim = 1)
# vqgan vae
class ChanLayerNorm(nn.Module):
def __init__(
self,
dim,
eps = 1e-5
):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) * (var + self.eps).rsqrt() * self.gamma
class CrossEmbedLayer(nn.Module):
def __init__(
self,
dim_in,
kernel_sizes,
dim_out = None,
stride = 2
):
super().__init__()
assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])
dim_out = default(dim_out, dim_in)
kernel_sizes = sorted(kernel_sizes)
num_scales = len(kernel_sizes)
# calculate the dimension at each scale
dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]
dim_scales = [*dim_scales, dim_out - sum(dim_scales)]
self.convs = nn.ModuleList([])
for kernel, dim_scale in zip(kernel_sizes, dim_scales):
self.convs.append(nn.Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2))
def forward(self, x):
fmaps = tuple(map(lambda conv: conv(x), self.convs))
return torch.cat(fmaps, dim = 1)
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8
):
super().__init__()
self.groupnorm = nn.GroupNorm(groups, dim)
self.activation = leaky_relu()
self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)
def forward(self, x, scale_shift = None):
x = self.groupnorm(x)
x = self.activation(x)
return self.project(x)
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out = None,
*,
groups = 8
):
super().__init__()
dim_out = default(dim_out, dim)
self.block = Block(dim, dim_out, groups = groups)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x):
h = self.block(x)
return h + self.res_conv(x)
# discriminator
class Discriminator(nn.Module):
def __init__(
self,
dims,
channels = 3,
groups = 8,
init_kernel_size = 5,
cross_embed_kernel_sizes = (3, 7, 15)
):
super().__init__()
init_dim, *_, final_dim = dims
dim_pairs = zip(dims[:-1], dims[1:])
self.layers = MList([nn.Sequential(
CrossEmbedLayer(channels, cross_embed_kernel_sizes, init_dim, stride = 1),
leaky_relu()
)])
for dim_in, dim_out in dim_pairs:
self.layers.append(nn.Sequential(
nn.Conv2d(dim_in, dim_out, 4, stride = 2, padding = 1),
leaky_relu(),
nn.GroupNorm(groups, dim_out),
ResnetBlock(dim_out, dim_out),
))
self.to_logits = nn.Sequential( # return 5 x 5, for PatchGAN-esque training
nn.Conv2d(final_dim, final_dim, 1),
leaky_relu(),
nn.Conv2d(final_dim, 1, 4)
)
def forward(self, x):
for net in self.layers:
x = net(x)
return self.to_logits(x)
# 2d relative positional bias
class RelPosBias2d(nn.Module):
def __init__(self, size, heads):
super().__init__()
self.pos_bias = nn.Embedding((2 * size - 1) ** 2, heads)
arange = torch.arange(size)
pos = torch.stack(torch.meshgrid(arange, arange, indexing = 'ij'), dim = -1)
pos = rearrange(pos, '... c -> (...) c')
rel_pos = rearrange(pos, 'i c -> i 1 c') - rearrange(pos, 'j c -> 1 j c')
rel_pos = rel_pos + size - 1
h_rel, w_rel = rel_pos.unbind(dim = -1)
pos_indices = h_rel * (2 * size - 1) + w_rel
self.register_buffer('pos_indices', pos_indices)
def forward(self, qk):
i, j = qk.shape[-2:]
bias = self.pos_bias(self.pos_indices)
bias = rearrange(bias, 'i j h -> h i j')
return bias
# ViT encoder / decoder
class PEG(nn.Module):
def __init__(self, dim, kernel_size = 3):
super().__init__()
self.proj = nn.Conv2d(dim, dim, kernel_size = kernel_size, padding = kernel_size // 2, groups = dim, stride = 1)
def forward(self, x):
return self.proj(x)
class SPT(nn.Module):
""" https://arxiv.org/abs/2112.13492 """
def __init__(self, *, dim, patch_size, channels = 3):
super().__init__()
patch_dim = patch_size * patch_size * 5 * channels
self.to_patch_tokens = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (p1 p2 c) h w', p1 = patch_size, p2 = patch_size),
ChanLayerNorm(patch_dim),
nn.Conv2d(patch_dim, dim, 1)
)
def forward(self, x):
shifts = ((1, -1, 0, 0), (-1, 1, 0, 0), (0, 0, 1, -1), (0, 0, -1, 1))
shifted_x = list(map(lambda shift: F.pad(x, shift), shifts))
x_with_shifts = torch.cat((x, *shifted_x), dim = 1)
return self.to_patch_tokens(x_with_shifts)
class Attention(nn.Module):
def __init__(
self,
dim,
*,
heads = 8,
dim_head = 32,
fmap_size = None,
rel_pos_bias = False
):
super().__init__()
self.norm = ChanLayerNorm(dim)
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False)
self.primer_ds_convs = nn.ModuleList([PEG(inner_dim) for _ in range(3)])
self.to_out = nn.Conv2d(inner_dim, dim, 1, bias = False)
self.rel_pos_bias = None
if rel_pos_bias:
assert exists(fmap_size)
self.rel_pos_bias = RelPosBias2d(fmap_size, heads)
def forward(self, x):
fmap_size = x.shape[-1]
h = self.heads
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = [ds_conv(t) for ds_conv, t in zip(self.primer_ds_convs, (q, k, v))]
q, k, v = rearrange_many((q, k, v), 'b (h d) x y -> b h (x y) d', h = h)
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
if exists(self.rel_pos_bias):
sim = sim + self.rel_pos_bias(sim)
attn = sim.softmax(dim = -1, dtype = torch.float32)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = fmap_size, y = fmap_size)
return self.to_out(out)
def FeedForward(dim, mult = 4):
return nn.Sequential(
ChanLayerNorm(dim),
nn.Conv2d(dim, dim * mult, 1, bias = False),
nn.GELU(),
PEG(dim * mult),
nn.Conv2d(dim * mult, dim, 1, bias = False)
)
class Transformer(nn.Module):
def __init__(
self,
dim,
*,
layers,
dim_head = 32,
heads = 8,
ff_mult = 4,
fmap_size = None
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(layers):
self.layers.append(nn.ModuleList([
PEG(dim = dim),
Attention(dim = dim, dim_head = dim_head, heads = heads, fmap_size = fmap_size, rel_pos_bias = True),
FeedForward(dim = dim, mult = ff_mult)
]))
self.norm = ChanLayerNorm(dim)
def forward(self, x):
for peg, attn, ff in self.layers:
x = peg(x) + x
x = attn(x) + x
x = ff(x) + x
return self.norm(x)
class ViTEncDec(nn.Module):
def __init__(
self,
dim,
image_size,
channels = 3,
layers = 4,
patch_size = 16,
dim_head = 32,
heads = 8,
ff_mult = 4
):
super().__init__()
self.encoded_dim = dim
self.patch_size = patch_size
input_dim = channels * (patch_size ** 2)
fmap_size = image_size // patch_size
self.encoder = nn.Sequential(
SPT(dim = dim, patch_size = patch_size, channels = channels),
Transformer(
dim = dim,
dim_head = dim_head,
heads = heads,
ff_mult = ff_mult,
layers = layers,
fmap_size = fmap_size
),
)
self.decoder = nn.Sequential(
Transformer(
dim = dim,
dim_head = dim_head,
heads = heads,
ff_mult = ff_mult,
layers = layers,
fmap_size = fmap_size
),
nn.Sequential(
SinusoidalPosEmb(dim // 2, height_or_width = fmap_size),
nn.Conv2d(2 * dim, dim * 4, 3, bias = False, padding = 1),
nn.Tanh(),
nn.Conv2d(dim * 4, input_dim, 1, bias = False),
),
Rearrange('b (p1 p2 c) h w -> b c (h p1) (w p2)', p1 = patch_size, p2 = patch_size)
)
def get_encoded_fmap_size(self, image_size):
return image_size // self.patch_size
@property
def last_dec_layer(self):
return self.decoder[-2][-1].weight
def encode(self, x):
return self.encoder(x)
def decode(self, x):
return self.decoder(x)
# vit vqgan vae
class VitVQGanVAE(nn.Module):
def __init__(
self,
*,
dim,
image_size,
channels = 3,
layers = 4,
l2_recon_loss = False,
use_hinge_loss = True,
vgg = None,
vq_codebook_dim = 64,
vq_codebook_size = 512,
vq_decay = 0.9,
vq_commitment_weight = 1.,
vq_kmeans_init = True,
use_vgg_and_gan = True,
discr_layers = 4,
**kwargs
):
super().__init__()
vq_kwargs, kwargs = groupby_prefix_and_trim('vq_', kwargs)
encdec_kwargs, kwargs = groupby_prefix_and_trim('encdec_', kwargs)
self.image_size = image_size
self.channels = channels
self.codebook_size = vq_codebook_size
self.enc_dec = ViTEncDec(
dim = dim,
image_size = image_size,
channels = channels,
layers = layers,
**encdec_kwargs
)
self.vq = VQ(
dim = self.enc_dec.encoded_dim,
codebook_dim = vq_codebook_dim,
codebook_size = vq_codebook_size,
decay = vq_decay,
commitment_weight = vq_commitment_weight,
kmeans_init = vq_kmeans_init,
accept_image_fmap = True,
use_cosine_sim = True,
**vq_kwargs
)
# reconstruction loss
self.recon_loss_fn = F.mse_loss if l2_recon_loss else F.l1_loss
# turn off GAN and perceptual loss if grayscale
self.vgg = None
self.discr = None
self.use_vgg_and_gan = use_vgg_and_gan
if not use_vgg_and_gan:
return
        # perceptual loss
if exists(vgg):
self.vgg = vgg
else:
self.vgg = torchvision.models.vgg16(pretrained = True)
self.vgg.classifier = nn.Sequential(*self.vgg.classifier[:-2])
# gan related losses
layer_mults = list(map(lambda t: 2 ** t, range(discr_layers)))
layer_dims = [dim * mult for mult in layer_mults]
dims = (dim, *layer_dims)
self.discr = Discriminator(dims = dims, channels = channels)
self.discr_loss = hinge_discr_loss if use_hinge_loss else bce_discr_loss
self.gen_loss = hinge_gen_loss if use_hinge_loss else bce_gen_loss
@property
def encoded_dim(self):
return self.enc_dec.encoded_dim
def get_encoded_fmap_size(self, image_size):
return self.enc_dec.get_encoded_fmap_size(image_size)
def copy_for_eval(self):
device = next(self.parameters()).device
vae_copy = copy.deepcopy(self.cpu())
if vae_copy.use_vgg_and_gan:
del vae_copy.discr
del vae_copy.vgg
vae_copy.eval()
return vae_copy.to(device)
@remove_vgg
def state_dict(self, *args, **kwargs):
return super().state_dict(*args, **kwargs)
@remove_vgg
def load_state_dict(self, *args, **kwargs):
return super().load_state_dict(*args, **kwargs)
@property
def codebook(self):
return self.vq.codebook
def get_fmap_from_codebook(self, indices):
codes = self.codebook[indices]
fmap = self.vq.project_out(codes)
return rearrange(fmap, 'b h w c -> b c h w')
def encode(self, fmap, return_indices_and_loss = True):
fmap = self.enc_dec.encode(fmap)
fmap, indices, commit_loss = self.vq(fmap)
if not return_indices_and_loss:
return fmap
return fmap, indices, commit_loss
def decode(self, fmap):
return self.enc_dec.decode(fmap)
def forward(
self,
img,
return_loss = False,
return_discr_loss = False,
return_recons = False,
apply_grad_penalty = True
):
batch, channels, height, width, device = *img.shape, img.device
        assert height == self.image_size and width == self.image_size, f'height and width of input image must be equal to {self.image_size}'
assert channels == self.channels, 'number of channels on image or sketch is not equal to the channels set on this VQGanVAE'
fmap, indices, commit_loss = self.encode(img, return_indices_and_loss = True)
fmap = self.decode(fmap)
if not return_loss and not return_discr_loss:
return fmap
assert return_loss ^ return_discr_loss, 'you should either return autoencoder loss or discriminator loss, but not both'
# whether to return discriminator loss
if return_discr_loss:
assert exists(self.discr), 'discriminator must exist to train it'
fmap.detach_()
img.requires_grad_()
fmap_discr_logits, img_discr_logits = map(self.discr, (fmap, img))
discr_loss = self.discr_loss(fmap_discr_logits, img_discr_logits)
if apply_grad_penalty:
gp = gradient_penalty(img, img_discr_logits)
loss = discr_loss + gp
if return_recons:
return loss, fmap
return loss
# reconstruction loss
recon_loss = self.recon_loss_fn(fmap, img)
# early return if training on grayscale
if not self.use_vgg_and_gan:
if return_recons:
return recon_loss, fmap
return recon_loss
# perceptual loss
img_vgg_input = img
fmap_vgg_input = fmap
if img.shape[1] == 1:
# handle grayscale for vgg
img_vgg_input, fmap_vgg_input = map(lambda t: repeat(t, 'b 1 ... -> b c ...', c = 3), (img_vgg_input, fmap_vgg_input))
img_vgg_feats = self.vgg(img_vgg_input)
recon_vgg_feats = self.vgg(fmap_vgg_input)
perceptual_loss = F.mse_loss(img_vgg_feats, recon_vgg_feats)
# generator loss
gen_loss = self.gen_loss(self.discr(fmap))
# calculate adaptive weight
last_dec_layer = self.enc_dec.last_dec_layer
norm_grad_wrt_gen_loss = grad_layer_wrt_loss(gen_loss, last_dec_layer).norm(p = 2)
norm_grad_wrt_perceptual_loss = grad_layer_wrt_loss(perceptual_loss, last_dec_layer).norm(p = 2)
adaptive_weight = safe_div(norm_grad_wrt_perceptual_loss, norm_grad_wrt_gen_loss)
adaptive_weight.clamp_(max = 1e4)
# combine losses
loss = recon_loss + perceptual_loss + commit_loss + adaptive_weight * gen_loss
if return_recons:
return loss, fmap
return loss
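# usage sketch (not part of the original file): the vae maps images to a grid of discrete
# codebook ids which the autoregressive transformer later predicts, e.g.
#   vae = VitVQGanVAE(dim = 256, image_size = 256, use_vgg_and_gan = False)
#   images = torch.randn(4, 3, 256, 256)
#   loss = vae(images, return_loss = True)                    # scalar reconstruction loss
#   fmap, indices, commit_loss = vae.encode(images)           # indices: (4, 16, 16) codebook ids
#   recon = vae.decode(vae.get_fmap_from_codebook(indices))   # (4, 3, 256, 256)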
| parti-pytorch-main | parti_pytorch/vit_vqgan.py |
from torch.optim import AdamW, Adam
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
filter_by_requires_grad = False,
group_wd_params = True,
**kwargs
):
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
if wd == 0:
return Adam(params, lr = lr, betas = betas, eps = eps)
if group_wd_params:
wd_params, no_wd_params = separate_weight_decayable_params(params)
params = [
{'params': wd_params},
{'params': no_wd_params, 'weight_decay': 0},
]
return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
| parti-pytorch-main | parti_pytorch/optimizer.py |
from typing import List
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
import torchvision.transforms as T
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from parti_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t + eps)
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
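# e.g. with thres = 0.9 only the top 10% highest logits are kept and the rest are set to -inf
# before gumbel sampling (top-k by count, not nucleus / top-p sampling)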
# classifier free guidance functions
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
# normalization
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer('beta', torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# 2d relative positional bias
class RelPosBias2d(nn.Module):
def __init__(self, size, heads):
super().__init__()
self.pos_bias = nn.Embedding((2 * size - 1) ** 2, heads)
arange = torch.arange(size)
pos = torch.stack(torch.meshgrid(arange, arange, indexing = 'ij'), dim = -1)
pos = rearrange(pos, '... c -> (...) c')
rel_pos = rearrange(pos, 'i c -> i 1 c') - rearrange(pos, 'j c -> 1 j c')
rel_pos = rel_pos + size - 1
h_rel, w_rel = rel_pos.unbind(dim = -1)
pos_indices = h_rel * (2 * size - 1) + w_rel
self.register_buffer('pos_indices', pos_indices)
def forward(self, qk):
i, j = qk.shape[-2:]
bias = self.pos_bias(self.pos_indices[:i, :(j - 1)])
bias = rearrange(bias, 'i j h -> h i j')
bias = F.pad(bias, (j - bias.shape[-1], 0), value = 0.) # account for null key / value for classifier free guidance
return bias
# feedforward
def FeedForward(dim, mult = 4, dropout = 0.):
dim_hidden = int(dim * mult)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, dim_hidden, bias = False),
nn.GELU(),
LayerNorm(dim_hidden),
nn.Linear(dim_hidden, dim, bias = False)
)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
context_dim = None,
dim_head = 64,
heads = 8,
causal = False,
dropout = 0.,
norm_context = False,
rel_pos_bias = False,
encoded_fmap_size = None
):
super().__init__()
self.causal = causal
self.scale = dim_head ** -0.5
self.norm = LayerNorm(dim)
inner_dim = heads * dim_head
context_dim = default(context_dim, dim)
self.norm_context = LayerNorm(context_dim) if norm_context else nn.Identity()
self.to_q = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(dim, inner_dim, bias = False),
Rearrange('b n (h d) -> b h n d', h = heads)
)
# needed for classifier free guidance for transformers
# by @crowsonkb, adopted by the paper
self.null_kv = nn.Parameter(torch.randn(dim_head))
# one-headed key / value attention, from Shazeer's multi-query paper, adopted by Alphacode and PaLM
self.to_kv = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(context_dim, dim_head, bias = False)
)
self.to_out = nn.Sequential(
Rearrange('b h n d -> b n (h d)'),
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
# positional bias
self.rel_pos_bias = None
if rel_pos_bias:
assert exists(encoded_fmap_size)
self.rel_pos_bias = RelPosBias2d(encoded_fmap_size, heads)
def forward(
self,
x,
context = None,
context_mask = None
):
batch, device = x.shape[0], x.device
x = self.norm(x)
q = self.to_q(x) * self.scale
context = default(context, x)
context = self.norm_context(context)
kv = self.to_kv(context)
null_kv = repeat(self.null_kv, 'd -> b 1 d', b = batch)
kv = torch.cat((null_kv, kv), dim = 1)
sim = einsum('b h i d, b j d -> b h i j', q, kv)
if exists(self.rel_pos_bias):
pos_bias = self.rel_pos_bias(sim)
sim = sim + pos_bias
mask_value = -torch.finfo(sim.dtype).max
if exists(context_mask):
context_mask = F.pad(context_mask, (1, 0), value = True)
context_mask = rearrange(context_mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~context_mask, mask_value)
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, mask_value)
attn = sim.softmax(dim = -1, dtype = torch.float32)
out = einsum('b h i j, b j d -> b h i d', attn, kv)
return self.to_out(out)
# classes
class Parti(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
dropout = 0.,
ff_mult = 4,
vae = None,
vae_image_size = None,
vae_codebook_size = None,
t5_name = DEFAULT_T5_NAME,
text_embed_dim = None,
cond_drop_prob = 0.25,
max_text_len = 128,
ignore_index = -1
):
super().__init__()
# text conditioning
text_embed_dim = default(text_embed_dim, get_encoded_dim(t5_name))
self.encode_texts = partial(t5_encode_text, name = t5_name)
self.max_text_len = max_text_len
assert cond_drop_prob > 0.
self.cond_drop_prob = cond_drop_prob # classifier free guidance for transformers - @crowsonkb
# vae and image handling
assert exists(vae) ^ exists(vae_codebook_size)
self.vae = vae
codebook_size = default(vae_codebook_size, vae.codebook_size)
image_size = default(vae_image_size, vae.image_size)
self.start_token = nn.Parameter(torch.randn(dim))
self.image_token_embed = nn.Embedding(codebook_size, dim)
self.image_encoded_dim = vae.get_encoded_fmap_size(image_size)
self.axial_height_pos = nn.Parameter(torch.randn(self.image_encoded_dim, dim))
self.axial_width_pos = nn.Parameter(torch.randn(self.image_encoded_dim, dim))
# projecting to logits
self.init_norm = LayerNorm(dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim, causal = True, encoded_fmap_size = self.image_encoded_dim, rel_pos_bias = True, dim_head = dim_head, heads = heads, dropout = dropout),
Attention(dim, context_dim = text_embed_dim, dim_head = dim_head, heads = heads, dropout = dropout),
FeedForward(dim, mult = ff_mult, dropout = dropout)
]))
self.final_norm = LayerNorm(dim)
self.to_logits = nn.Linear(dim, codebook_size, bias = False)
self.to_logits.weight = self.image_token_embed.weight
# default device
if exists(vae):
self.to(next(vae.parameters()).device)
# loss related
self.ignore_index = ignore_index
@torch.no_grad()
@eval_decorator
def generate(
self,
texts,
*,
cond_scale = 3.,
filter_thres = 0.9,
temperature = 1.,
return_pil_images = False
):
device = next(self.parameters()).device
text_token_embeds, text_mask = self.encode_texts(texts, output_device = device)
batch = text_token_embeds.shape[0]
image_seq_len = self.image_encoded_dim ** 2
image_tokens = torch.empty((batch, 0), device = device, dtype = torch.long)
for _ in range(image_seq_len):
logits = self.forward_with_cond_scale(
text_token_embeds = text_token_embeds,
text_mask = text_mask,
image_token_ids = image_tokens
)[:, -1]
filtered_logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
image_tokens = torch.cat((image_tokens, sampled), dim = -1)
image_tokens = rearrange(image_tokens, 'b (h w) -> b h w', h = self.image_encoded_dim)
if not exists(self.vae):
return image_tokens
with torch.no_grad():
fmap = self.vae.get_fmap_from_codebook(image_tokens)
images = self.vae.decode(fmap)
if not return_pil_images:
return images
pil_images = list(map(T.ToPILImage(), images.unbind(dim = 0)))
return pil_images
def forward_with_cond_scale(self, *args, cond_scale = 3, **kwargs):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
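    # note (not in the original file): standard classifier free guidance -
    # guided = null_logits + (cond_logits - null_logits) * cond_scale, where the null pass
    # drops the text conditioning entirely (cond_drop_prob = 1.)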
def forward(
self,
texts: List[str] = None,
text_token_embeds = None,
text_mask = None,
images = None,
image_token_ids = None,
cond_drop_prob = None,
return_loss = False
):
assert exists(texts) ^ exists(text_token_embeds)
assert exists(images) ^ exists(image_token_ids)
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
# encoding images
if not exists(image_token_ids):
assert exists(self.vae), 'vae must be given if you want to encode the image live'
with torch.no_grad():
_, image_token_ids, _ = self.vae.encode(images, return_indices_and_loss = True)
image_token_ids = rearrange(image_token_ids, 'b ... -> b (...)')
if return_loss:
assert image_token_ids.shape[-1] > 1, 'not enough image tokens given to return a loss'
image_token_ids, labels = image_token_ids[:, :-1], image_token_ids
image_token_emb = self.image_token_embed(image_token_ids)
# add axial positional embedding
axial_pos_emb = rearrange(self.axial_width_pos, 'w d -> 1 w d') + rearrange(self.axial_height_pos, 'h d -> h 1 d')
axial_pos_emb = rearrange(axial_pos_emb, 'h w d -> (h w) d')
batch, seq_len, device = *image_token_emb.shape[:2], image_token_emb.device
image_token_emb = image_token_emb + axial_pos_emb[:seq_len]
# add start token
start_tokens = repeat(self.start_token, 'd -> b 1 d', b = batch)
image_token_emb = torch.cat((start_tokens, image_token_emb), dim = 1)
# text
if not exists(text_token_embeds):
with torch.no_grad():
text_token_embeds, text_mask = self.encode_texts(texts, output_device = device)
if not exists(text_mask):
            text_mask = torch.ones(text_token_embeds.shape[:2], dtype = torch.bool, device = device)
# enforce max text len
text_token_embeds, text_mask = map(lambda t: t[:, :self.max_text_len], (text_token_embeds, text_mask))
# classifier free guidance conditional dropout
if cond_drop_prob > 0:
keep_mask = prob_mask_like((batch,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
# attend
x = image_token_emb
x = self.init_norm(x)
for self_attn, cross_attn, ff in self.layers:
x = self_attn(x) + x
x = cross_attn(x, context = text_token_embeds, context_mask = text_mask) + x
x = ff(x) + x
x = self.final_norm(x)
# to logits
logits = self.to_logits(x)
if not return_loss:
return logits
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
labels,
ignore_index = self.ignore_index
)
return loss
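# usage sketch (not part of the original file), assuming a (pre)trained VitVQGanVAE:
#   vae = VitVQGanVAE(dim = 256, image_size = 256)
#   parti = Parti(vae = vae, dim = 512, depth = 8, dim_head = 64, heads = 8)
#   images = torch.randn(4, 3, 256, 256)
#   loss = parti(texts = ['a photo of a dog'] * 4, images = images, return_loss = True)
#   loss.backward()
#   sampled = parti.generate(texts = ['a photo of a cat'], cond_scale = 3.)   # (1, 3, 256, 256)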
| parti-pytorch-main | parti_pytorch/parti_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'musiclm-pytorch',
packages = find_packages(exclude=[]),
version = '0.2.8',
license='MIT',
description = 'MusicLM - AudioLM + Audio CLIP to text to music synthesis',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/musiclm-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'text to music',
'contrastive learning'
],
install_requires=[
'accelerate',
'audiolm-pytorch>=0.17.0',
'beartype',
'einops>=0.6',
'lion-pytorch',
'vector-quantize-pytorch>=1.0.0',
'x-clip',
'torch>=1.12',
'torchaudio'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| musiclm-pytorch-main | setup.py |
from musiclm_pytorch.musiclm_pytorch import (
MuLaN,
MuLaNEmbedQuantizer,
MusicLM,
AudioSpectrogramTransformer,
TextTransformer,
SigmoidContrastiveLearning,
SoftmaxContrastiveLearning
)
from musiclm_pytorch.trainer import MuLaNTrainer
| musiclm-pytorch-main | musiclm_pytorch/__init__.py |
import torch
from torch import nn
from torch.autograd import Function
import torch.distributed as dist
from einops import rearrange
# distributed helpers
def all_gather_same_dim(t):
world_size = dist.get_world_size()
gathered_tensors = [torch.empty_like(t, device = t.device, dtype = t.dtype) for i in range(world_size)]
dist.all_gather(gathered_tensors, t)
return gathered_tensors
def all_gather_variable_dim(t, dim = 0, sizes = None):
device, rank, world_size = t.device, dist.get_rank(), dist.get_world_size()
if not exists(sizes):
size = torch.tensor(t.shape[dim], device = device, dtype = torch.long)
sizes = all_gather_same_dim(size)
sizes = torch.stack(sizes)
if torch.unique(sizes).numel() == 1:
gathered_tensors = all_gather_same_dim(t)
return torch.cat(gathered_tensors, dim = dim), sizes
max_size = sizes.amax().item()
padded_t = pad_dim_to(t, max_size, dim = dim)
gathered_tensors = all_gather_same_dim(padded_t)
gathered_tensor = torch.cat(gathered_tensors, dim = dim)
seq = torch.arange(max_size, device = device)
mask = rearrange(seq, 'j -> 1 j') < rearrange(sizes, 'i -> i 1')
mask = rearrange(mask, 'i j -> (i j)')
seq = torch.arange(mask.shape[-1], device = device)
indices = seq[mask]
gathered_tensor = gathered_tensor.index_select(dim, indices)
return gathered_tensor, sizes
class AllGatherFunction(Function):
@staticmethod
def forward(ctx, x, dim, sizes, all_reduce_grads):
x, batch_sizes = all_gather_variable_dim(x, dim = dim, sizes = sizes)
ctx.dim = dim
ctx.all_reduce_grads = all_reduce_grads
ctx.batch_sizes = batch_sizes.tolist()
return x, batch_sizes
@staticmethod
def backward(ctx, grads, _):
batch_sizes, rank = ctx.batch_sizes, dist.get_rank()
if ctx.all_reduce_grads:
dist.all_reduce(grads)
grads_by_rank = grads.split(batch_sizes, dim = ctx.dim)
return grads_by_rank[rank], None, None, None
class AllGather(nn.Module):
def __init__(
self,
dim,
*,
all_reduce_grads = False
):
super().__init__()
self.dim = dim
self.all_reduce_grads = all_reduce_grads
self.is_distributed = dist.is_initialized() and dist.get_world_size() > 1
def forward(
self,
x,
sizes = None
):
if not self.is_distributed:
return x, None
return AllGatherFunction.apply(x, self.dim, sizes, self.all_reduce_grads)
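# usage sketch (not part of the original file): under torch.distributed (e.g. launched with
# torchrun), AllGather concatenates the per-rank batches along `dim` while routing gradients
# back to the originating rank, as needed by the contrastive losses; outside of a distributed
# run it is a no-op:
#   all_gather = AllGather(dim = 0)
#   gathered, sizes = all_gather(latents)   # returns (latents, None) when not distributed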
| musiclm-pytorch-main | musiclm_pytorch/distributed.py |
import copy
from math import sqrt
from random import choice
from pathlib import Path
from shutil import rmtree
from functools import wraps, partial
from typing_extensions import Annotated
from beartype import beartype
from beartype.door import is_bearable
from beartype.vale import Is
from beartype.typing import Union, List, Optional, Tuple, Callable
import torch
from torch import nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader, random_split
from torch.nn.utils.rnn import pad_sequence
from lion_pytorch import Lion
from musiclm_pytorch import MuLaN
from einops import rearrange
from accelerate import Accelerator, DistributedType
# for automatically routing data emitted from a dataset to keywords of the transformer wrappers
DATASET_FIELD_TYPE_CONFIG = dict(
wavs = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.float and t.ndim in {2, 3}]
],
raw_texts = List[str],
texts = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.long and t.ndim == 2]
],
)
# helpers
def exists(val):
return val is not None
def default(*args):
for arg in args:
if exists(arg):
return arg
return None
def noop(*args, **kwargs):
pass
def cycle(dl):
while True:
for data in dl:
yield data
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def yes_or_no(question):
answer = input(f'{question} (y/n) ')
return answer.lower() in ('yes', 'y')
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
# auto data to module keyword argument routing functions
def has_duplicates(tup):
counts = dict()
for el in tup:
if el not in counts:
counts[el] = 0
counts[el] += 1
return any(filter(lambda count: count > 1, counts.values()))
def determine_types(data, config):
output = []
for el in data:
for name, data_type in config.items():
if is_bearable(el, data_type):
output.append(name)
break
else:
raise TypeError(f'unable to determine type of {data}')
return tuple(output)
# optimizer functions
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
# dataloader functions
def collate_one_or_multiple_tensors(fn):
@wraps(fn)
def inner(data):
is_one_data = not isinstance(data[0], tuple)
if is_one_data:
data = torch.stack(data)
return (data,)
outputs = []
for datum in zip(*data):
if is_bearable(datum, Tuple[str, ...]):
output = list(datum)
else:
output = fn(datum)
outputs.append(output)
return tuple(outputs)
return inner
@collate_one_or_multiple_tensors
def curtail_to_shortest_collate(data):
    min_len = min(datum.shape[0] for datum in data)
data = [datum[:min_len] for datum in data]
return torch.stack(data)
@collate_one_or_multiple_tensors
def pad_to_longest_fn(data):
return pad_sequence(data, batch_first = True)
def get_dataloader(ds, pad_to_longest = True, **kwargs):
collate_fn = pad_to_longest_fn if pad_to_longest else curtail_to_shortest_collate
return DataLoader(ds, collate_fn = collate_fn, **kwargs)
# semantic transformer trainer
@beartype
class MuLaNTrainer(nn.Module):
def __init__(
self,
mulan: MuLaN,
dataset: Dataset,
*,
num_train_steps = None,
batch_size,
data_max_length = None,
folder = None,
lr = 3e-4,
grad_accum_every = 1,
betas = (0.9, 0.99),
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
use_lion = False,
force_clear_prev_results = None # set to True | False to skip the prompt
):
super().__init__()
assert batch_size > 1, 'batch size must be greater than 1 for contrastive learning (but ideally as large as possible)'
self.accelerator = Accelerator(**accelerate_kwargs)
self.mulan = mulan
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = default(num_train_steps, len(dataset)) # 1 epoch by default
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
optim_klass = Lion if use_lion else Adam
self.optim = optim_klass(mulan.parameters(), lr = lr, betas = betas)
# max grad norm
self.max_grad_norm = max_grad_norm
self.data_max_length = data_max_length
# create dataset
self.ds = dataset
self.ds_fields = None
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
            self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, pad_to_longest = False, drop_last = True)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, pad_to_longest = False, drop_last = True)
# prepare with accelerator
(
self.mulan,
self.optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.mulan,
self.optim,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
hps = dict(
num_train_steps = num_train_steps,
data_max_length = data_max_length,
learning_rate = lr
)
self.accelerator.init_trackers("mulan", config = hps)
# results folder
self.results_folder = Path(results_folder)
if force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?')):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
# to device
self.mulan.to(self.device)
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.mulan),
optim = self.optim.state_dict()
)
torch.save(pkg, path)
def load(self, path):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
mulan = self.accelerator.unwrap_model(self.mulan)
mulan.load_state_dict(pkg['model'])
self.optim.load_state_dict(pkg['optim'])
def print(self, msg):
self.accelerator.print(msg)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def data_tuple_to_kwargs(self, data):
if not exists(self.ds_fields):
self.ds_fields = determine_types(data, DATASET_FIELD_TYPE_CONFIG)
assert not has_duplicates(self.ds_fields), 'dataset fields must not have duplicate field names'
data_kwargs = dict(zip(self.ds_fields, data))
wavs = data_kwargs['wavs']
data_kwargs.update(wavs = wavs[..., :self.data_max_length])
return data_kwargs
def train_step(self):
device = self.device
steps = int(self.steps.item())
self.mulan.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.dl_iter))
loss = self.mulan(**data_kwargs)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.mulan.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']}")
self.accelerator.log({"train_loss": logs['loss']}, step = steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'mulan.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn: Callable = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
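# illustrative usage sketch (not part of the original file) - the trainer's class definition and
# __init__ signature begin above this excerpt, so the class name and keyword names below are
# assumptions inferred from the attributes set in __init__:
#
# trainer = MuLaNTrainer(mulan = mulan, dataset = audio_text_dataset, batch_size = 4, num_train_steps = 10000)
# trainer.train()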
| musiclm-pytorch-main | musiclm_pytorch/trainer.py |
import math
from functools import wraps, partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torchaudio.transforms import Spectrogram, TimeStretch, FrequencyMasking, TimeMasking
from audiolm_pytorch import AudioLM
from audiolm_pytorch.utils import AudioConditionerBase
import torch.distributed as dist
from musiclm_pytorch.distributed import AllGather
from x_clip.tokenizer import tokenizer
from vector_quantize_pytorch import ResidualVQ
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange
from beartype.typing import List, Optional, Tuple
from beartype import beartype
# functions
def exists(val):
return val is not None
def first(it):
return it[0]
def default(val, d):
return val if exists(val) else d
def round_down_nearest_multiple(n, divisor):
return n // divisor * divisor
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# decorators
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# tensor functions
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def l2norm(t):
return F.normalize(t, p = 2, dim = -1)
def matrix_diag(t):
device = t.device
i, j = t.shape[-2:]
num_diag_el = min(i, j)
i_range = torch.arange(i, device = device)
j_range = torch.arange(j, device = device)
diag_mask = rearrange(i_range, 'i -> i 1') == rearrange(j_range, 'j -> 1 j')
diag_el = t.masked_select(diag_mask)
return rearrange(diag_el, '(b d) -> b d', d = num_diag_el)
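# e.g. for a stacked similarity tensor of shape (layers, batch, batch) this returns the
# (layers, batch) diagonal entries - used below to pull out the positive audio/text pairs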
# 2d sinusoidal positional embedding
# simple vit paper shows it is good enough compared to learned
def posemb_sincos_2d(patches, temperature = 10000, dtype = torch.float32):
_, h, w, dim, device, dtype = *patches.shape, patches.device, patches.dtype
y, x = torch.meshgrid(torch.arange(h, device = device), torch.arange(w, device = device), indexing = 'ij')
assert (dim % 4) == 0, 'feature dimension must be multiple of 4 for sincos emb'
omega = torch.arange(dim // 4, device = device) / (dim // 4 - 1)
omega = 1. / (temperature ** omega)
y = y.flatten()[:, None] * omega[None, :]
x = x.flatten()[:, None] * omega[None, :]
pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim = 1)
pe = pe.type(dtype)
return rearrange(pe, '(h w) d -> h w d', h = h, w = w)
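# e.g. given patch tokens of shape (batch, height, width, dim) this returns a (height, width, dim)
# embedding that is broadcast-added to the tokens in the audio spectrogram transformer below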
# biasless layernorm
class LayerNorm(nn.Module):
def __init__(self, dim, scale = True):
super().__init__()
self.learned_gamma = nn.Parameter(torch.ones(dim)) if scale else None
self.register_buffer('gamma', torch.ones(dim), persistent = False)
self.register_buffer('beta', torch.zeros(dim), persistent = False)
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], default(self.learned_gamma, self.gamma), self.beta)
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4, dropout = 0.):
dim_hidden = int(dim * mult * 2 / 3)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, dim_hidden * 2, bias = False),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim_hidden, dim, bias = False)
)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
causal = False,
dim_head = 64,
heads = 8,
dropout = 0.,
scale = 8
):
super().__init__()
self.heads = heads
self.scale = scale
self.causal = causal
inner_dim = dim_head * heads
self.norm = LayerNorm(dim)
self.attn_dropout = nn.Dropout(dropout)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.Dropout(dropout)
)
def forward(
self,
x,
rel_pos_bias = None,
mask = None
):
b, n, _, device = *x.shape, x.device
# prenorm
x = self.norm(x)
# project for queries, keys, values
q, k, v = self.to_q(x), *self.to_kv(x).chunk(2, dim = -1)
# split for multi-headed attention
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
# qk rmsnorm (cosine-sim attention) - technique circulating within Google Brain, used to stabilize a 22B parameter vision model training
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# similarities
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
if exists(rel_pos_bias):
sim = sim + rel_pos_bias
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = x.device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
# aggregate
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# transformer
class Transformer(nn.Module):
def __init__(
self,
dim,
depth,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout),
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout),
]))
def forward(
self,
x,
rel_pos_bias = None,
mask = None,
return_all_layers = False
):
layers = []
for attn, ff in self.layers:
x = attn(x, rel_pos_bias = rel_pos_bias, mask = mask) + x
x = ff(x) + x
layers.append(x)
if not return_all_layers:
return x
return x, torch.stack(layers[:-1])
# contrastive losses
class SoftmaxContrastiveLearning(nn.Module):
def __init__(
self,
*,
layers = 1,
decoupled_contrastive_learning = False,
init_temp = 10
):
super().__init__()
self.temperatures = nn.Parameter(torch.ones(layers, 1, 1) * math.log(init_temp))
self.decoupled_contrastive_learning = decoupled_contrastive_learning
self.all_gather = AllGather(dim = 2)
@property
def device(self):
return next(self.parameters()).device
def forward(self, audio_latents, text_latents):
if audio_latents.ndim == 2:
audio_latents = rearrange(audio_latents, '... -> 1 ...')
if text_latents.ndim == 2:
text_latents = rearrange(text_latents, '... -> 1 ...')
batch = audio_latents.shape[1]
if self.all_gather.is_distributed:
latents = torch.stack((audio_latents, text_latents))
latents, _ = self.all_gather(latents)
audio_latents, text_latents = latents
sims = einsum('l i d, l j d -> l i j', audio_latents, text_latents)
sims = sims * self.temperatures.exp()
cosine_sims_exp = sims.exp()
numerator = matrix_diag(cosine_sims_exp)
if self.decoupled_contrastive_learning:
eye = torch.eye(batch, device = self.device, dtype = torch.bool)
cosine_sims_exp = cosine_sims_exp.masked_fill(eye, 0.)
denominator_i = reduce(cosine_sims_exp, 'l i j -> l i', 'sum')
denominator_j = reduce(cosine_sims_exp, 'l i j -> l j', 'sum')
contrastive_loss = -log(numerator) + 0.5 * (log(denominator_i) + log(denominator_j))
contrastive_loss = reduce(contrastive_loss, 'l n -> l', 'mean')
return contrastive_loss.sum()
class SigmoidContrastiveLearning(nn.Module):
""" https://arxiv.org/abs/2303.15343 """
def __init__(
self,
*,
layers = 1,
init_temp = 10,
init_bias = -10
):
super().__init__()
self.temperatures = nn.Parameter(torch.ones(layers, 1, 1) * math.log(init_temp))
self.bias = nn.Parameter(torch.ones(layers, 1, 1) * init_bias)
self.all_gather = AllGather(dim = 1, all_reduce_grads = True)
@property
def device(self):
return next(self.parameters()).device
def forward(self, audio_latents, text_latents):
device = self.device
if audio_latents.ndim == 2:
audio_latents = rearrange(audio_latents, '... -> 1 ...')
if text_latents.ndim == 2:
text_latents = rearrange(text_latents, '... -> 1 ...')
text_latents, rank_sizes = self.all_gather(text_latents)
n = text_latents.shape[1]
sims = einsum('l i d, l j d -> l i j', audio_latents, text_latents)
sims = sims * self.temperatures.exp() + self.bias
labels = torch.eye(n, device = device)
if exists(rank_sizes):
labels_by_ranks = labels.split(rank_sizes.tolist(), dim = 0)
labels = labels_by_ranks[dist.get_rank()]
labels = 2 * rearrange(labels, 'i j -> 1 i j') - torch.ones_like(sims)
return -F.logsigmoid(labels * sims).sum() / n
# Audio Spectrogram Transformer - https://arxiv.org/abs/2104.01778
def pair(t):
return (t, t) if not isinstance(t, tuple) else t
class AudioSpectrogramTransformer(nn.Module):
def __init__(
self,
dim,
depth,
patch_size = 16,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
accept_spec = False,
accept_spec_time_first = True,
spec_n_fft = 128,
spec_power = 2,
spec_win_length = 24,
spec_hop_length = None,
spec_pad = 0,
spec_center = True,
spec_pad_mode = 'reflect',
spec_aug_stretch_factor = 0.8,
spec_aug_freq_mask = 80,
spec_aug_time_mask = 80,
patch_dropout_prob = 0.25
):
super().__init__()
self.dim = dim
self.depth = depth
self.patch_size = pair(patch_size)
patch_input_dim = self.patch_size[0] * self.patch_size[1]
self.to_patch_tokens = Sequential(
Rearrange('b (h p1) (w p2) -> b h w (p1 p2)', p1 = self.patch_size[0], p2 = self.patch_size[1]),
nn.LayerNorm(patch_input_dim),
nn.Linear(patch_input_dim, dim),
nn.LayerNorm(dim)
)
self.accept_spec = accept_spec
self.accept_spec_time_first = accept_spec_time_first
self.spec = Spectrogram(
n_fft = spec_n_fft,
power = spec_power,
win_length = spec_win_length,
hop_length = spec_hop_length,
pad = spec_pad,
center = spec_center,
pad_mode = spec_pad_mode
)
# SpecAugment - seems to be widely used in audio field https://arxiv.org/abs/1904.08779
self.aug = torch.nn.Sequential(
TimeStretch(spec_aug_stretch_factor, fixed_rate = True),
FrequencyMasking(freq_mask_param = spec_aug_freq_mask),
TimeMasking(time_mask_param = spec_aug_time_mask),
)
self.transformer = Transformer(
dim = dim,
depth = depth,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_mult = ff_mult,
ff_dropout = ff_dropout
)
self.norm = LayerNorm(dim)
# patch dropout
self.patch_dropout_prob = patch_dropout_prob
# 2d dynamic positional bias
mlp_hidden_dim = dim // 4
self.dynamic_pos_bias_mlp = nn.Sequential(
nn.Linear(2, mlp_hidden_dim),
nn.SiLU(),
nn.Linear(mlp_hidden_dim, mlp_hidden_dim),
nn.SiLU(),
nn.Linear(mlp_hidden_dim, heads),
Rearrange('... i j h -> ... h i j')
)
def forward(
self,
x,
force_no_patch_dropout = False,
return_all_layers = False
):
batch, device = x.shape[0], x.device
assert (self.accept_spec and x.ndim == 3) or (not self.accept_spec and x.ndim == 2)
if self.accept_spec and self.accept_spec_time_first:
x = rearrange(x, 'b t f -> b f t')
if not self.accept_spec:
x = self.spec(x)
if self.training:
x = self.aug(x)
# automatically crop if audio does not yield a 2d spectrogram that is divisible by patch sizes
height, width = x.shape[-2:]
patch_height, patch_width = self.patch_size
rounded_height, rounded_width = map(lambda args: round_down_nearest_multiple(*args), ((height, patch_height), (width, patch_width)))
if (height, width) != (rounded_height, rounded_width): # just keep printing to be annoying until it is fixed
print_once(f'spectrogram yielded shape of {(height, width)}, but had to be cropped to {(rounded_height, rounded_width)} to be patchified for transformer')
x = x[..., :rounded_height, :rounded_width]
# to patches
x = self.to_patch_tokens(x)
# get number of patches along height and width
_, num_patch_height, num_patch_width, _ = x.shape
# get 2d relative positions
grid = torch.stack(torch.meshgrid(
torch.arange(num_patch_height, device = device),
torch.arange(num_patch_width, device = device)
, indexing = 'ij'), dim = -1)
grid = rearrange(grid, '... c -> (...) c')
# 2d sinusoidal positional embedding
x = x + posemb_sincos_2d(x)
x = rearrange(x, 'b ... c -> b (...) c')
# patch dropout
if self.training and self.patch_dropout_prob > 0. and not force_no_patch_dropout:
n, device = x.shape[1], x.device
batch_indices = torch.arange(batch, device = device)
batch_indices = rearrange(batch_indices, '... -> ... 1')
num_patches_keep = max(1, int(n * (1 - self.patch_dropout_prob)))
patch_indices_keep = torch.randn(batch, n, device = device).topk(num_patches_keep, dim = -1).indices
x = x[batch_indices, patch_indices_keep]
grid = repeat(grid, '... -> b ...', b = batch)
grid = grid[batch_indices, patch_indices_keep]
# 2d relative positional bias
rel_dist = rearrange(grid, '... i c -> ... i 1 c') - rearrange(grid, '... j c -> ... 1 j c')
rel_pos_bias = self.dynamic_pos_bias_mlp(rel_dist.float())
# attention, what else
x, all_layers = self.transformer(x, rel_pos_bias = rel_pos_bias, return_all_layers = True)
# final global average and norm (most recent papers show this is superior to CLS token)
x = reduce(x, 'b n d -> b d', 'mean')
out = self.norm(x)
if not return_all_layers:
return out
return out, all_layers
# text transformer
class TextTransformer(nn.Module):
@beartype
def __init__(
self,
dim,
depth,
num_tokens = tokenizer.vocab_size,
max_seq_len = 256,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
ff_mult = 4,
pad_id = 0
):
super().__init__()
self.dim = dim
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.depth = depth
self.max_seq_len = max_seq_len
self.cls_token = nn.Parameter(torch.randn(dim))
self.transformer = Transformer(
dim = dim,
depth = depth,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
ff_mult = ff_mult
)
self.pad_id = pad_id
self.norm = LayerNorm(dim)
@property
def device(self):
return next(self.parameters()).device
@beartype
def forward(
self,
x = None,
raw_texts: Optional[List[str]] = None,
mask = None,
return_all_layers = False
):
assert exists(x) ^ exists(raw_texts)
if exists(raw_texts):
x = tokenizer.tokenize(raw_texts).to(self.device)
if not exists(mask):
mask = x != self.pad_id
b, n, device = *x.shape, x.device
# token embedding + positional embedding
x = self.token_emb(x)
assert n <= self.max_seq_len, f'text sequence length {n} must not exceed max_seq_len {self.max_seq_len}'
x = x + self.pos_emb(torch.arange(n, device = device))
# cls tokens, as in bert
cls_tokens = repeat(self.cls_token, 'd -> b d', b = b)
x, ps = pack([cls_tokens, x], 'b * d')
# account for attending to cls token with self attention mask
mask = F.pad(mask, (1, 0), value = True)
# attention
x, all_layers = self.transformer(x, mask = mask, return_all_layers = True)
# unpack the cls tokens
cls_tokens, _ = unpack(x, ps, 'b * d')
out = self.norm(cls_tokens)
if not return_all_layers:
return out
return out, all_layers
# hierarchical cl loss
def interspersed_indices(layers, total_layers):
assert total_layers >= layers
step = total_layers / layers
return (torch.arange(0, layers) * step).floor().long()
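# e.g. interspersed_indices(3, 12) -> tensor([0, 4, 8]) - evenly spaced layer indices, so the
# hierarchical contrastive loss can tap a subset of the transformer layers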
class MultiLayerContrastiveLoss(nn.Module):
def __init__(
self,
*,
audio_dim,
text_dim,
dim_latent,
layers,
decoupled_contrastive_learning = False,
sigmoid_contrastive_loss = False
):
super().__init__()
self.layers = layers
self.audio_norm = LayerNorm(audio_dim, scale = False)
self.audio_gamma = nn.Parameter(torch.ones(layers, 1, audio_dim))
self.audio_latent_weight = nn.Parameter(torch.randn(layers, audio_dim, dim_latent))
self.audio_latent_bias = nn.Parameter(torch.randn(layers, 1, dim_latent))
self.text_norm = LayerNorm(text_dim, scale = False)
self.text_gamma = nn.Parameter(torch.ones(layers, 1, text_dim))
self.text_latent_weight = nn.Parameter(torch.randn(layers, text_dim, dim_latent))
self.text_latent_bias = nn.Parameter(torch.randn(layers, 1, dim_latent))
klass = SigmoidContrastiveLearning if sigmoid_contrastive_loss else partial(SoftmaxContrastiveLearning, decoupled_contrastive_learning = decoupled_contrastive_learning)
self.contrast = klass(layers = layers)
def forward(self, *, audio_layers, text_layers):
device, batch = audio_layers.device, audio_layers.shape[1]
audio_gap = reduce(audio_layers, 'l b n d -> l b d', 'mean')
audio_embeds = self.audio_norm(audio_gap) * self.audio_gamma
audio_latents = einsum('l b d, l d e -> l b e', audio_embeds, self.audio_latent_weight) + self.audio_latent_bias
audio_latents = l2norm(audio_latents)
text_cls_tokens = text_layers[:, :, 0]
text_embeds = self.text_norm(text_cls_tokens) * self.text_gamma
text_latents = einsum('l b d, l d e -> l b e', text_embeds, self.text_latent_weight) + self.text_latent_bias
text_latents = l2norm(text_latents)
return self.contrast(audio_latents, text_latents)
# main classes
class MuLaN(nn.Module):
@beartype
def __init__(
self,
audio_transformer: AudioSpectrogramTransformer,
text_transformer: TextTransformer,
dim_latent = 128, # they use 128
decoupled_contrastive_learning = True, # think this was used, make it optional
hierarchical_contrastive_loss = False,
hierarchical_contrastive_loss_layers = None,
sigmoid_contrastive_loss = False
):
super().__init__()
self.dim_latent = dim_latent
self.audio = audio_transformer
self.text = text_transformer
self.text_to_latents = nn.Linear(self.text.dim, dim_latent)
self.audio_to_latents = nn.Linear(self.audio.dim, dim_latent)
klass = SigmoidContrastiveLearning if sigmoid_contrastive_loss else partial(SoftmaxContrastiveLearning, decoupled_contrastive_learning = decoupled_contrastive_learning)
self.contrast = klass()
self.multi_layer_contrastive_learning = None
if hierarchical_contrastive_loss:
num_layers = default(hierarchical_contrastive_loss_layers, min(audio_transformer.depth, text_transformer.depth) - 1)
assert num_layers > 0
self.register_buffer('text_layers_indices', interspersed_indices(num_layers, text_transformer.depth))
self.register_buffer('audio_layers_indices', interspersed_indices(num_layers, audio_transformer.depth))
self.multi_layer_contrastive_learning = MultiLayerContrastiveLoss(
audio_dim = self.audio.dim,
text_dim = self.text.dim,
dim_latent = dim_latent,
layers = num_layers,
decoupled_contrastive_learning = decoupled_contrastive_learning,
sigmoid_contrastive_loss = sigmoid_contrastive_loss
)
def get_audio_latents(
self,
wavs,
return_all_layers = False
):
audio_embeds, audio_layers = self.audio(wavs, return_all_layers = True)
audio_latents = self.audio_to_latents(audio_embeds)
out = l2norm(audio_latents)
if not return_all_layers:
return out
return out, audio_layers
@beartype
def get_text_latents(
self,
texts = None,
raw_texts: Optional[List[str]] = None,
return_all_layers = False
):
text_embeds, text_layers = self.text(texts, raw_texts = raw_texts, return_all_layers = True)
text_latents = self.text_to_latents(text_embeds)
out = l2norm(text_latents)
if not return_all_layers:
return out
return out, text_layers
@beartype
def forward(
self,
wavs,
texts = None,
raw_texts: Optional[List[str]] = None,
return_latents = False,
return_similarities = False,
return_pairwise_similarities = False
):
batch, device = wavs.shape[0], wavs.device
audio_latents, audio_layers = self.get_audio_latents(wavs, return_all_layers = True)
text_latents, text_layers = self.get_text_latents(texts, raw_texts = raw_texts, return_all_layers = True)
if return_latents:
return audio_latents, text_latents
if return_similarities:
return einsum('i d, i d -> i', audio_latents, text_latents)
if return_pairwise_similarities:
cosine_sim = einsum('i d, j d -> i j', audio_latents, text_latents)
return cosine_sim
cl_loss = self.contrast(audio_latents, text_latents)
if not exists(self.multi_layer_contrastive_learning):
return cl_loss
audio_layers = audio_layers[self.audio_layers_indices]
text_layers = text_layers[self.text_layers_indices]
# whether to do cl loss across all layers, from ViCHA paper https://arxiv.org/abs/2208.13628
hierarchical_cl_loss = self.multi_layer_contrastive_learning(
audio_layers = audio_layers,
text_layers = text_layers
)
return cl_loss + hierarchical_cl_loss
# music lm
class MuLaNEmbedQuantizer(AudioConditionerBase):
@beartype
def __init__(
self,
mulan: MuLaN,
conditioning_dims: Tuple[int, ...],
rq_num_quantizers = 8,
rq_ema_decay = 0.9,
codebook_size = 1024,
namespaces: Tuple[str, ...] = ('semantic', 'coarse', 'fine'),
):
super().__init__()
self.mulan = mulan
assert len(namespaces) > 0
self.namespaces = namespaces
self.conditioning_dims = conditioning_dims
assert len(conditioning_dims) == len(namespaces), 'number of conditioning dimensions must be equal to number of namespaces'
dim = mulan.dim_latent
self.rq = ResidualVQ(
dim = dim,
num_quantizers = rq_num_quantizers,
codebook_size = codebook_size,
decay = rq_ema_decay,
commitment_weight = 0, # only use EMA to update codebooks
kmeans_init = True,
threshold_ema_dead_code = 2,
quantize_dropout = False # no quantize dropout
)
self.dim = dim
self.num_codebooks = rq_num_quantizers
self.cond_embeddings = nn.ParameterDict({})
for namespace, conditioning_dim in zip(namespaces, conditioning_dims):
cond_embeddings = nn.Parameter(torch.randn(rq_num_quantizers, codebook_size, conditioning_dim))
nn.init.normal_(cond_embeddings, std = 0.02)
self.cond_embeddings[namespace] = cond_embeddings
self.set_default_namespace(namespaces[0])
def parameters(self):
return self.cond_embeddings.parameters()
def set_default_namespace(self, namespace):
self._default_namespace = namespace
def forward(
self,
wavs = None,
texts = None,
namespace = None
):
assert exists(wavs) ^ exists(texts)
namespace = default(namespace, self._default_namespace)
assert namespace in self.namespaces, f'namespace {namespace} not found'
cond_embeddings = self.cond_embeddings[namespace]
with torch.no_grad():
self.mulan.eval()
# sound and language live in joint embedding space because of contrastive learning
if exists(wavs):
latents = self.mulan.get_audio_latents(wavs)
elif exists(texts):
latents = self.mulan.get_text_latents(texts)
_, indices, _ = self.rq(latents)
batch, num_codebooks, dim = indices.shape[0], self.num_codebooks, cond_embeddings.shape[-1]
cond_embeddings = repeat(cond_embeddings, 'q c d -> b q c d', b = batch)
indices = repeat(indices, 'b q -> b q 1 d', q = num_codebooks, d = dim)
cond_embeddings = cond_embeddings.gather(2, indices)
return rearrange(cond_embeddings, 'b q 1 d -> b q d')
class MusicLM(nn.Module):
@beartype
def __init__(
self,
audio_lm: AudioLM,
mulan_embed_quantizer: MuLaNEmbedQuantizer
):
super().__init__()
assert not exists(audio_lm.audio_conditioner), 'mulan must not have been passed into AudioLM. it will be managed externally now, embedding the text into the joint embedding space for text-to-audio synthesis'
self.mulan_embed_quantizer = mulan_embed_quantizer
self.audio_lm = audio_lm
@property
def device(self):
return next(self.parameters()).device
@torch.no_grad()
def forward(
self,
text: str,
num_samples = 1,
**audio_lm_kwargs
):
self.eval()
texts = tokenizer.tokenize([text]).to(self.device)
text_embeds = self.mulan_embed_quantizer(texts = texts)
# unable to deal with variable lengthed audio for now
samples = []
for _ in range(num_samples):
music = self.audio_lm(text_embeds = text_embeds, **audio_lm_kwargs)
samples.append(music)
# if one sample, just return it
if num_samples == 1:
return first(samples)
mulan = self.mulan_embed_quantizer.mulan
# get the one with the highest similarity score, of all the samples
sims = torch.cat([mulan(texts = texts, wavs = music, return_similarities = True) for music in samples], dim = 0)
top_matching_index = sims.topk(1, dim = 0).indices.item()
return samples[top_matching_index]
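# illustrative smoke test (not part of the original file) - a minimal sketch of wiring the classes
# above together; the numbers are arbitrary, and mulan.eval() just keeps the forward deterministic
# by skipping the SpecAugment / patch dropout branches
if __name__ == '__main__':
    audio_transformer = AudioSpectrogramTransformer(dim = 512, depth = 6)
    text_transformer = TextTransformer(dim = 512, depth = 6)
    mulan = MuLaN(audio_transformer = audio_transformer, text_transformer = text_transformer)
    mulan.eval()
    wavs = torch.randn(2, 1024)                  # raw waveforms
    texts = torch.randint(0, 20000, (2, 256))    # text token ids
    loss = mulan(wavs, texts = texts)            # scalar contrastive loss
    loss.backward()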
| musiclm-pytorch-main | musiclm_pytorch/musiclm_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'discrete-key-value-bottleneck-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.1',
license='MIT',
description = 'Discrete Key / Value Bottleneck - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/discrete-key-value-bottleneck-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'quantization',
'memory',
'transfer learning'
],
install_requires=[
'einops>=0.6',
'vector-quantize-pytorch>=1.6.28',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| discrete-key-value-bottleneck-pytorch-main | setup.py |
from discrete_key_value_bottleneck_pytorch.discrete_key_value_bottleneck import DiscreteKeyValueBottleneck
| discrete-key-value-bottleneck-pytorch-main | discrete_key_value_bottleneck_pytorch/__init__.py |
import torch
from torch import nn, einsum
from einops import rearrange, repeat, reduce
from vector_quantize_pytorch import VectorQuantize
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# main class
class DiscreteKeyValueBottleneck(nn.Module):
def __init__(
self,
dim,
*,
num_memories,
dim_embed = None,
num_memory_codebooks = 1,
encoder = None,
dim_memory = None,
average_pool_memories = True,
**kwargs
):
super().__init__()
self.encoder = encoder
dim_embed = default(dim_embed, dim)
self.dim_embed = dim_embed
self.vq = VectorQuantize(
dim = dim * num_memory_codebooks,
codebook_size = num_memories,
heads = num_memory_codebooks,
separate_codebook_per_head = True,
**kwargs
)
dim_memory = default(dim_memory, dim)
self.values = nn.Parameter(torch.randn(num_memory_codebooks, num_memories, dim_memory))
rand_proj = torch.empty(num_memory_codebooks, dim_embed, dim)
nn.init.xavier_normal_(rand_proj)
self.register_buffer('rand_proj', rand_proj)
self.average_pool_memories = average_pool_memories
def forward(
self,
x,
return_intermediates = False,
average_pool_memories = None,
**kwargs
):
average_pool_memories = default(average_pool_memories, self.average_pool_memories)
if exists(self.encoder):
self.encoder.eval()
with torch.no_grad():
x = self.encoder(x, **kwargs)
x.detach_()
assert x.shape[-1] == self.dim_embed, f'encoding has a dimension of {x.shape[-1]} but dim_embed (defaults to dim) is set to {self.dim_embed} on init'
x = einsum('b n d, c d e -> b n c e', x, self.rand_proj)
x = rearrange(x, 'b n c e -> b n (c e)')
vq_out = self.vq(x)
quantized, memory_indices, commit_loss = vq_out
if memory_indices.ndim == 2:
memory_indices = rearrange(memory_indices, '... -> ... 1')
memory_indices = rearrange(memory_indices, 'b n h -> b h n')
values = repeat(self.values, 'h n d -> b h n d', b = memory_indices.shape[0])
memory_indices = repeat(memory_indices, 'b h n -> b h n d', d = values.shape[-1])
memories = values.gather(2, memory_indices)
if average_pool_memories:
memories = reduce(memories, 'b h n d -> b n d', 'mean')
if return_intermediates:
return memories, vq_out
return memories
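# illustrative usage sketch (not part of the original file): quantize a batch of precomputed
# encodings into discrete keys and look up their learned values; `decay` is forwarded to the
# underlying VectorQuantize via **kwargs
if __name__ == '__main__':
    bottleneck = DiscreteKeyValueBottleneck(
        dim = 256,                  # input (and key) dimension
        num_memories = 512,         # number of discrete keys per codebook
        num_memory_codebooks = 2,   # two separate codebooks
        dim_memory = 2048,          # dimension of the stored values
        decay = 0.9                 # ema decay for the codebook updates
    )
    embeds = torch.randn(1, 1024, 256)   # (batch, seq, dim) encodings
    memories = bottleneck(embeds)        # (1, 1024, 2048) after averaging across codebooks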
| discrete-key-value-bottleneck-pytorch-main | discrete_key_value_bottleneck_pytorch/discrete_key_value_bottleneck.py |
from setuptools import setup, find_packages
setup(
name = 'axial_attention',
packages = find_packages(),
version = '0.6.1',
license='MIT',
description = 'Axial Attention',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/axial-attention',
keywords = ['attention', 'artificial intelligence'],
install_requires=[
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | axial-attention-master | setup.py |
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim = 1)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim = 1)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim = 1)
del y
dy1, dy2 = torch.chunk(dy, 2, dim = 1)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim = 1)
dx = torch.cat([dx1, dx2], dim = 1)
return x, dx
class IrreversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = f
self.g = g
def forward(self, x, f_args, g_args):
x1, x2 = torch.chunk(x, 2, dim = 1)
y1 = x1 + self.f(x2, **f_args)
y2 = x2 + self.g(y1, **g_args)
return torch.cat([y1, y2], dim = 1)
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
ctx.kwargs = kwargs
for block in blocks:
x = block(x, **kwargs)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
kwargs = ctx.kwargs
for block in ctx.blocks[::-1]:
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class ReversibleSequence(nn.Module):
def __init__(self, blocks, ):
super().__init__()
self.blocks = nn.ModuleList([ReversibleBlock(f, g) for (f, g) in blocks])
def forward(self, x, arg_route = (True, True), **kwargs):
f_args, g_args = map(lambda route: kwargs if route else {}, arg_route)
block_kwargs = {'f_args': f_args, 'g_args': g_args}
x = torch.cat((x, x), dim = 1)
x = _ReversibleFunction.apply(x, self.blocks, block_kwargs)
return torch.stack(x.chunk(2, dim = 1)).mean(dim = 0)
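# note: ReversibleSequence trades compute for memory - block activations are recomputed from the
# outputs during the backward pass instead of being stored, so activation memory stays roughly
# constant in depth; the input is duplicated into two streams and the two streams are averaged at the end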
| axial-attention-master | axial_attention/reversible.py |
from axial_attention.axial_attention import AxialAttention, AxialPositionalEmbedding, AxialImageTransformer, SelfAttention
| axial-attention-master | axial_attention/__init__.py |
import torch
from torch import nn
from operator import itemgetter
from axial_attention.reversible import ReversibleSequence
# helper functions
def exists(val):
return val is not None
def map_el_ind(arr, ind):
return list(map(itemgetter(ind), arr))
def sort_and_return_indices(arr):
indices = [ind for ind in range(len(arr))]
arr = zip(arr, indices)
arr = sorted(arr)
return map_el_ind(arr, 0), map_el_ind(arr, 1)
# calculates the permutation to bring the input tensor to something attend-able
# also calculates the inverse permutation to bring the tensor back to its original shape
def calculate_permutations(num_dimensions, emb_dim):
total_dimensions = num_dimensions + 2
emb_dim = emb_dim if emb_dim > 0 else (emb_dim + total_dimensions)
axial_dims = [ind for ind in range(1, total_dimensions) if ind != emb_dim]
permutations = []
for axial_dim in axial_dims:
last_two_dims = [axial_dim, emb_dim]
dims_rest = set(range(0, total_dimensions)) - set(last_two_dims)
permutation = [*dims_rest, *last_two_dims]
permutations.append(permutation)
return permutations
# helper classes
class ChanLayerNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, x):
std = torch.var(x, dim = 1, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (std + self.eps) * self.g + self.b
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
class Sequential(nn.Module):
def __init__(self, blocks):
super().__init__()
self.blocks = blocks
def forward(self, x):
for f, g in self.blocks:
x = x + f(x)
x = x + g(x)
return x
class PermuteToFrom(nn.Module):
def __init__(self, permutation, fn):
super().__init__()
self.fn = fn
_, inv_permutation = sort_and_return_indices(permutation)
self.permutation = permutation
self.inv_permutation = inv_permutation
def forward(self, x, **kwargs):
axial = x.permute(*self.permutation).contiguous()
shape = axial.shape
*_, t, d = shape
# merge all but axial dimension
axial = axial.reshape(-1, t, d)
# attention
axial = self.fn(axial, **kwargs)
# restore to original shape and permutation
axial = axial.reshape(*shape)
axial = axial.permute(*self.inv_permutation).contiguous()
return axial
# axial pos emb
class AxialPositionalEmbedding(nn.Module):
def __init__(self, dim, shape, emb_dim_index = 1):
super().__init__()
parameters = []
total_dimensions = len(shape) + 2
ax_dim_indexes = [i for i in range(1, total_dimensions) if i != emb_dim_index]
self.num_axials = len(shape)
for i, (axial_dim, axial_dim_index) in enumerate(zip(shape, ax_dim_indexes)):
shape = [1] * total_dimensions
shape[emb_dim_index] = dim
shape[axial_dim_index] = axial_dim
parameter = nn.Parameter(torch.randn(*shape))
setattr(self, f'param_{i}', parameter)
def forward(self, x):
for i in range(self.num_axials):
x = x + getattr(self, f'param_{i}')
return x
# attention
class SelfAttention(nn.Module):
def __init__(self, dim, heads, dim_heads = None):
super().__init__()
self.dim_heads = (dim // heads) if dim_heads is None else dim_heads
dim_hidden = self.dim_heads * heads
self.heads = heads
self.to_q = nn.Linear(dim, dim_hidden, bias = False)
self.to_kv = nn.Linear(dim, 2 * dim_hidden, bias = False)
self.to_out = nn.Linear(dim_hidden, dim)
def forward(self, x, kv = None):
kv = x if kv is None else kv
q, k, v = (self.to_q(x), *self.to_kv(kv).chunk(2, dim=-1))
b, t, d, h, e = *q.shape, self.heads, self.dim_heads
merge_heads = lambda x: x.reshape(b, -1, h, e).transpose(1, 2).reshape(b * h, -1, e)
q, k, v = map(merge_heads, (q, k, v))
dots = torch.einsum('bie,bje->bij', q, k) * (e ** -0.5)
dots = dots.softmax(dim=-1)
out = torch.einsum('bij,bje->bie', dots, v)
out = out.reshape(b, h, -1, e).transpose(1, 2).reshape(b, -1, d)
out = self.to_out(out)
return out
# axial attention class
class AxialAttention(nn.Module):
def __init__(self, dim, num_dimensions = 2, heads = 8, dim_heads = None, dim_index = -1, sum_axial_out = True):
assert (dim % heads) == 0, 'hidden dimension must be divisible by number of heads'
super().__init__()
self.dim = dim
self.total_dimensions = num_dimensions + 2
self.dim_index = dim_index if dim_index > 0 else (dim_index + self.total_dimensions)
attentions = []
for permutation in calculate_permutations(num_dimensions, dim_index):
attentions.append(PermuteToFrom(permutation, SelfAttention(dim, heads, dim_heads)))
self.axial_attentions = nn.ModuleList(attentions)
self.sum_axial_out = sum_axial_out
def forward(self, x):
assert len(x.shape) == self.total_dimensions, 'input tensor does not have the correct number of dimensions'
assert x.shape[self.dim_index] == self.dim, 'input tensor does not have the correct input dimension'
if self.sum_axial_out:
return sum(map(lambda axial_attn: axial_attn(x), self.axial_attentions))
out = x
for axial_attn in self.axial_attentions:
out = axial_attn(out)
return out
# axial image transformer
class AxialImageTransformer(nn.Module):
def __init__(self, dim, depth, heads = 8, dim_heads = None, dim_index = 1, reversible = True, axial_pos_emb_shape = None):
super().__init__()
permutations = calculate_permutations(2, dim_index)
get_ff = lambda: nn.Sequential(
ChanLayerNorm(dim),
nn.Conv2d(dim, dim * 4, 3, padding = 1),
nn.LeakyReLU(inplace=True),
nn.Conv2d(dim * 4, dim, 3, padding = 1)
)
self.pos_emb = AxialPositionalEmbedding(dim, axial_pos_emb_shape, dim_index) if exists(axial_pos_emb_shape) else nn.Identity()
layers = nn.ModuleList([])
for _ in range(depth):
attn_functions = nn.ModuleList([PermuteToFrom(permutation, PreNorm(dim, SelfAttention(dim, heads, dim_heads))) for permutation in permutations])
conv_functions = nn.ModuleList([get_ff(), get_ff()])
layers.append(attn_functions)
layers.append(conv_functions)
execute_type = ReversibleSequence if reversible else Sequential
self.layers = execute_type(layers)
def forward(self, x):
x = self.pos_emb(x)
return self.layers(x)
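# illustrative usage sketch (not part of the original file): axial attention over an image,
# attending along height and width separately and summing the results
if __name__ == '__main__':
    img = torch.randn(1, 3, 256, 256)
    attn = AxialAttention(
        dim = 3,             # embedding dimension
        dim_index = 1,       # where the embedding dimension sits in the tensor
        heads = 1,
        num_dimensions = 2,  # number of axial dimensions (height and width)
    )
    out = attn(img)          # (1, 3, 256, 256)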
| axial-attention-master | axial_attention/axial_attention.py |
from setuptools import setup, find_packages
setup(
name = 'metaformer-gpt',
packages = find_packages(exclude=[]),
version = '0.0.5',
license='MIT',
description = 'Metaformer - GPT',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/metaformer-gpt',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention-less'
],
install_requires=[
'einops>=0.4',
'scipy',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| metaformer-gpt-main | setup.py |
import gzip
import random
import numpy as np
import torch
import torch.optim as optim
import tqdm
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from metaformer_gpt import MetaformerGPT
from metaformer_gpt.autoregressive_wrapper import AutoregressiveWrapper
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = MetaformerGPT(
num_tokens = 256,
dim = 512,
depth = 8,
heads = 16,
dim_head = 32
)
model = AutoregressiveWrapper(model, max_seq_len=SEQ_LEN)
model.cuda()
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy() # frombuffer + copy replaces the deprecated np.fromstring and keeps the array writable for torch.from_numpy
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10.0, desc="training"):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f"training loss: {loss.item()}")
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str)
| metaformer-gpt-main | train.py |
import torch
from torch import nn, einsum
from einops import rearrange, repeat
from scipy.fftpack import next_fast_len
# helper functions
def cummean(x, *, dim):
numer = x.cumsum(dim = dim)
denom = torch.arange(x.shape[1], device = x.device) + 1
return numer / rearrange(denom, '... -> ... 1')
def conv1d_fft(x, weights, dim = -2, weight_dim = -1):
# O(N log(N)) 1d convolution using some fourier trick
N = x.shape[dim]
M = weights.shape[weight_dim]
fast_len = next_fast_len(N + M - 1)
f_x = torch.fft.rfft(x, n = fast_len, dim = dim)
f_weight = torch.fft.rfft(weights, n = fast_len, dim = weight_dim)
f_v_weight = f_x * rearrange(f_weight.conj(), '... -> ... 1')
out = torch.fft.irfft(f_v_weight, fast_len, dim = dim)
out = out.roll(-1, dims = (dim,))
indices = torch.arange(start = fast_len - N, end = fast_len, dtype = torch.long, device = x.device)
out = out.index_select(dim, indices)
return out
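# with weights alpha * (1 - alpha)^k (reversed), as in MultiheadExponentialTimeDecay below, each
# output position becomes an exponentially decayed sum over the sequence, computed in O(n log n)
# via the FFT rather than O(n^2) directly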
# classes
class MeanCenteringPool(nn.Module):
def __init__(
self,
dim
):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.proj = nn.Linear(dim, dim, bias = False)
def forward(self, x):
x = self.norm(x)
x = cummean(x, dim = 1) - x
return self.proj(x)
class MultiheadExponentialTimeDecay(nn.Module):
def __init__(
self,
dim,
*,
heads = 8,
dim_head = 64
):
super().__init__()
self.heads = heads
inner_dim = heads * dim_head
self.norm = nn.LayerNorm(dim)
self.alpha = nn.Parameter(torch.randn(heads))
self.project_in = nn.Linear(dim, inner_dim, bias = False)
self.project_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x):
b, n, d, h, device = *x.shape, self.heads, x.device
x = self.norm(x)
# linear project in
x = self.project_in(x)
# split out heads
x = rearrange(x, 'b n (h d) -> b h n d', h = h)
# prepare exponential alpha
alpha = self.alpha.sigmoid()
alpha = rearrange(alpha, 'h -> h 1')
# arange == powers
arange = torch.arange(n, device = device)
weights = alpha * (1 - alpha) ** torch.flip(arange, dims = (0,))
output = conv1d_fft(x, weights)
# merge heads
output = rearrange(output, 'b h n d -> b n (h d)')
return self.project_out(output)
def FeedForward(dim, mult = 4):
hidden_dim = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, hidden_dim, bias = False),
nn.GELU(),
nn.Linear(hidden_dim, dim, bias = False)
)
class MetaformerGPT(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
heads = 16,
dim_head = 32,
max_seq_len = 2048,
ff_mult = 4
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
MultiheadExponentialTimeDecay(dim, heads = heads, dim_head = dim_head),
MeanCenteringPool(dim),
FeedForward(dim, mult = ff_mult)
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens, bias = False)
)
def forward(self, x):
n, device = x.shape[1], x.device
x = self.token_emb(x)
x = x + self.pos_emb(torch.arange(n, device = device))
for mh_esa, pool, ff in self.layers:
x = mh_esa(x) + x
x = pool(x) + x
x = ff(x) + x
return self.to_logits(x)
| metaformer-gpt-main | metaformer_gpt/metaformer_gpt.py |
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import nn
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres=0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float("-inf"))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, max_seq_len=2048, pad_value=0):
super().__init__()
self.max_seq_len = max_seq_len
self.pad_value = pad_value
self.net = net
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token=None,
temperature=1.0,
filter_thres=0.9,
**kwargs
):
b, t, device = *start_tokens.shape, start_tokens.device
out = start_tokens
for _ in range(seq_len):
logits = self.net(out, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres=filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_token = out == eos_token
if is_eos_token.any(dim=-1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_token, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim=-1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
return out
def forward(self, x, **kwargs):
x_inp, x_labels = x[:, :-1], x[:, 1:]
logits = self.net(x_inp, **kwargs)
return F.cross_entropy(rearrange(logits, "b n c -> b c n"), x_labels)
| metaformer-gpt-main | metaformer_gpt/autoregressive_wrapper.py |
from metaformer_gpt.metaformer_gpt import MetaformerGPT, MultiheadExponentialTimeDecay
| metaformer-gpt-main | metaformer_gpt/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'ddpm-ipa-protein-generation',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'DDPM + Invariant Point Attention - Protein Generation',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/ddpm-ipa-protein-generation',
keywords = [
'artificial intelligence',
'deep learning',
'attention mechanism',
'geometric deep learning',
'denoising diffusion probabilistic models'
],
install_requires=[
'invariant-point-attention>=0.2.1',
'einops>=0.4',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| ddpm-ipa-protein-generation-main | setup.py |
import math
from functools import partial
import torch
from torch import nn
from torch.special import expm1
from einops import repeat
# gaussian diffusion with continuous time helper functions and classes
# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py
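# minimal helper definitions (assumed, not shown in this excerpt): the class below relies on
# exists / default / maybe / log / right_pad_dims_to, which mirror the usual definitions in
# lucidrains' continuous-time diffusion code
def exists(val):
    return val is not None
def default(val, d):
    if exists(val):
        return val
    return d() if callable(d) else d
def maybe(fn):
    def inner(x):
        if not exists(x):
            return x
        return fn(x)
    return inner
def log(t, eps: float = 1e-20):
    return torch.log(t.clamp(min = eps))
def right_pad_dims_to(x, t):
    padding_dims = x.ndim - t.ndim
    if padding_dims <= 0:
        return t
    return t.view(*t.shape, *((1,) * padding_dims))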
@torch.jit.script
def beta_linear_log_snr(t):
return -torch.log(expm1(1e-4 + 10 * (t ** 2)))
@torch.jit.script
def alpha_cosine_log_snr(t, s: float = 0.008):
return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version
def log_snr_to_alpha_sigma(log_snr):
return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))
class Diffusion(nn.Module):
def __init__(self, *, noise_schedule, timesteps = 1000):
super().__init__()
if noise_schedule == "linear":
self.log_snr = beta_linear_log_snr
elif noise_schedule == "cosine":
self.log_snr = alpha_cosine_log_snr
else:
raise ValueError(f'invalid noise schedule {noise_schedule}')
self.num_timesteps = timesteps
def get_times(self, batch_size, noise_level, *, device):
return torch.full((batch_size,), noise_level, device = device, dtype = torch.float32)
def sample_random_times(self, batch_size, max_thres = 0.999, *, device):
return torch.zeros((batch_size,), device = device).float().uniform_(0, max_thres)
def get_condition(self, times):
return maybe(self.log_snr)(times)
def get_sampling_timesteps(self, batch, *, device):
times = torch.linspace(1., 0., self.num_timesteps + 1, device = device)
times = repeat(times, 't -> b t', b = batch)
times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)
times = times.unbind(dim = -1)
return times
def q_posterior(self, x_start, x_t, t, *, t_next = None):
t_next = default(t_next, lambda: (t - 1. / self.num_timesteps).clamp(min = 0.))
""" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material """
log_snr = self.log_snr(t)
log_snr_next = self.log_snr(t_next)
log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)
# c - as defined near eq 33
c = -expm1(log_snr - log_snr_next)
posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)
# following (eq. 33)
posterior_variance = (sigma_next ** 2) * c
posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def q_sample(self, x_start, t, noise = None):
if isinstance(t, float):
batch = x_start.shape[0]
t = torch.full((batch,), t, device = x_start.device, dtype = x_start.dtype)
noise = default(noise, lambda: torch.randn_like(x_start))
log_snr = self.log_snr(t)
log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)
return alpha * x_start + sigma * noise, log_snr
def q_sample_from_to(self, x_from, from_t, to_t, noise = None):
shape, device, dtype = x_from.shape, x_from.device, x_from.dtype
batch = shape[0]
if isinstance(from_t, float):
from_t = torch.full((batch,), from_t, device = device, dtype = dtype)
if isinstance(to_t, float):
to_t = torch.full((batch,), to_t, device = device, dtype = dtype)
noise = default(noise, lambda: torch.randn_like(x_from))
log_snr = self.log_snr(from_t)
log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)
log_snr_to = self.log_snr(to_t)
log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)
alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)
return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha
def predict_start_from_noise(self, x_t, t, noise):
log_snr = self.log_snr(t)
log_snr = right_pad_dims_to(x_t, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)
| ddpm-ipa-protein-generation-main | ddpm_ipa_protein_generation/ddpm_ipa_protein_generation.py |
| ddpm-ipa-protein-generation-main | ddpm_ipa_protein_generation/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'transframer-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Transframer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/transframer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'unets',
'video generation'
],
install_requires=[
'einops>=0.4',
'kornia',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| transframer-pytorch-main | setup.py |
from transframer_pytorch.transframer_pytorch import Transframer, Unet
| transframer-pytorch-main | transframer_pytorch/__init__.py |
from math import sqrt, pi
from functools import partial
import torch
import torch.nn.functional as F
from torch.fft import fft, irfft
from torch import nn, einsum
from einops import rearrange, repeat
from kornia.color.ycbcr import rgb_to_ycbcr, ycbcr_to_rgb
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# tensor helpers
def l2norm(t):
return F.normalize(t, dim = -1)
# dct related encoding / decoding functions
# functions are adapted from https://github.com/zh217/torch-dct/blob/master/torch_dct/_dct.py
# but fixes for most torch versions > 1.9, using latest fft and irfft
def dct(x, norm = None):
shape, dtype, device = x.shape, x.dtype, x.device
N = shape[-1]
x = rearrange(x.contiguous(), '... n -> (...) n')
v = torch.cat([x[:, ::2], x[:, 1::2].flip((1,))], dim = 1)
vc = torch.view_as_real(fft(v, dim=1))
k = -torch.arange(N, dtype = dtype, device = device) * pi / (2 * N)
k = rearrange(k, 'n -> 1 n')
v = vc[:, :, 0] * k.cos() - vc[:, :, 1] * k.sin()
if norm == 'ortho':
v[:, 0] /= sqrt(N) * 2
v[:, 1:] /= sqrt(N / 2) * 2
v *= 2
return v.view(*shape)
def idct(x, norm = None):
shape, dtype, device = x.shape, x.dtype, x.device
N = shape[-1]
x_v = rearrange(x.contiguous(), '... n -> (...) n') / 2
if norm == 'ortho':
x_v[:, 0] *= sqrt(N) * 2
x_v[:, 1:] *= sqrt(N / 2) * 2
k = torch.arange(N, dtype = dtype, device = device) * pi / (2 * N)
k = rearrange(k, 'n -> 1 n')
w_r = torch.cos(k)
w_i = torch.sin(k)
v_t_r = x_v
v_t_i = torch.cat([x_v[:, :1] * 0, -x_v.flip((1,))[:, :-1]], dim = 1)
v_r = v_t_r * w_r - v_t_i * w_i
v_i = v_t_r * w_i + v_t_i * w_r
v = torch.stack((v_r, v_i), dim = -1)
v = irfft(torch.view_as_complex(v), n = N, dim = 1)
x = torch.zeros_like(v)
x[:, ::2] += v[:, :N - (N // 2)]
x[:, 1::2] += v.flip((1,))[:, :N // 2]
return x.view(*shape)
def dct_2d(x, norm = None):
dct_ = partial(dct, norm = norm)
x1 = dct_(x)
x2 = dct_(rearrange(x1, '... h w -> ... w h'))
return rearrange(x2, '... h w -> ... w h')
def idct_2d(x, norm = None):
idct_ = partial(idct, norm = norm)
x1 = idct_(x)
x2 = idct_(rearrange(x1, '... h w -> ... w h'))
return rearrange(x2, '... h w -> ... w h')
def blockify(x, block_size = 8):
assert block_size in {8, 16}
return rearrange(x, 'b c (h bs1) (w bs2) -> (b h w) c bs1 bs2', bs1 = block_size, bs2 = block_size)
def deblockify(x, h, w, block_size = 8):
assert block_size in {8, 16}
return rearrange(x, '(b h w) c bs1 bs2 -> b c (h bs1) (w bs2)', h = h, w = w)
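# illustrative round-trip sketch (not part of the original file): blockify an image, take the 2d
# DCT and invert it, then reassemble - the reconstruction should closely match the input
#
# x = torch.randn(1, 3, 64, 64)
# blocks = blockify(x, block_size = 8)                                               # (64, 3, 8, 8)
# recon = deblockify(idct_2d(dct_2d(blocks, norm = 'ortho'), norm = 'ortho'), 8, 8)  # ~= x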
# final functions from rgb -> dct and back
def images_to_dct(images):
raise NotImplementedError
def dct_to_images(images):
raise NotImplementedError
# feedforward
def FeedForward(
dim,
*,
mult = 4.
):
inner_dim = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, inner_dim, bias = False),
nn.GELU(),
nn.LayerNorm(inner_dim), # from normformer paper
nn.Linear(inner_dim, dim, bias = False)
)
# attention, what else?
# here we will use one headed key / values (as described in paper, from Noam Shazeer) - along with cosine sim attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 64,
heads = 8,
scale = 10,
causal = False,
norm_context = False
):
super().__init__()
self.heads = heads
self.scale = scale
self.causal = causal
self.norm = nn.LayerNorm(dim)
self.norm_context = nn.LayerNorm(dim) if norm_context else nn.Identity()
self.to_q = nn.Linear(dim, dim_head * heads, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.to_out = nn.Linear(dim_head * heads, dim, bias = False)
def forward(
self,
x,
context = None,
context_mask = None
):
h, scale, causal, device = self.heads, self.scale, self.causal, x.device
x = self.norm(x)
context = default(context, x)
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if exists(context):
context = self.norm_context(context)
k, v = self.to_kv(context).chunk(2, dim = -1)
q, k = map(l2norm, (q, k))
sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale
mask_value = -torch.finfo(sim.dtype).max
if exists(context_mask):
context_mask = rearrange(context_mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~context_mask, mask_value) # context_mask is assumed True for valid positions, matching the attention masks elsewhere in this codebase
if causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, mask_value)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# unet
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8
):
super().__init__()
self.proj = nn.Conv2d(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x):
x = self.proj(x)
x = self.norm(x)
return self.act(x)
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8
):
super().__init__()
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x):
h = self.block1(x)
h = self.block2(h)
return h + self.res_conv(x)
class UnetTransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 32,
heads = 8
):
super().__init__()
self.attn = Attention(dim = dim, dim_head = dim_head, heads = heads)
self.ff = FeedForward(dim = dim)
def forward(self, x):
orig_shape = x.shape
x = rearrange(x, 'b c ... -> b (...) c')
x = self.attn(x) + x
x = self.ff(x) + x
x = rearrange(x, 'b n c -> b c n')
return x.reshape(*orig_shape)
class Unet(nn.Module):
def __init__(
self,
dim,
*,
dim_mults = (1, 2, 3, 4),
dim_out,
**attn_kwargs
):
super().__init__()
self.to_out = nn.Conv2d(dim, dim_out, 1)
dims = [dim, *map(lambda t: t * dim, dim_mults)]
dim_pairs = tuple(zip(dims[:-1], dims[1:]))
mid_dim = dims[-1]
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
self.mid = ResnetBlock(mid_dim, mid_dim)
for dim_in, dim_out in dim_pairs:
self.downs.append(nn.ModuleList([
ResnetBlock(dim_in, dim_in),
UnetTransformerBlock(dim_in, **attn_kwargs),
nn.Conv2d(dim_in, dim_out, 3, 2, 1)
]))
self.ups.insert(0, nn.ModuleList([
ResnetBlock(dim_out * 2, dim_out),
UnetTransformerBlock(dim_out, **attn_kwargs),
nn.ConvTranspose2d(dim_out, dim_in, 4, 2, 1)
]))
def forward(self, x):
hiddens = []
for block, attn_block, downsample in self.downs:
x = block(x)
x = attn_block(x)
x = downsample(x)
hiddens.append(x)
x = self.mid(x)
for block, attn_block, upsample in self.ups:
x = torch.cat((x, hiddens.pop()), dim = 1)
x = block(x)
x = attn_block(x)
x = upsample(x)
out = self.to_out(x)
return rearrange(out, 'b c h w -> b (h w) c')
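# a minimal sketch with hypothetical sizes: with the default dim_mults the unet
# downsamples four times and mirrors back up, so the spatial size is preserved, and the
# result is flattened into (batch, h * w, dim_out) tokens for the decoder to cross attend
# to. note the input is expected to already have `dim` channels
def _unet_token_sketch():
    unet = Unet(dim = 16, dim_out = 32)
    frames = torch.randn(1, 16, 32, 32)           # (batch, dim, height, width)
    tokens = unet(frames)
    assert tokens.shape == (1, 32 * 32, 32)
    return tokens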
# main class
class Transframer(nn.Module):
def __init__(
self,
*,
unet: Unet,
dim,
depth,
max_channels,
max_positions,
max_values,
image_size,
block_size = 8,
dim_head = 32,
heads = 8,
ff_mult = 4.,
ignore_index = -100
):
super().__init__()
self.unet = unet
self.start_token = nn.Parameter(torch.randn(dim))
self.block_pos_emb = nn.Parameter(torch.randn(2, (image_size // block_size), dim))
self.channels = nn.Embedding(max_channels, dim)
self.positions = nn.Embedding(max_positions, dim)
self.values = nn.Embedding(max_values, dim)
self.postemb_norm = nn.LayerNorm(dim) # done in Bloom and YaLM for stability
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim, dim_head = dim_head, heads = heads, causal = True),
Attention(dim, dim_head = dim_head, heads = heads, norm_context = True),
FeedForward(dim, mult = ff_mult)
]))
self.final_norm = nn.LayerNorm(dim)
# give channels and positions separate embedding for final prediction
self.axial_channels = nn.Embedding(max_channels, dim)
self.axial_positions = nn.Embedding(max_positions, dim)
self.axial_attn = Attention(dim, dim_head = dim_head, heads = heads, causal = True)
self.axial_ff = FeedForward(dim, mult = ff_mult)
self.axial_final_norm = nn.LayerNorm(dim)
# projection to logits
self.to_channel_logits = nn.Linear(dim, max_channels)
self.to_position_logits = nn.Linear(dim, max_positions)
self.to_value_logits = nn.Linear(dim, max_values)
self.ignore_index = ignore_index
def get_block_pos_emb(self):
block_pos_emb_h, block_pos_emb_w = self.block_pos_emb.unbind(dim = 0)
block_pos_emb = rearrange(block_pos_emb_h, 'h d -> h 1 d') + rearrange(block_pos_emb_w, 'w d -> 1 w d')
return rearrange(block_pos_emb, '... d -> (...) d')
def forward(
self,
x,
context_frames,
return_loss = False
):
assert x.shape[-1] == 3
encoded = self.unet(context_frames)
batch = x.shape[0]
channels, positions, values = x.unbind(dim = -1)
channel_emb = self.channels(channels)
position_emb = self.positions(positions)
value_emb = self.values(values)
embed = channel_emb + position_emb + value_emb
start_token = repeat(self.start_token, 'd -> b 1 d', b = batch)
embed = torch.cat((start_token, embed), dim = 1)
if return_loss:
embed = embed[:, :-1]
embed = self.postemb_norm(embed)
# layers of attention + cross attention
for attn, cross_attn, ff in self.layers:
embed = attn(embed) + embed
embed = cross_attn(embed, encoded) + embed
embed = ff(embed) + embed
embed = self.final_norm(embed)
# now do axial attention from the summed previous embedding of channel + position + value -> next channel -> next position
# this was successfully done in the residual quantization transformer (RQ-Transformer) https://arxiv.org/abs/2203.01941
        # one layer of attention should be enough; in the Deepmind paper they use a pretty weak baseline and it still worked well
axial_channels_emb = self.axial_channels(channels)
axial_positions_emb = self.axial_positions(positions)
embed = torch.stack((embed, axial_channels_emb, axial_positions_emb), dim = -2)
embed = rearrange(embed, 'b m n d -> (b m) n d')
embed = self.axial_attn(embed) + embed
embed = self.axial_ff(embed) + embed
embed = self.axial_final_norm(embed)
embed = rearrange(embed, '(b m) n d -> b m n d', b = batch)
pred_channel_embed, pred_position_embed, pred_value_embed = embed.unbind(dim = -2)
# to logits
channel_logits = self.to_channel_logits(pred_channel_embed)
position_logits = self.to_position_logits(pred_position_embed)
value_logits = self.to_value_logits(pred_value_embed)
if not return_loss:
return channel_logits, position_logits, value_logits
channel_logits, position_logits, value_logits = map(lambda t: rearrange(t, 'b n c -> b c n'), (channel_logits, position_logits, value_logits))
ce = partial(F.cross_entropy, ignore_index = self.ignore_index)
channel_loss = ce(channel_logits, channels)
position_loss = ce(position_logits, positions)
value_loss = ce(value_logits, values)
return (channel_loss + position_loss + value_loss) / 3
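# a minimal usage sketch with hypothetical hyperparameters: the unet encodes context
# frames into (batch, h * w, dim) tokens that the decoder cross attends to, while x holds
# (channel, position, value) integer triples for the sparse DCT tokens. the unet's
# `dim_out` must match the transformer `dim`, and the context frames are expected to
# already have `dim` channels
def _transframer_sketch():
    unet = Unet(dim = 16, dim_out = 64)
    model = Transframer(
        unet = unet,
        dim = 64,
        depth = 2,
        max_channels = 3,
        max_positions = 64,
        max_values = 256,
        image_size = 32
    )
    context_frames = torch.randn(1, 16, 32, 32)                    # (batch, dim, height, width)
    target_tokens = torch.stack((
        torch.randint(0, 3, (1, 100)),                             # channel ids
        torch.randint(0, 64, (1, 100)),                            # position ids
        torch.randint(0, 256, (1, 100))                            # quantized values
    ), dim = -1)                                                   # -> (1, 100, 3)
    loss = model(target_tokens, context_frames, return_loss = True)
    loss.backward()
    return loss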
| transframer-pytorch-main | transframer_pytorch/transframer_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'multistream-transformers',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Multistream Transformers - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/multistream-transformers',
keywords = [
'artificial intelligence',
'deep learning',
'transformers'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| multistream-transformers-main | setup.py |
from multistream_transformers import MultistreamTransformer
from multistream_transformers.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = MultistreamTransformer(
num_tokens = 256,
dim = 512,
max_seq_len = SEQ_LEN,
depth = 4,
heads = 8,
causal = True,
num_streams = 2
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    # np.fromstring is deprecated - frombuffer with a copy gives a writable array
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| multistream-transformers-main | train.py |
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
device = start_tokens.device
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
return out
def forward(self, x, **kwargs):
xi, xo = x[:, :-1], x[:, 1:]
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
| multistream-transformers-main | multistream_transformers/autoregressive_wrapper.py |
from multistream_transformers.multistream_transformers import MultistreamTransformer
| multistream-transformers-main | multistream_transformers/__init__.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def max_neg_value(t):
return -torch.finfo(t.dtype).max
def rearrange_all(tensors, *args, **kwargs):
return map(lambda t: rearrange(t, *args, **kwargs), tensors)
# feedforward
class GroupLayerNorm(nn.Module):
def __init__(self, dim, groups = 1, eps = 1e-5):
super().__init__()
self.eps = eps
self.groups = groups
self.g = nn.Parameter(torch.ones(1, groups, dim, 1))
self.b = nn.Parameter(torch.zeros(1, groups, dim, 1))
def forward(self, x):
x = rearrange(x, 'b (g d) n -> b g d n', g = self.groups)
std = torch.var(x, dim = 2, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = 2, keepdim = True)
out = (x - mean) / (std + self.eps) * self.g + self.b
return rearrange(out, 'b g d n -> b (g d) n')
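# a minimal sketch with hypothetical sizes: GroupLayerNorm normalizes each of the `groups`
# chunks of the channel dimension independently, which is what lets several streams share
# one (batch, groups * dim, seq) tensor without mixing their statistics
def _group_layernorm_sketch():
    norm = GroupLayerNorm(dim = 8, groups = 2)
    x = torch.randn(3, 16, 10)                    # (batch, groups * dim, seq)
    out = norm(x)
    assert out.shape == x.shape
    x_perturbed = x.clone()
    x_perturbed[:, 8:] += 100.                    # perturb only the second stream
    out_perturbed = norm(x_perturbed)
    assert torch.allclose(out[:, :8], out_perturbed[:, :8])   # first stream is untouched
    return out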
class PreNorm(nn.Module):
def __init__(
self,
dim,
fn,
groups = 1
):
super().__init__()
self.norm = GroupLayerNorm(dim, groups = groups)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class FeedForward(nn.Module):
def __init__(
self,
*,
dim,
mult = 4,
groups = 1
):
super().__init__()
input_dim = dim * groups
hidden_dim = dim * mult * groups
self.net = nn.Sequential(
nn.Conv1d(input_dim, hidden_dim, 1, groups = groups),
nn.GELU(),
nn.Conv1d(hidden_dim, input_dim, 1, groups = groups)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
causal = False,
groups = 1
):
super().__init__()
self.scale = dim_head ** -0.5
self.groups = groups
self.heads = heads
self.causal = causal
input_dim = dim * groups
inner_dim = dim_head * heads * groups
self.to_q = nn.Conv1d(input_dim, inner_dim, 1, bias = False)
self.to_kv = nn.Conv1d(input_dim, inner_dim * 2, 1, bias = False)
self.to_out = nn.Conv1d(inner_dim, input_dim, 1)
def forward(self, x, mask = None, context = None):
n, device, h, g, causal = x.shape[2], x.device, self.heads, self.groups, self.causal
context = default(context, x)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = 1))
q, k, v = rearrange_all((q, k, v), 'b (g h d) n -> (b g h) n d', g = g, h = h)
q = q * self.scale
sim = einsum('b i d, b j d -> b i j', q, k)
if exists(mask):
mask = repeat(mask, 'b n -> (b g h) n', h = h, g = g)
mask = rearrange(mask, 'b n -> b n ()') * rearrange(mask, 'b n -> b () n')
mask_value = max_neg_value(sim)
sim = sim.masked_fill(~mask, mask_value)
if causal:
causal_mask = torch.ones((n, n), device = device).triu(1).bool()
mask_value = max_neg_value(sim)
sim = sim.masked_fill(causal_mask, mask_value)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b g h) n d -> b (g h d) n', h = h, g = g)
return self.to_out(out)
class TransformerBlock(nn.Module):
def __init__(
self,
*,
dim,
causal = False,
dim_head = 64,
heads = 8,
ff_mult = 4,
groups = 1
):
super().__init__()
self.attn = PreNorm(dim, Attention(dim = dim, dim_head = dim_head, heads = heads, causal = causal, groups = groups), groups = groups)
self.ff = PreNorm(dim, FeedForward(dim = dim, mult = ff_mult, groups = groups), groups = groups)
def forward(self, x, mask = None):
x = self.attn(x, mask = mask) + x
x = self.ff(x) + x
return x
# main class
class MultistreamTransformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
num_tokens,
max_seq_len,
causal = False,
dim_head = 64,
heads = 8,
ff_mult = 4,
num_streams = 1
):
super().__init__()
self.dim = dim
self.max_seq_len = max_seq_len
self.num_streams = num_streams
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.layers = nn.ModuleList([])
self.pre_transformer_block = TransformerBlock(dim = dim, causal = causal, dim_head = dim_head, heads = heads)
for _ in range(depth):
self.layers.append(TransformerBlock(dim = dim, causal = causal, dim_head = dim_head, heads = heads, groups = num_streams))
if num_streams > 1:
self.query = nn.Parameter(torch.randn(dim))
self.attn_pool = Attention(dim = dim, dim_head = dim_head, heads = heads)
self.post_transformer_block = TransformerBlock(dim = dim, causal = causal, dim_head = dim_head, heads = heads,)
self.to_logits = nn.Sequential(
Rearrange('b d n -> b n d'),
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x, mask = None):
b, n, d, device, is_multistream = *x.shape, self.dim, x.device, (self.num_streams > 1)
x = self.token_emb(x)
pos_emb = self.pos_emb(torch.arange(n, device = device))
pos_emb = rearrange(pos_emb, 'n d -> () n d')
x = x + pos_emb
x = rearrange(x, 'b n d -> b d n')
x = self.pre_transformer_block(x, mask = mask)
layers = [x]
if is_multistream:
x = repeat(x, 'b d n -> b (s d) n', s = self.num_streams)
for block in self.layers:
x = block(x, mask = mask)
layers.append(x)
if is_multistream:
layers = list(map(lambda t: rearrange(t, 'b (s d) n -> (b n) d s', d = d), layers))
layer_tokens = torch.cat(layers, dim = -1)
query = repeat(self.query, 'd -> b d ()', b = layer_tokens.shape[0])
x = self.attn_pool(query, context = layer_tokens)
x = rearrange(x, '(b n) d () -> b d n', n = n)
x = self.post_transformer_block(x, mask = mask)
return self.to_logits(x)
| multistream-transformers-main | multistream_transformers/multistream_transformers.py |
from setuptools import setup, find_packages
setup(
name = 'local-attention',
packages = find_packages(),
version = '1.8.6',
license='MIT',
description = 'Local attention, window with lookback, for language modeling',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/local-attention',
keywords = [
'transformers',
'attention',
'artificial intelligence'
],
install_requires=[
'einops>=0.6.0',
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| local-attention-master | setup.py |
import random
import tqdm
import gzip
import numpy as np
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from local_attention import LocalTransformer
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 2048
SEQ_LEN = 2048
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = LocalTransformer(
num_tokens = 256,
dim = 512,
depth = 6,
causal = True,
local_attn_window_size = 256,
max_seq_len = SEQ_LEN,
use_dynamic_pos_bias = True
).cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    # np.fromstring is deprecated - frombuffer with a copy gives a writable array
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str)
| local-attention-master | train.py |
from local_attention.local_attention import LocalAttention
from local_attention.transformer import LocalTransformer, LocalMHA, DynamicPositionBias
| local-attention-master | local_attention/__init__.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
from local_attention.local_attention import LocalAttention
# helper function
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def l2norm(t):
return F.normalize(t, dim = -1)
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling functions
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# multi-head attention
class LocalMHA(nn.Module):
def __init__(
self,
*,
dim,
window_size,
dim_head = 64,
heads = 8,
dropout = 0.,
causal = False,
prenorm = False,
qk_rmsnorm = False,
qk_scale = 8,
use_xpos = False,
xpos_scale_base = None,
exact_windowsize = None,
**kwargs
):
super().__init__()
inner_dim = dim_head * heads
self.norm = nn.LayerNorm(dim) if prenorm else None
self.heads = heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.qk_rmsnorm = qk_rmsnorm
if qk_rmsnorm:
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.attn_fn = LocalAttention(
dim = dim_head,
window_size = window_size,
causal = causal,
autopad = True,
scale = (qk_scale if qk_rmsnorm else None),
exact_windowsize = default(exact_windowsize, True),
use_xpos = use_xpos,
xpos_scale_base = xpos_scale_base,
**kwargs
)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x, mask = None, attn_bias = None):
if exists(self.norm):
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
if self.qk_rmsnorm:
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
out = self.attn_fn(q, k, v, mask = mask, attn_bias = attn_bias)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return x * F.gelu(gate)
def FeedForward(dim, mult = 4, dropout = 0.):
inner_dim = int(dim * mult * 2 / 3)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim, bias = False)
)
# dynamic positional bias
class DynamicPositionBias(nn.Module):
def __init__(
self,
dim,
heads
):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(1, dim),
nn.SiLU(),
nn.Linear(dim, dim),
nn.SiLU(),
nn.Linear(dim, heads)
)
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
assert j >= i
rel_dist = torch.arange(j, dtype = torch.float, device = device)
bias = self.mlp(rearrange(rel_dist, '... -> ... 1'))
i_seq = torch.arange(j - i, j, device = device)
j_seq = torch.arange(j, device = device)
rel_dist_indices = (rearrange(i_seq, 'i -> i 1') - rearrange(j_seq, 'j -> 1 j')).abs()
bias = rearrange(bias[rel_dist_indices], 'i j h -> h i j')
return bias
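# a minimal sketch with hypothetical sizes: the MLP above maps each relative distance to
# one bias per head, and forward(i, j) lays these out as an (heads, i, j) grid. the local
# transformer below calls it as (w, 2 * w), matching a query window of size w attending
# over its own window plus one look-back window
def _dynamic_position_bias_sketch():
    bias_fn = DynamicPositionBias(dim = 32, heads = 8)
    w = 64                                        # hypothetical local attention window size
    attn_bias = bias_fn(w, 2 * w)
    assert attn_bias.shape == (8, w, 2 * w)
    return attn_bias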
# main transformer class
class LocalTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
dim,
depth,
causal = True,
local_attn_window_size = 512,
dim_head = 64,
heads = 8,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
ignore_index = -1,
use_xpos = False,
xpos_scale_base = None,
use_dynamic_pos_bias = False,
**kwargs
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.max_seq_len = max_seq_len
self.layers = nn.ModuleList([])
self.local_attn_window_size = local_attn_window_size
self.dynamic_pos_bias = None
if use_dynamic_pos_bias:
self.dynamic_pos_bias = DynamicPositionBias(dim = dim // 2, heads = heads)
for _ in range(depth):
self.layers.append(nn.ModuleList([
LocalMHA(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, causal = causal, window_size = local_attn_window_size, use_xpos = use_xpos, xpos_scale_base = xpos_scale_base, use_rotary_pos_emb = not use_dynamic_pos_bias, prenorm = True, **kwargs),
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
]))
self.ignore_index = ignore_index
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens, bias = False)
)
@torch.no_grad()
@eval_decorator
def generate(
self,
prime,
seq_len,
temperature = 1.,
filter_thres = 0.9,
**kwargs
):
n, device = prime.shape[1], prime.device
out = prime
for _ in range(seq_len):
logits = self.forward(out[:, -self.max_seq_len:], **kwargs)
filtered_logits = top_k(logits[:, -1], thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim = -1)
sampled = torch.multinomial(probs, 1)
out = torch.cat((out, sampled), dim = -1)
return out[:, n:]
def forward(self, x, mask = None, return_loss = False):
if return_loss:
x, labels = x[:, :-1], x[:, 1:]
n, device = x.shape[1], x.device
x = self.token_emb(x)
assert n <= self.max_seq_len
x = x + self.pos_emb(torch.arange(n, device = device))
# dynamic pos bias
attn_bias = None
if exists(self.dynamic_pos_bias):
w = self.local_attn_window_size
attn_bias = self.dynamic_pos_bias(w, w * 2)
# go through layers
for attn, ff in self.layers:
x = attn(x, mask = mask, attn_bias = attn_bias) + x
x = ff(x) + x
logits = self.to_logits(x)
if not return_loss:
return logits
logits = rearrange(logits, 'b n c -> b c n')
loss = F.cross_entropy(logits, labels, ignore_index = self.ignore_index)
return loss
| local-attention-master | local_attention/transformer.py |
import math
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat, pack, unpack
from local_attention.rotary import SinusoidalEmbeddings, apply_rotary_pos_emb
# constant
TOKEN_SELF_ATTN_VALUE = -5e4
# helper functions
def exists(val):
return val is not None
def default(value, d):
return d if not exists(value) else value
def to(t):
return {'device': t.device, 'dtype': t.dtype}
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(tensor):
dtype = tensor.dtype
normed = F.normalize(tensor, dim = -1)
return normed.type(dtype)
def pad_to_multiple(tensor, multiple, dim=-1, value=0):
seqlen = tensor.shape[dim]
m = seqlen / multiple
if m.is_integer():
return False, tensor
remainder = math.ceil(m) * multiple - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return True, F.pad(tensor, (*pad_offset, 0, remainder), value = value)
def look_around(x, backward = 1, forward = 0, pad_value = -1, dim = 2):
t = x.shape[1]
dims = (len(x.shape) - dim) * (0, 0)
padded_x = F.pad(x, (*dims, backward, forward), value = pad_value)
tensors = [padded_x[:, ind:(ind + t), ...] for ind in range(forward + backward + 1)]
return torch.cat(tensors, dim = dim)
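# a minimal sketch: look_around concatenates each window with its neighboring windows
# (backward windows to the left, forward to the right), padding with pad_value at the
# sequence boundaries. with the causal defaults (backward = 1, forward = 0) every window
# of size n therefore sees 2n keys
def _look_around_sketch():
    x = torch.arange(8).reshape(1, 2, 4)          # (batch, windows, window_size)
    out = look_around(x, backward = 1, forward = 0, pad_value = -1)
    assert out.shape == (1, 2, 8)
    assert out[0, 0].tolist() == [-1, -1, -1, -1, 0, 1, 2, 3]   # first window: padding, then itself
    assert out[0, 1].tolist() == [0, 1, 2, 3, 4, 5, 6, 7]       # second window: window 0, then itself
    return out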
# main class
class LocalAttention(nn.Module):
def __init__(
self,
window_size,
causal = False,
look_backward = 1,
look_forward = None,
dropout = 0.,
shared_qk = False,
rel_pos_emb_config = None,
dim = None,
autopad = False,
exact_windowsize = False,
scale = None,
use_rotary_pos_emb = True,
use_xpos = False,
xpos_scale_base = None
):
super().__init__()
look_forward = default(look_forward, 0 if causal else 1)
assert not (causal and look_forward > 0), 'you cannot look forward if causal'
self.scale = scale
self.window_size = window_size
self.autopad = autopad
self.exact_windowsize = exact_windowsize
self.causal = causal
self.look_backward = look_backward
self.look_forward = look_forward
self.dropout = nn.Dropout(dropout)
self.shared_qk = shared_qk
# relative positions
self.rel_pos = None
self.use_xpos = use_xpos
if use_rotary_pos_emb and (exists(rel_pos_emb_config) or exists(dim)): # backwards compatible with old `rel_pos_emb_config` deprecated argument
if exists(rel_pos_emb_config):
dim = rel_pos_emb_config[0]
self.rel_pos = SinusoidalEmbeddings(
dim,
use_xpos = use_xpos,
scale_base = default(xpos_scale_base, window_size // 2)
)
def forward(
self,
q, k, v,
mask = None,
input_mask = None,
attn_bias = None,
window_size = None
):
mask = default(mask, input_mask)
assert not (exists(window_size) and not self.use_xpos), 'cannot perform window size extrapolation if xpos is not turned on'
shape, autopad, pad_value, window_size, causal, look_backward, look_forward, shared_qk = q.shape, self.autopad, -1, default(window_size, self.window_size), self.causal, self.look_backward, self.look_forward, self.shared_qk
# https://github.com/arogozhnikov/einops/blob/master/docs/4-pack-and-unpack.ipynb
(q, packed_shape), (k, _), (v, _) = map(lambda t: pack([t], '* n d'), (q, k, v))
# auto padding
if autopad:
orig_seq_len = q.shape[1]
(needed_pad, q), (_, k), (_, v) = map(lambda t: pad_to_multiple(t, self.window_size, dim = -2), (q, k, v))
b, n, dim_head, device, dtype = *q.shape, q.device, q.dtype
scale = default(self.scale, dim_head ** -0.5)
assert (n % window_size) == 0, f'sequence length {n} must be divisible by window size {window_size} for local attention'
windows = n // window_size
if shared_qk:
k = l2norm(k)
seq = torch.arange(n, device = device)
b_t = rearrange(seq, '(w n) -> 1 w n', w = windows, n = window_size)
# bucketing
bq, bk, bv = map(lambda t: rearrange(t, 'b (w n) d -> b w n d', w = windows), (q, k, v))
bq = bq * scale
look_around_kwargs = dict(
backward = look_backward,
forward = look_forward,
pad_value = pad_value
)
bk = look_around(bk, **look_around_kwargs)
bv = look_around(bv, **look_around_kwargs)
# rotary embeddings
if exists(self.rel_pos):
pos_emb, xpos_scale = self.rel_pos(bk)
bq, bk = apply_rotary_pos_emb(bq, bk, pos_emb, scale = xpos_scale)
# calculate positions for masking
bq_t = b_t
bq_k = look_around(b_t, **look_around_kwargs)
bq_t = rearrange(bq_t, '... i -> ... i 1')
bq_k = rearrange(bq_k, '... j -> ... 1 j')
pad_mask = bq_k == pad_value
sim = einsum('b h i e, b h j e -> b h i j', bq, bk)
if exists(attn_bias):
heads = attn_bias.shape[0]
assert (b % heads) == 0
attn_bias = repeat(attn_bias, 'h i j -> (b h) 1 i j', b = b // heads)
sim = sim + attn_bias
mask_value = max_neg_value(sim)
if shared_qk:
self_mask = bq_t == bq_k
sim = sim.masked_fill(self_mask, TOKEN_SELF_ATTN_VALUE)
del self_mask
if causal:
causal_mask = bq_t < bq_k
if self.exact_windowsize:
max_causal_window_size = (self.window_size * self.look_backward)
causal_mask = causal_mask | (bq_t > (bq_k + max_causal_window_size))
sim = sim.masked_fill(causal_mask, mask_value)
del causal_mask
# masking out for exact window size for non-causal
# as well as masking out for padding value
if not causal and self.exact_windowsize:
max_backward_window_size = (self.window_size * self.look_backward)
max_forward_window_size = (self.window_size * self.look_forward)
window_mask = ((bq_k - max_forward_window_size) > bq_t) | (bq_t > (bq_k + max_backward_window_size)) | pad_mask
sim = sim.masked_fill(window_mask, mask_value)
else:
sim = sim.masked_fill(pad_mask, mask_value)
# take care of key padding mask passed in
if exists(mask):
batch = mask.shape[0]
assert (b % batch) == 0
h = b // mask.shape[0]
if autopad:
_, mask = pad_to_multiple(mask, window_size, dim = -1, value = False)
mask = rearrange(mask, '... (w n) -> (...) w n', w = windows, n = window_size)
mask = look_around(mask, **{**look_around_kwargs, 'pad_value': False})
mask = rearrange(mask, '... j -> ... 1 j')
mask = repeat(mask, 'b ... -> (b h) ...', h = h)
sim = sim.masked_fill(~mask, mask_value)
del mask
# attention
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
# aggregation
out = einsum('b h i j, b h j e -> b h i e', attn, bv)
out = rearrange(out, 'b w n d -> b (w n) d')
if autopad:
out = out[:, :orig_seq_len, :]
out, *_ = unpack(out, packed_shape, '* n d')
return out
| local-attention-master | local_attention/local_attention.py |
import torch
from torch import nn, einsum
from einops import rearrange
def exists(val):
return val is not None
class SinusoidalEmbeddings(nn.Module):
def __init__(
self,
dim,
scale_base = None,
use_xpos = False
):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
# xpos related
self.use_xpos = use_xpos
self.scale_base = scale_base
assert not (use_xpos and not exists(scale_base)), 'scale base must be defined if using xpos'
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.register_buffer('scale', scale, persistent = False)
def forward(self, x):
seq_len, device = x.shape[-2], x.device
t = torch.arange(seq_len, device = x.device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not self.use_xpos:
return freqs, torch.ones(1, device = device)
power = (t - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, 'b ... (r d) -> b ... r d', r = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(q, k, freqs, scale = 1):
q_len = q.shape[-2]
q_freqs = freqs[..., -q_len:, :]
inv_scale = scale ** -1
if scale.ndim == 2:
scale = scale[-q_len:, :]
q = (q * q_freqs.cos() * scale) + (rotate_half(q) * q_freqs.sin() * scale)
k = (k * freqs.cos() * inv_scale) + (rotate_half(k) * freqs.sin() * inv_scale)
return q, k
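# a minimal usage sketch with hypothetical dims: SinusoidalEmbeddings derives the rotary
# frequencies (and, when use_xpos is on, a per-position scale) from the key tensor, and
# apply_rotary_pos_emb rotates q / k so that the attention logits depend on relative
# rather than absolute positions
def _rotary_sketch():
    pos_emb = SinusoidalEmbeddings(dim = 64)
    q = torch.randn(2, 8, 128, 64)                # (batch, heads, seq, dim_head)
    k = torch.randn(2, 8, 128, 64)
    freqs, scale = pos_emb(k)                     # freqs: (seq, dim_head), scale is ones without xpos
    rq, rk = apply_rotary_pos_emb(q, k, freqs, scale = scale)
    assert rq.shape == q.shape and rk.shape == k.shape
    return rq, rk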
| local-attention-master | local_attention/rotary.py |
from setuptools import setup, find_packages
setup(
name = 'resize-right',
packages = find_packages(exclude=[]),
version = '0.0.2',
license = 'MIT',
description = 'Resize Right',
author = 'Assaf Shocher',
author_email = '[email protected]',
url = 'https://github.com/assafshocher/ResizeRight',
keywords = [
'deep learning',
'image resize'
],
install_requires=[
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| ResizeRight-master | setup.py |
from resize_right.resize_right import resize
import resize_right.interp_methods as interp_methods
| ResizeRight-master | resize_right/__init__.py |
from math import pi
try:
import torch
except ImportError:
torch = None
try:
import numpy
except ImportError:
numpy = None
if numpy is None and torch is None:
raise ImportError("Must have either Numpy or PyTorch but both not found")
def set_framework_dependencies(x):
if type(x) is numpy.ndarray:
to_dtype = lambda a: a
fw = numpy
else:
to_dtype = lambda a: a.to(x.dtype)
fw = torch
eps = fw.finfo(fw.float32).eps
return fw, to_dtype, eps
def support_sz(sz):
def wrapper(f):
f.support_sz = sz
return f
return wrapper
@support_sz(4)
def cubic(x):
fw, to_dtype, eps = set_framework_dependencies(x)
absx = fw.abs(x)
absx2 = absx ** 2
absx3 = absx ** 3
return ((1.5 * absx3 - 2.5 * absx2 + 1.) * to_dtype(absx <= 1.) +
(-0.5 * absx3 + 2.5 * absx2 - 4. * absx + 2.) *
to_dtype((1. < absx) & (absx <= 2.)))
@support_sz(4)
def lanczos2(x):
fw, to_dtype, eps = set_framework_dependencies(x)
return (((fw.sin(pi * x) * fw.sin(pi * x / 2) + eps) /
((pi**2 * x**2 / 2) + eps)) * to_dtype(abs(x) < 2))
@support_sz(6)
def lanczos3(x):
fw, to_dtype, eps = set_framework_dependencies(x)
return (((fw.sin(pi * x) * fw.sin(pi * x / 3) + eps) /
((pi**2 * x**2 / 3) + eps)) * to_dtype(abs(x) < 3))
@support_sz(2)
def linear(x):
fw, to_dtype, eps = set_framework_dependencies(x)
return ((x + 1) * to_dtype((-1 <= x) & (x < 0)) + (1 - x) *
to_dtype((0 <= x) & (x <= 1)))
@support_sz(1)
def box(x):
fw, to_dtype, eps = set_framework_dependencies(x)
return to_dtype((-1 <= x) & (x < 0)) + to_dtype((0 <= x) & (x <= 1))
| ResizeRight-master | resize_right/interp_methods.py |
from typing import Tuple
import warnings
from math import ceil
from fractions import Fraction
import resize_right.interp_methods as interp_methods
class NoneClass:
pass
try:
import torch
from torch import nn
nnModuleWrapped = nn.Module
except ImportError:
warnings.warn('No PyTorch found, will work only with Numpy')
torch = None
nnModuleWrapped = NoneClass
try:
import numpy
except ImportError:
warnings.warn('No Numpy found, will work only with PyTorch')
numpy = None
if numpy is None and torch is None:
raise ImportError("Must have either Numpy or PyTorch but both not found")
def resize(input, scale_factors=None, out_shape=None,
interp_method=interp_methods.cubic, support_sz=None,
antialiasing=True, by_convs=False, scale_tolerance=None,
max_numerator=10, pad_mode='constant'):
# get properties of the input tensor
in_shape, n_dims = input.shape, input.ndim
# fw stands for framework that can be either numpy or torch,
# determined by the input type
fw = numpy if type(input) is numpy.ndarray else torch
eps = fw.finfo(fw.float32).eps
device = input.device if fw is torch else None
    # set missing scale factors or output shape, one according to the other,
    # scream if both missing. this is also where all the default policies
    # take place. also handling the by_convs attribute carefully.
scale_factors, out_shape, by_convs = set_scale_and_out_sz(in_shape,
out_shape,
scale_factors,
by_convs,
scale_tolerance,
max_numerator,
eps, fw)
# sort indices of dimensions according to scale of each dimension.
# since we are going dim by dim this is efficient
sorted_filtered_dims_and_scales = [(dim, scale_factors[dim], by_convs[dim],
in_shape[dim], out_shape[dim])
for dim in sorted(range(n_dims),
key=lambda ind: scale_factors[ind])
if scale_factors[dim] != 1.]
# unless support size is specified by the user, it is an attribute
# of the interpolation method
if support_sz is None:
support_sz = interp_method.support_sz
# output begins identical to input and changes with each iteration
output = input
# iterate over dims
for (dim, scale_factor, dim_by_convs, in_sz, out_sz
) in sorted_filtered_dims_and_scales:
# STEP 1- PROJECTED GRID: The non-integer locations of the projection
# of output pixel locations to the input tensor
projected_grid = get_projected_grid(in_sz, out_sz,
scale_factor, fw, dim_by_convs,
device)
# STEP 1.5: ANTIALIASING- If antialiasing is taking place, we modify
# the window size and the interpolation method (see inside function)
cur_interp_method, cur_support_sz = apply_antialiasing_if_needed(
interp_method,
support_sz,
scale_factor,
antialiasing)
        # STEP 2- FIELDS OF VIEW: for each output pixel, map the input pixels
        # that influence it. Also calculate needed padding and update grid
        # accordingly
field_of_view = get_field_of_view(projected_grid, cur_support_sz, fw,
eps, device)
# STEP 2.5- CALCULATE PAD AND UPDATE: according to the field of view,
# the input should be padded to handle the boundaries, coordinates
# should be updated. actual padding only occurs when weights are
        # applied (step 4). if using by_convs for this dim, then we need to
# calc right and left boundaries for each filter instead.
pad_sz, projected_grid, field_of_view = calc_pad_sz(in_sz, out_sz,
field_of_view,
projected_grid,
scale_factor,
dim_by_convs, fw,
device)
# STEP 3- CALCULATE WEIGHTS: Match a set of weights to the pixels in
# the field of view for each output pixel
weights = get_weights(cur_interp_method, projected_grid, field_of_view)
# STEP 4- APPLY WEIGHTS: Each output pixel is calculated by multiplying
# its set of weights with the pixel values in its field of view.
# We now multiply the fields of view with their matching weights.
# We do this by tensor multiplication and broadcasting.
# if by_convs is true for this dim, then we do this action by
# convolutions. this is equivalent but faster.
if not dim_by_convs:
output = apply_weights(output, field_of_view, weights, dim, n_dims,
pad_sz, pad_mode, fw)
else:
output = apply_convs(output, scale_factor, in_sz, out_sz, weights,
dim, pad_sz, pad_mode, fw)
return output
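# a minimal usage sketch with hypothetical sizes: a single scale factor is applied to the
# last two dims for torch inputs (first two for numpy), with cubic interpolation and
# antialiasing by default
def _resize_usage_sketch():
    assert torch is not None, 'this sketch assumes the torch backend is available'
    images = torch.randn(4, 3, 256, 256)
    out = resize(images, scale_factors = 0.5)
    assert out.shape == (4, 3, 128, 128)
    return out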
def get_projected_grid(in_sz, out_sz, scale_factor, fw, by_convs, device=None):
    # we start by having the output coordinates which are just integer locations
    # in the special case when using by_convs, we only need two cycles of grid
# points. the first and last.
grid_sz = out_sz if not by_convs else scale_factor.numerator
out_coordinates = fw_arange(grid_sz, fw, device)
    # This is projecting the output pixel locations in 1d to the input tensor,
    # as non-integer locations.
    # the following formula is derived in the paper
# "From Discrete to Continuous Convolutions" by Shocher et al.
return (out_coordinates / float(scale_factor) +
(in_sz - 1) / 2 - (out_sz - 1) / (2 * float(scale_factor)))
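# a worked example with hypothetical sizes: upscaling a length-4 signal to length 8 with
# the formula above places the output centers symmetrically around the input centers,
# starting a quarter of an input pixel before the first sample
def _projected_grid_example():
    fw = numpy if numpy is not None else torch
    grid = get_projected_grid(in_sz = 4, out_sz = 8, scale_factor = 2., fw = fw, by_convs = False)
    # grid is [-0.25, 0.25, 0.75, 1.25, 1.75, 2.25, 2.75, 3.25]
    return grid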
def get_field_of_view(projected_grid, cur_support_sz, fw, eps, device):
# for each output pixel, map which input pixels influence it, in 1d.
# we start by calculating the leftmost neighbor, using half of the window
# size (eps is for when boundary is exact int)
left_boundaries = fw_ceil(projected_grid - cur_support_sz / 2 - eps, fw)
# then we simply take all the pixel centers in the field by counting
# window size pixels from the left boundary
ordinal_numbers = fw_arange(ceil(cur_support_sz - eps), fw, device)
return left_boundaries[:, None] + ordinal_numbers
def calc_pad_sz(in_sz, out_sz, field_of_view, projected_grid, scale_factor,
dim_by_convs, fw, device):
if not dim_by_convs:
# determine padding according to neighbor coords out of bound.
# this is a generalized notion of padding, when pad<0 it means crop
pad_sz = [-field_of_view[0, 0].item(),
field_of_view[-1, -1].item() - in_sz + 1]
# since input image will be changed by padding, coordinates of both
# field_of_view and projected_grid need to be updated
field_of_view += pad_sz[0]
projected_grid += pad_sz[0]
else:
        # only used for by_convs, to calc the boundaries of each filter. the
        # number of distinct convolutions is the numerator of the scale factor
num_convs, stride = scale_factor.numerator, scale_factor.denominator
# calculate left and right boundaries for each conv. left can also be
        # negative, right can be bigger than in_sz. such cases imply padding if
        # needed. however, if both are in-bounds, it means we need to crop,
# practically apply the conv only on part of the image.
left_pads = -field_of_view[:, 0]
# next calc is tricky, explanation by rows:
# 1) counting output pixels between the first position of each filter
# to the right boundary of the input
# 2) dividing it by number of filters to count how many 'jumps'
# each filter does
# 3) multiplying by the stride gives us the distance over the input
# coords done by all these jumps for each filter
# 4) to this distance we add the right boundary of the filter when
# placed in its leftmost position. so now we get the right boundary
# of that filter in input coord.
# 5) the padding size needed is obtained by subtracting the rightmost
# input coordinate. if the result is positive padding is needed. if
# negative then negative padding means shaving off pixel columns.
right_pads = (((out_sz - fw_arange(num_convs, fw, device) - 1) # (1)
// num_convs) # (2)
* stride # (3)
+ field_of_view[:, -1] # (4)
- in_sz + 1) # (5)
# in the by_convs case pad_sz is a list of left-right pairs. one per
# each filter
pad_sz = list(zip(left_pads, right_pads))
return pad_sz, projected_grid, field_of_view
def get_weights(interp_method, projected_grid, field_of_view):
# the set of weights per each output pixels is the result of the chosen
# interpolation method applied to the distances between projected grid
# locations and the pixel-centers in the field of view (distances are
# directed, can be positive or negative)
weights = interp_method(projected_grid[:, None] - field_of_view)
# we now carefully normalize the weights to sum to 1 per each output pixel
sum_weights = weights.sum(1, keepdims=True)
sum_weights[sum_weights == 0] = 1
return weights / sum_weights
def apply_weights(input, field_of_view, weights, dim, n_dims, pad_sz, pad_mode,
fw):
# for this operation we assume the resized dim is the first one.
# so we transpose and will transpose back after multiplying
tmp_input = fw_swapaxes(input, dim, 0, fw)
# apply padding
tmp_input = fw_pad(tmp_input, fw, pad_sz, pad_mode)
# field_of_view is a tensor of order 2: for each output (1d location
# along cur dim)- a list of 1d neighbors locations.
# note that this whole operations is applied to each dim separately,
# this is why it is all in 1d.
# neighbors = tmp_input[field_of_view] is a tensor of order image_dims+1:
# for each output pixel (this time indicated in all dims), these are the
# values of the neighbors in the 1d field of view. note that we only
# consider neighbors along the current dim, but such set exists for every
# multi-dim location, hence the final tensor order is image_dims+1.
neighbors = tmp_input[field_of_view]
# weights is an order 2 tensor: for each output location along 1d- a list
# of weights matching the field of view. we augment it with ones, for
    # broadcasting, so that when it multiplies some tensor the weights affect
# only its first dim.
tmp_weights = fw.reshape(weights, (*weights.shape, * [1] * (n_dims - 1)))
# now we simply multiply the weights with the neighbors, and then sum
# along the field of view, to get a single value per out pixel
tmp_output = (neighbors * tmp_weights).sum(1)
# we transpose back the resized dim to its original position
return fw_swapaxes(tmp_output, 0, dim, fw)
def apply_convs(input, scale_factor, in_sz, out_sz, weights, dim, pad_sz,
pad_mode, fw):
    # for this operation we assume the resized dim is the last one.
# so we transpose and will transpose back after multiplying
input = fw_swapaxes(input, dim, -1, fw)
# the stride for all convs is the denominator of the scale factor
stride, num_convs = scale_factor.denominator, scale_factor.numerator
# prepare an empty tensor for the output
tmp_out_shape = list(input.shape)
tmp_out_shape[-1] = out_sz
tmp_output = fw_empty(tuple(tmp_out_shape), fw, input.device)
# iterate over the conv operations. we have as many as the numerator
# of the scale-factor. for each we need boundaries and a filter.
for conv_ind, (pad_sz, filt) in enumerate(zip(pad_sz, weights)):
# apply padding (we pad last dim, padding can be negative)
pad_dim = input.ndim - 1
tmp_input = fw_pad(input, fw, pad_sz, pad_mode, dim=pad_dim)
# apply convolution over last dim. store in the output tensor with
        # positional strides so that when the loop is complete, conv results are
        # interleaved
tmp_output[..., conv_ind::num_convs] = fw_conv(tmp_input, filt, stride)
return fw_swapaxes(tmp_output, -1, dim, fw)
def set_scale_and_out_sz(in_shape, out_shape, scale_factors, by_convs,
scale_tolerance, max_numerator, eps, fw):
# eventually we must have both scale-factors and out-sizes for all in/out
# dims. however, we support many possible partial arguments
if scale_factors is None and out_shape is None:
raise ValueError("either scale_factors or out_shape should be "
"provided")
if out_shape is not None:
        # if out_shape has fewer dims than in_shape, we by default resize the
# first dims for numpy and last dims for torch
out_shape = (list(out_shape) + list(in_shape[len(out_shape):])
if fw is numpy
else list(in_shape[:-len(out_shape)]) + list(out_shape))
if scale_factors is None:
# if no scale given, we calculate it as the out to in ratio
            # (not recommended)
scale_factors = [out_sz / in_sz for out_sz, in_sz
in zip(out_shape, in_shape)]
if scale_factors is not None:
# by default, if a single number is given as scale, we assume resizing
# two dims (most common are images with 2 spatial dims)
scale_factors = (scale_factors
if isinstance(scale_factors, (list, tuple))
else [scale_factors, scale_factors])
        # if fewer scale_factors than in_shape dims, we by default resize the
# first dims for numpy and last dims for torch
scale_factors = (list(scale_factors) + [1] *
(len(in_shape) - len(scale_factors)) if fw is numpy
else [1] * (len(in_shape) - len(scale_factors)) +
list(scale_factors))
if out_shape is None:
# when no out_shape given, it is calculated by multiplying the
            # scale by the in_shape (not recommended)
out_shape = [ceil(scale_factor * in_sz)
for scale_factor, in_sz in
zip(scale_factors, in_shape)]
# next part intentionally after out_shape determined for stability
# we fix by_convs to be a list of truth values in case it is not
if not isinstance(by_convs, (list, tuple)):
by_convs = [by_convs] * len(out_shape)
# next loop fixes the scale for each dim to be either frac or float.
# this is determined by by_convs and by tolerance for scale accuracy.
for ind, (sf, dim_by_convs) in enumerate(zip(scale_factors, by_convs)):
        # first we fractionalize
if dim_by_convs:
frac = Fraction(1/sf).limit_denominator(max_numerator)
frac = Fraction(numerator=frac.denominator, denominator=frac.numerator)
# if accuracy is within tolerance scale will be frac. if not, then
# it will be float and the by_convs attr will be set false for
# this dim
if scale_tolerance is None:
scale_tolerance = eps
if dim_by_convs and abs(frac - sf) < scale_tolerance:
scale_factors[ind] = frac
else:
scale_factors[ind] = float(sf)
by_convs[ind] = False
return scale_factors, out_shape, by_convs
def apply_antialiasing_if_needed(interp_method, support_sz, scale_factor,
antialiasing):
# antialiasing is "stretching" the field of view according to the scale
# factor (only for downscaling). this is low-pass filtering. this
# requires modifying both the interpolation (stretching the 1d
# function and multiplying by the scale-factor) and the window size.
scale_factor = float(scale_factor)
if scale_factor >= 1.0 or not antialiasing:
return interp_method, support_sz
cur_interp_method = (lambda arg: scale_factor *
interp_method(scale_factor * arg))
cur_support_sz = support_sz / scale_factor
return cur_interp_method, cur_support_sz
def fw_ceil(x, fw):
if fw is numpy:
return fw.int_(fw.ceil(x))
else:
return x.ceil().long()
def fw_floor(x, fw):
if fw is numpy:
return fw.int_(fw.floor(x))
else:
return x.floor().long()
def fw_cat(x, fw):
if fw is numpy:
return fw.concatenate(x)
else:
return fw.cat(x)
def fw_swapaxes(x, ax_1, ax_2, fw):
if fw is numpy:
return fw.swapaxes(x, ax_1, ax_2)
else:
return x.transpose(ax_1, ax_2)
def fw_pad(x, fw, pad_sz, pad_mode, dim=0):
if pad_sz == (0, 0):
return x
if fw is numpy:
pad_vec = [(0, 0)] * x.ndim
pad_vec[dim] = pad_sz
return fw.pad(x, pad_width=pad_vec, mode=pad_mode)
else:
if x.ndim < 3:
x = x[None, None, ...]
pad_vec = [0] * ((x.ndim - 2) * 2)
pad_vec[0:2] = pad_sz
return fw.nn.functional.pad(x.transpose(dim, -1), pad=pad_vec,
mode=pad_mode).transpose(dim, -1)
def fw_conv(input, filter, stride):
# we want to apply 1d conv to any nd array. the way to do it is to reshape
    # the input to a 4D tensor. first two dims are singletons, 3rd dim stores
# all the spatial dims that we are not convolving along now. then we can
# apply conv2d with a 1xK filter. This convolves the same way all the other
# dims stored in the 3d dim. like depthwise conv over these.
# TODO: numpy support
reshaped_input = input.reshape(1, 1, -1, input.shape[-1])
reshaped_output = torch.nn.functional.conv2d(reshaped_input,
filter.view(1, 1, 1, -1),
stride=(1, stride))
return reshaped_output.reshape(*input.shape[:-1], -1)
def fw_arange(upper_bound, fw, device):
if fw is numpy:
return fw.arange(upper_bound)
else:
return fw.arange(upper_bound, device=device)
def fw_empty(shape, fw, device):
if fw is numpy:
return fw.empty(shape)
else:
        return fw.empty(size=(*shape,), device=device)
 | ResizeRight-master | resize_right/resize_right.py |
from setuptools import setup, find_packages
setup(
name = 'stam-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Space Time Attention Model (STAM) - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/STAM-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'image recognition'
],
install_requires=[
'torch>=1.6',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| STAM-pytorch-main | setup.py |
from stam_pytorch.stam import STAM
| STAM-pytorch-main | stam_pytorch/__init__.py |
import torch
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = dots.softmax(dim=-1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
self.norm = nn.LayerNorm(dim)
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.norm(x)
class STAM(nn.Module):
def __init__(
self,
*,
dim,
image_size,
patch_size,
num_frames,
num_classes,
space_depth,
space_heads,
space_mlp_dim,
time_depth,
time_heads,
time_mlp_dim,
space_dim_head = 64,
time_dim_head = 64,
dropout = 0.,
emb_dropout = 0.
):
super().__init__()
assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_size // patch_size) ** 2
patch_dim = 3 * patch_size ** 2
self.to_patch_embedding = nn.Sequential(
Rearrange('b f c (h p1) (w p2) -> b f (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_frames, num_patches + 1, dim))
self.space_cls_token = nn.Parameter(torch.randn(1, dim))
self.time_cls_token = nn.Parameter(torch.randn(1, dim))
self.dropout = nn.Dropout(emb_dropout)
self.space_transformer = Transformer(dim, space_depth, space_heads, space_dim_head, space_mlp_dim, dropout)
self.time_transformer = Transformer(dim, time_depth, time_heads, time_dim_head, time_mlp_dim, dropout)
self.mlp_head = nn.Linear(dim, num_classes)
def forward(self, video):
x = self.to_patch_embedding(video)
b, f, n, *_ = x.shape
# concat space CLS tokens
space_cls_tokens = repeat(self.space_cls_token, 'n d -> b f n d', b = b, f = f)
x = torch.cat((space_cls_tokens, x), dim = -2)
# positional embedding
x += self.pos_embedding[:, :, :(n + 1)]
x = self.dropout(x)
# space attention
x = rearrange(x, 'b f ... -> (b f) ...')
x = self.space_transformer(x)
x = rearrange(x[:, 0], '(b f) ... -> b f ...', b = b) # select CLS token out of each frame
# concat time CLS tokens
time_cls_tokens = repeat(self.time_cls_token, 'n d -> b n d', b = b)
x = torch.cat((time_cls_tokens, x), dim = -2)
# time attention
x = self.time_transformer(x)
# final mlp
return self.mlp_head(x[:, 0])
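# a minimal usage sketch with hypothetical hyperparameters: frames are first attended over
# spatially (per frame, with a space CLS token), then the per-frame CLS tokens are
# attended over in time before classification
def _stam_sketch():
    model = STAM(
        dim = 128,
        image_size = 64,
        patch_size = 16,
        num_frames = 8,
        num_classes = 10,
        space_depth = 2,
        space_heads = 4,
        space_mlp_dim = 256,
        time_depth = 2,
        time_heads = 4,
        time_mlp_dim = 256
    )
    video = torch.randn(2, 8, 3, 64, 64)          # (batch, frames, channels, height, width)
    logits = model(video)
    assert logits.shape == (2, 10)
    return logits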
| STAM-pytorch-main | stam_pytorch/stam.py |
from setuptools import setup, find_packages
setup(
name = 'bit-diffusion',
packages = find_packages(exclude=[]),
version = '0.1.2',
license='MIT',
description = 'Bit Diffusion - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/bit-diffusion',
keywords = [
'artificial intelligence',
'deep learning',
'denoising diffusion'
],
install_requires=[
'accelerate',
'einops',
'ema-pytorch',
'pillow',
'torch>=1.12.0',
'torchvision',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| bit-diffusion-main | setup.py |
import math
from pathlib import Path
from functools import partial
from multiprocessing import cpu_count
import torch
from torch import nn, einsum
from torch.special import expm1
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from torchvision import transforms as T, utils
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from PIL import Image
from tqdm.auto import tqdm
from ema_pytorch import EMA
from accelerate import Accelerator
# constants
BITS = 8
# helpers functions
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cycle(dl):
while True:
for data in dl:
yield data
def has_int_squareroot(num):
return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
def convert_image_to(pil_img_type, image):
if image.mode != pil_img_type:
return image.convert(pil_img_type)
return image
# small helper modules
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
def Upsample(dim, dim_out = None):
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv2d(dim, default(dim_out, dim), 3, padding = 1)
)
def Downsample(dim, dim_out = None):
return nn.Conv2d(dim, default(dim_out, dim), 4, 2, 1)
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
# positional embeds
class LearnedSinusoidalPosEmb(nn.Module):
""" following @crowsonkb 's lead with learned sinusoidal pos emb """
""" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 """
def __init__(self, dim):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups = 8):
super().__init__()
self.proj = nn.Conv2d(dim, dim_out, 3, padding = 1)
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x, scale_shift = None):
x = self.proj(x)
x = self.norm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.act(x)
return x
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
super().__init__()
self.mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_emb_dim, dim_out * 2)
) if exists(time_emb_dim) else None
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None):
scale_shift = None
if exists(self.mlp) and exists(time_emb):
time_emb = self.mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, scale_shift = scale_shift)
h = self.block2(h)
return h + self.res_conv(x)
class LinearAttention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Sequential(
nn.Conv2d(hidden_dim, dim, 1),
LayerNorm(dim)
)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q.softmax(dim = -2)
k = k.softmax(dim = -1)
q = q * self.scale
v = v / (h * w)
context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
return self.to_out(out)
class Attention(nn.Module):
def __init__(self, dim, heads = 4, dim_head = 32):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
q = q * self.scale
sim = einsum('b h d i, b h d j -> b h i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h d j -> b h i d', attn, v)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
return self.to_out(out)
# model
class Unet(nn.Module):
def __init__(
self,
dim,
init_dim = None,
dim_mults=(1, 2, 4, 8),
channels = 3,
bits = BITS,
resnet_block_groups = 8,
learned_sinusoidal_dim = 16
):
super().__init__()
# determine dimensions
channels *= bits
self.channels = channels
input_channels = channels * 2
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
block_klass = partial(ResnetBlock, groups = resnet_block_groups)
# time embeddings
time_dim = dim * 4
sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinusoidal_dim)
fourier_dim = learned_sinusoidal_dim + 1
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
for ind, (dim_in, dim_out) in enumerate(in_out):
is_last = ind >= (num_resolutions - 1)
self.downs.append(nn.ModuleList([
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
block_klass(dim_in, dim_in, time_emb_dim = time_dim),
Residual(PreNorm(dim_in, LinearAttention(dim_in))),
Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1)
]))
mid_dim = dims[-1]
self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
is_last = ind == (len(in_out) - 1)
self.ups.append(nn.ModuleList([
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
Residual(PreNorm(dim_out, LinearAttention(dim_out))),
Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1)
]))
self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)
self.final_conv = nn.Conv2d(dim, channels, 1)
def forward(self, x, time, x_self_cond = None):
x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x_self_cond, x), dim = 1)
x = self.init_conv(x)
r = x.clone()
t = self.time_mlp(time)
h = []
for block1, block2, attn, downsample in self.downs:
x = block1(x, t)
h.append(x)
x = block2(x, t)
x = attn(x)
h.append(x)
x = downsample(x)
x = self.mid_block1(x, t)
x = self.mid_attn(x)
x = self.mid_block2(x, t)
for block1, block2, attn, upsample in self.ups:
x = torch.cat((x, h.pop()), dim = 1)
x = block1(x, t)
x = torch.cat((x, h.pop()), dim = 1)
x = block2(x, t)
x = attn(x)
x = upsample(x)
x = torch.cat((x, r), dim = 1)
x = self.final_res_block(x, t)
return self.final_conv(x)
# convert to bit representations and back
def decimal_to_bits(x, bits = BITS):
""" expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1 """
device = x.device
x = (x * 255).int().clamp(0, 255)
mask = 2 ** torch.arange(bits - 1, -1, -1, device = device)
mask = rearrange(mask, 'd -> d 1 1')
x = rearrange(x, 'b c h w -> b c 1 h w')
bits = ((x & mask) != 0).float()
bits = rearrange(bits, 'b c d h w -> b (c d) h w')
bits = bits * 2 - 1
return bits
def bits_to_decimal(x, bits = BITS):
""" expects bits from -1 to 1, outputs image tensor from 0 to 1 """
device = x.device
x = (x > 0).int()
mask = 2 ** torch.arange(bits - 1, -1, -1, device = device, dtype = torch.int32)
mask = rearrange(mask, 'd -> d 1 1')
x = rearrange(x, 'b (c d) h w -> b c d h w', d = bits)
dec = reduce(x * mask, 'b c d h w -> b c h w', 'sum')
return (dec / 255).clamp(0., 1.)
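# --- round-trip sketch (not part of the original file) ---
# A small, hedged check that decimal_to_bits and bits_to_decimal above are inverses
# of each other, up to the 8-bit quantization of the input image.
def _bit_roundtrip_sketch():
    img = torch.rand(2, 3, 16, 16)                 # image tensor in [0, 1]
    bits = decimal_to_bits(img)                    # (2, 3 * 8, 16, 16), values in {-1, 1}
    recon = bits_to_decimal(bits)                  # (2, 3, 16, 16), back in [0, 1]
    quantized = (img * 255).int().float() / 255    # what survives 8-bit quantization
    assert torch.allclose(recon, quantized)
    return recon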
# bit diffusion class
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
def beta_linear_log_snr(t):
return -torch.log(expm1(1e-4 + 10 * (t ** 2)))
def alpha_cosine_log_snr(t, s: float = 0.008):
return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version
def log_snr_to_alpha_sigma(log_snr):
return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))
class BitDiffusion(nn.Module):
def __init__(
self,
model,
*,
image_size,
timesteps = 1000,
use_ddim = False,
noise_schedule = 'cosine',
time_difference = 0.,
bit_scale = 1.
):
super().__init__()
self.model = model
self.channels = self.model.channels
self.image_size = image_size
if noise_schedule == "linear":
self.log_snr = beta_linear_log_snr
elif noise_schedule == "cosine":
self.log_snr = alpha_cosine_log_snr
else:
raise ValueError(f'invalid noise schedule {noise_schedule}')
self.bit_scale = bit_scale
self.timesteps = timesteps
self.use_ddim = use_ddim
# proposed in the paper, summed to time_next
# as a way to fix a deficiency in self-conditioning and lower FID when the number of sampling timesteps is < 400
self.time_difference = time_difference
@property
def device(self):
return next(self.model.parameters()).device
def get_sampling_timesteps(self, batch, *, device):
times = torch.linspace(1., 0., self.timesteps + 1, device = device)
times = repeat(times, 't -> b t', b = batch)
times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)
times = times.unbind(dim = -1)
return times
@torch.no_grad()
def ddpm_sample(self, shape, time_difference = None):
batch, device = shape[0], self.device
time_difference = default(time_difference, self.time_difference)
time_pairs = self.get_sampling_timesteps(batch, device = device)
img = torch.randn(shape, device=device)
x_start = None
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step', total = self.timesteps):
# add the time delay
            time_next = (time_next - time_difference).clamp(min = 0.)
noise_cond = self.log_snr(time)
# get predicted x0
x_start = self.model(img, noise_cond, x_start)
# clip x0
x_start.clamp_(-self.bit_scale, self.bit_scale)
# get log(snr)
log_snr = self.log_snr(time)
log_snr_next = self.log_snr(time_next)
log_snr, log_snr_next = map(partial(right_pad_dims_to, img), (log_snr, log_snr_next))
# get alpha sigma of time and next time
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)
# derive posterior mean and variance
c = -expm1(log_snr - log_snr_next)
mean = alpha_next * (img * (1 - c) / alpha + c * x_start)
variance = (sigma_next ** 2) * c
log_variance = log(variance)
# get noise
noise = torch.where(
rearrange(time_next > 0, 'b -> b 1 1 1'),
torch.randn_like(img),
torch.zeros_like(img)
)
img = mean + (0.5 * log_variance).exp() * noise
return bits_to_decimal(img)
@torch.no_grad()
def ddim_sample(self, shape, time_difference = None):
batch, device = shape[0], self.device
time_difference = default(time_difference, self.time_difference)
time_pairs = self.get_sampling_timesteps(batch, device = device)
img = torch.randn(shape, device = device)
x_start = None
for times, times_next in tqdm(time_pairs, desc = 'sampling loop time step'):
# add the time delay
times_next = (times_next - time_difference).clamp(min = 0.)
# get times and noise levels
log_snr = self.log_snr(times)
log_snr_next = self.log_snr(times_next)
padded_log_snr, padded_log_snr_next = map(partial(right_pad_dims_to, img), (log_snr, log_snr_next))
alpha, sigma = log_snr_to_alpha_sigma(padded_log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(padded_log_snr_next)
# predict x0
x_start = self.model(img, log_snr, x_start)
# clip x0
x_start.clamp_(-self.bit_scale, self.bit_scale)
# get predicted noise
pred_noise = (img - alpha * x_start) / sigma.clamp(min = 1e-8)
# calculate x next
img = x_start * alpha_next + pred_noise * sigma_next
return bits_to_decimal(img)
@torch.no_grad()
def sample(self, batch_size = 16):
image_size, channels = self.image_size, self.channels
sample_fn = self.ddpm_sample if not self.use_ddim else self.ddim_sample
return sample_fn((batch_size, channels, image_size, image_size))
def forward(self, img, *args, **kwargs):
batch, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
# sample random times
times = torch.zeros((batch,), device = device).float().uniform_(0, 1.)
# convert image to bit representation
img = decimal_to_bits(img) * self.bit_scale
# noise sample
noise = torch.randn_like(img)
noise_level = self.log_snr(times)
padded_noise_level = right_pad_dims_to(img, noise_level)
alpha, sigma = log_snr_to_alpha_sigma(padded_noise_level)
noised_img = alpha * img + sigma * noise
# if doing self-conditioning, 50% of the time, predict x_start from current set of times
# and condition with unet with that
# this technique will slow down training by 25%, but seems to lower FID significantly
self_cond = None
if torch.rand((1)) < 0.5:
with torch.no_grad():
self_cond = self.model(noised_img, noise_level).detach_()
# predict and take gradient step
pred = self.model(noised_img, noise_level, self_cond)
return F.mse_loss(pred, img)
# dataset classes
class Dataset(Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png', 'tiff'],
augment_horizontal_flip = False,
pil_img_type = None
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
maybe_convert_fn = partial(convert_image_to, pil_img_type) if exists(pil_img_type) else nn.Identity()
self.transform = T.Compose([
T.Lambda(maybe_convert_fn),
T.Resize(image_size),
T.RandomHorizontalFlip() if augment_horizontal_flip else nn.Identity(),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# trainer class
class Trainer(object):
def __init__(
self,
diffusion_model,
folder,
*,
train_batch_size = 16,
gradient_accumulate_every = 1,
augment_horizontal_flip = True,
train_lr = 1e-4,
train_num_steps = 100000,
ema_update_every = 10,
ema_decay = 0.995,
adam_betas = (0.9, 0.99),
save_and_sample_every = 1000,
num_samples = 25,
results_folder = './results',
amp = False,
mixed_precision_type = 'fp16',
split_batches = True,
pil_img_type = None
):
super().__init__()
self.accelerator = Accelerator(
split_batches = split_batches,
mixed_precision = mixed_precision_type if amp else 'no'
)
self.model = diffusion_model
assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
self.num_samples = num_samples
self.save_and_sample_every = save_and_sample_every
self.batch_size = train_batch_size
self.gradient_accumulate_every = gradient_accumulate_every
self.train_num_steps = train_num_steps
self.image_size = diffusion_model.image_size
# dataset and dataloader
        self.ds = Dataset(folder, self.image_size, augment_horizontal_flip = augment_horizontal_flip, pil_img_type = pil_img_type)
dl = DataLoader(self.ds, batch_size = train_batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())
dl = self.accelerator.prepare(dl)
self.dl = cycle(dl)
# optimizer
self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)
# for logging results in a folder periodically
if self.accelerator.is_main_process:
self.ema = EMA(diffusion_model, beta = ema_decay, update_every = ema_update_every)
self.results_folder = Path(results_folder)
self.results_folder.mkdir(exist_ok = True)
# step counter state
self.step = 0
# prepare model, dataloader, optimizer with accelerator
self.model, self.opt = self.accelerator.prepare(self.model, self.opt)
def save(self, milestone):
if not self.accelerator.is_local_main_process:
return
data = {
'step': self.step,
'model': self.accelerator.get_state_dict(self.model),
'opt': self.opt.state_dict(),
'ema': self.ema.state_dict(),
'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None
}
torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
def load(self, milestone):
data = torch.load(str(self.results_folder / f'model-{milestone}.pt'))
model = self.accelerator.unwrap_model(self.model)
model.load_state_dict(data['model'])
self.step = data['step']
self.opt.load_state_dict(data['opt'])
self.ema.load_state_dict(data['ema'])
if exists(self.accelerator.scaler) and exists(data['scaler']):
self.accelerator.scaler.load_state_dict(data['scaler'])
def train(self):
accelerator = self.accelerator
device = accelerator.device
with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:
while self.step < self.train_num_steps:
total_loss = 0.
for _ in range(self.gradient_accumulate_every):
data = next(self.dl).to(device)
with self.accelerator.autocast():
loss = self.model(data)
loss = loss / self.gradient_accumulate_every
total_loss += loss.item()
self.accelerator.backward(loss)
pbar.set_description(f'loss: {total_loss:.4f}')
accelerator.wait_for_everyone()
self.opt.step()
self.opt.zero_grad()
accelerator.wait_for_everyone()
if accelerator.is_main_process:
self.ema.to(device)
self.ema.update()
if self.step != 0 and self.step % self.save_and_sample_every == 0:
self.ema.ema_model.eval()
with torch.no_grad():
milestone = self.step // self.save_and_sample_every
batches = num_to_groups(self.num_samples, self.batch_size)
all_images_list = list(map(lambda n: self.ema.ema_model.sample(batch_size=n), batches))
all_images = torch.cat(all_images_list, dim = 0)
utils.save_image(all_images, str(self.results_folder / f'sample-{milestone}.png'), nrow = int(math.sqrt(self.num_samples)))
self.save(milestone)
self.step += 1
pbar.update(1)
accelerator.print('training complete')
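# --- usage sketch (not part of the original file) ---
# A minimal, hedged example of wiring together the Unet, BitDiffusion and Trainer
# classes defined above. 'path/to/images' is a placeholder, not a real dataset path.
def _bit_diffusion_usage_sketch():
    unet = Unet(
        dim = 32,
        dim_mults = (1, 2, 4, 8)
    )
    diffusion = BitDiffusion(
        unet,
        image_size = 128,
        timesteps = 100,        # number of sampling steps
        time_difference = 0.1   # the asymmetric time interval proposed in the paper
    )
    # the wrapper returns the training loss when called on images in [0, 1]
    loss = diffusion(torch.rand(4, 3, 128, 128))
    loss.backward()
    # unconditional sampling; bits are decoded back to (batch, 3, 128, 128) images
    sampled = diffusion.sample(batch_size = 4)
    # alternatively, train on a folder of images with the Trainer
    trainer = Trainer(
        diffusion,
        'path/to/images',       # placeholder folder of training images
        train_batch_size = 4,
        gradient_accumulate_every = 4,
        train_lr = 1e-4,
        train_num_steps = 700000
    )
    # trainer.train()
    return sampled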
| bit-diffusion-main | bit_diffusion/bit_diffusion.py |
from bit_diffusion.bit_diffusion import Unet, BitDiffusion, Trainer
| bit-diffusion-main | bit_diffusion/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'ETSformer-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.1',
license='MIT',
description = 'ETSTransformer - Exponential Smoothing Transformer for Time-Series Forecasting - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/ETSformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'time-series',
'forecasting'
],
install_requires=[
'einops>=0.4',
'scipy',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| ETSformer-pytorch-main | setup.py |
from etsformer_pytorch.etsformer_pytorch import (
ETSFormer,
ClassificationWrapper,
MHESA
)
| ETSformer-pytorch-main | etsformer_pytorch/__init__.py |
from math import pi
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn, einsum
from scipy.fftpack import next_fast_len
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# constants
Intermediates = namedtuple('Intermediates', ['growth_latents', 'seasonal_latents', 'level_output'])
# helper functions
def exists(val):
return val is not None
# fourier helpers
def fourier_extrapolate(signal, start, end):
device = signal.device
fhat = torch.fft.fft(signal)
fhat_len = fhat.shape[-1]
time = torch.linspace(start, end - 1, end - start, device = device, dtype = torch.complex64)
freqs = torch.linspace(0, fhat_len - 1, fhat_len, device = device, dtype = torch.complex64)
res = fhat[..., None, :] * (1.j * 2 * pi * freqs[..., None, :] * time[..., :, None] / fhat_len).exp() / fhat_len
return res.sum(dim = -1).real
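# --- sketch (not part of the original file) ---
# A small, hedged illustration of fourier_extrapolate above: a periodic signal
# observed on steps 0..63 is continued onto future steps 64..95.
def _fourier_extrapolate_sketch():
    t = torch.arange(64).float()
    signal = torch.sin(2 * pi * t / 16)            # sine wave with period 16
    future = fourier_extrapolate(signal, 64, 96)   # (32,) continuation of the sine
    return future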
# classes
def InputEmbedding(time_features, model_dim, kernel_size = 3, dropout = 0.):
return nn.Sequential(
Rearrange('b n d -> b d n'),
nn.Conv1d(time_features, model_dim, kernel_size = kernel_size, padding = kernel_size // 2),
nn.Dropout(dropout),
Rearrange('b d n -> b n d'),
)
def FeedForward(dim, mult = 4, dropout = 0.):
return nn.Sequential(
nn.Linear(dim, dim * mult),
nn.Sigmoid(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim),
nn.Dropout(dropout)
)
class FeedForwardBlock(nn.Module):
def __init__(
self,
*,
dim,
**kwargs
):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.ff = FeedForward(dim, **kwargs)
self.post_norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.post_norm(x + self.ff(x))
# encoder related classes
## multi-head exponential smoothing attention
def conv1d_fft(x, weights, dim = -2, weight_dim = -1):
# Algorithm 3 in paper
N = x.shape[dim]
M = weights.shape[weight_dim]
fast_len = next_fast_len(N + M - 1)
f_x = torch.fft.rfft(x, n = fast_len, dim = dim)
f_weight = torch.fft.rfft(weights, n = fast_len, dim = weight_dim)
f_v_weight = f_x * rearrange(f_weight.conj(), '... -> ... 1')
out = torch.fft.irfft(f_v_weight, fast_len, dim = dim)
out = out.roll(-1, dims = (dim,))
indices = torch.arange(start = fast_len - N, end = fast_len, dtype = torch.long, device = x.device)
out = out.index_select(dim, indices)
return out
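# --- sketch (not part of the original file) ---
# A small, hedged check of conv1d_fft above: it computes a causal weighted sum in
# which the last entry of `weights` multiplies the current step, the one before it
# the previous step, and so on. With weights alpha * (1 - alpha) ** k this is exactly
# exponential smoothing, which is how MHESA below uses it.
def _conv1d_fft_sketch():
    n, d, alpha = 16, 4, 0.3
    x = torch.randn(1, n, d)
    powers = torch.flip(torch.arange(n).float(), dims = (0,))
    weights = alpha * (1 - alpha) ** powers              # shape (n,)
    fast = conv1d_fft(x, weights, dim = -2, weight_dim = -1)
    # explicit causal loop for comparison
    naive = torch.zeros_like(x)
    for t in range(n):
        for j in range(t + 1):
            naive[:, t] += alpha * (1 - alpha) ** j * x[:, t - j]
    assert torch.allclose(fast, naive, atol = 1e-4)
    return fast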
class MHESA(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dropout = 0.,
norm_heads = False
):
super().__init__()
self.heads = heads
self.initial_state = nn.Parameter(torch.randn(heads, dim // heads))
self.dropout = nn.Dropout(dropout)
self.alpha = nn.Parameter(torch.randn(heads))
self.norm_heads = nn.Sequential(
Rearrange('b n (h d) -> b (h d) n', h = heads),
nn.GroupNorm(heads, dim),
Rearrange('b (h d) n -> b n (h d)', h = heads)
) if norm_heads else nn.Identity()
self.project_in = nn.Linear(dim, dim)
self.project_out = nn.Linear(dim, dim)
def naive_Aes(self, x, weights):
n, h = x.shape[-2], self.heads
# in appendix A.1 - Algorithm 2
arange = torch.arange(n, device = x.device)
weights = repeat(weights, '... l -> ... t l', t = n)
indices = repeat(arange, 'l -> h t l', h = h, t = n)
indices = (indices - rearrange(arange + 1, 't -> 1 t 1')) % n
weights = weights.gather(-1, indices)
weights = self.dropout(weights)
# causal
weights = weights.tril()
# multiply
output = einsum('b h n d, h m n -> b h m d', x, weights)
return output
def forward(self, x, naive = False):
b, n, d, h, device = *x.shape, self.heads, x.device
# linear project in
x = self.project_in(x)
# split out heads
x = rearrange(x, 'b n (h d) -> b h n d', h = h)
# temporal difference
x = torch.cat((
repeat(self.initial_state, 'h d -> b h 1 d', b = b),
x
), dim = -2)
x = x[:, :, 1:] - x[:, :, :-1]
# prepare exponential alpha
alpha = self.alpha.sigmoid()
alpha = rearrange(alpha, 'h -> h 1')
# arange == powers
arange = torch.arange(n, device = device)
weights = alpha * (1 - alpha) ** torch.flip(arange, dims = (0,))
if naive:
output = self.naive_Aes(x, weights)
else:
output = conv1d_fft(x, weights)
# get initial state contribution
init_weight = (1 - alpha) ** (arange + 1)
init_output = rearrange(init_weight, 'h n -> h n 1') * rearrange(self.initial_state, 'h d -> h 1 d')
output = output + init_output
# merge heads
output = rearrange(output, 'b h n d -> b n (h d)')
# maybe sub-ln from https://arxiv.org/abs/2210.06423 - retnet used groupnorm
output = self.norm_heads(output)
return self.project_out(output)
## frequency attention
class FrequencyAttention(nn.Module):
def __init__(
self,
*,
K = 4,
dropout = 0.
):
super().__init__()
self.K = K
self.dropout = nn.Dropout(dropout)
def forward(self, x):
freqs = torch.fft.rfft(x, dim = 1)
# get amplitudes
amp = freqs.abs()
amp = self.dropout(amp)
# topk amplitudes - for seasonality, branded as attention
topk_amp, _ = amp.topk(k = self.K, dim = 1, sorted = True)
# mask out all freqs with lower amplitudes than the lowest value of the topk above
topk_freqs = freqs.masked_fill(amp < topk_amp[:, -1:], 0.+0.j)
# inverse fft
return torch.fft.irfft(topk_freqs, dim = 1)
## level module
class Level(nn.Module):
def __init__(self, time_features, model_dim):
super().__init__()
self.alpha = nn.Parameter(torch.Tensor([0.]))
self.to_growth = nn.Linear(model_dim, time_features)
self.to_seasonal = nn.Linear(model_dim, time_features)
def forward(self, x, latent_growth, latent_seasonal):
# following equation in appendix A.2
n, device = x.shape[1], x.device
alpha = self.alpha.sigmoid()
arange = torch.arange(n, device = device)
powers = torch.flip(arange, dims = (0,))
# Aes for raw time series signal with seasonal terms (from frequency attention) subtracted out
        seasonal = self.to_seasonal(latent_seasonal)
Aes_weights = alpha * (1 - alpha) ** powers
seasonal_normalized_term = conv1d_fft(x - seasonal, Aes_weights)
# auxiliary term
growth = self.to_growth(latent_growth)
growth_smoothing_weights = (1 - alpha) ** powers
growth_term = conv1d_fft(growth, growth_smoothing_weights)
return seasonal_normalized_term + growth_term
# decoder classes
class LevelStack(nn.Module):
def forward(self, x, num_steps_forecast):
return repeat(x[:, -1], 'b d -> b n d', n = num_steps_forecast)
class GrowthDampening(nn.Module):
def __init__(
self,
dim,
heads = 8
):
super().__init__()
self.heads = heads
self.dampen_factor = nn.Parameter(torch.randn(heads))
def forward(self, growth, *, num_steps_forecast):
device, h = growth.device, self.heads
dampen_factor = self.dampen_factor.sigmoid()
# like level stack, it takes the last growth for forecasting
last_growth = growth[:, -1]
last_growth = rearrange(last_growth, 'b l (h d) -> b l 1 h d', h = h)
# prepare dampening factors per head and the powers
dampen_factor = rearrange(dampen_factor, 'h -> 1 1 1 h 1')
powers = (torch.arange(num_steps_forecast, device = device) + 1)
powers = rearrange(powers, 'n -> 1 1 n 1 1')
# following Eq(2) in the paper
dampened_growth = last_growth * (dampen_factor ** powers).cumsum(dim = 2)
return rearrange(dampened_growth, 'b l n h d -> b l n (h d)')
# main class
class ETSFormer(nn.Module):
def __init__(
self,
*,
model_dim,
time_features = 1,
embed_kernel_size = 3,
layers = 2,
heads = 8,
K = 4,
dropout = 0.
):
super().__init__()
assert (model_dim % heads) == 0, 'model dimension must be divisible by number of heads'
self.model_dim = model_dim
self.time_features = time_features
self.embed = InputEmbedding(time_features, model_dim, kernel_size = embed_kernel_size, dropout = dropout)
self.encoder_layers = nn.ModuleList([])
for ind in range(layers):
is_last_layer = ind == (layers - 1)
self.encoder_layers.append(nn.ModuleList([
FrequencyAttention(K = K, dropout = dropout),
MHESA(dim = model_dim, heads = heads, dropout = dropout),
FeedForwardBlock(dim = model_dim) if not is_last_layer else None,
Level(time_features = time_features, model_dim = model_dim)
]))
self.growth_dampening_module = GrowthDampening(dim = model_dim, heads = heads)
self.latents_to_time_features = nn.Linear(model_dim, time_features)
self.level_stack = LevelStack()
def forward(
self,
x,
*,
num_steps_forecast = 0,
return_latents = False
):
one_time_feature = x.ndim == 2
if one_time_feature:
x = rearrange(x, 'b n -> b n 1')
z = self.embed(x)
latent_growths = []
latent_seasonals = []
for freq_attn, mhes_attn, ff_block, level in self.encoder_layers:
latent_seasonal = freq_attn(z)
z = z - latent_seasonal
latent_growth = mhes_attn(z)
z = z - latent_growth
if exists(ff_block):
z = ff_block(z)
x = level(x, latent_growth, latent_seasonal)
latent_growths.append(latent_growth)
latent_seasonals.append(latent_seasonal)
latent_growths = torch.stack(latent_growths, dim = -2)
latent_seasonals = torch.stack(latent_seasonals, dim = -2)
latents = Intermediates(latent_growths, latent_seasonals, x)
if num_steps_forecast == 0:
return latents
latent_seasonals = rearrange(latent_seasonals, 'b n l d -> b l d n')
extrapolated_seasonals = fourier_extrapolate(latent_seasonals, x.shape[1], x.shape[1] + num_steps_forecast)
extrapolated_seasonals = rearrange(extrapolated_seasonals, 'b l d n -> b l n d')
dampened_growths = self.growth_dampening_module(latent_growths, num_steps_forecast = num_steps_forecast)
level = self.level_stack(x, num_steps_forecast = num_steps_forecast)
summed_latents = dampened_growths.sum(dim = 1) + extrapolated_seasonals.sum(dim = 1)
forecasted = level + self.latents_to_time_features(summed_latents)
if one_time_feature:
forecasted = rearrange(forecasted, 'b n 1 -> b n')
if return_latents:
return forecasted, latents
return forecasted
# classification wrapper
class MultiheadLayerNorm(nn.Module):
def __init__(self, dim, heads = 1, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(heads, 1, dim))
self.b = nn.Parameter(torch.zeros(heads, 1, dim))
def forward(self, x):
std = torch.var(x, dim = -1, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = -1, keepdim = True)
return (x - mean) / (std + self.eps) * self.g + self.b
class ClassificationWrapper(nn.Module):
def __init__(
self,
*,
etsformer,
num_classes = 10,
heads = 16,
dim_head = 32,
level_kernel_size = 3,
growth_kernel_size = 3,
seasonal_kernel_size = 3,
dropout = 0.
):
super().__init__()
assert isinstance(etsformer, ETSFormer)
self.etsformer = etsformer
model_dim = etsformer.model_dim
time_features = etsformer.time_features
inner_dim = dim_head * heads
self.scale = dim_head ** -0.5
self.dropout = nn.Dropout(dropout)
self.queries = nn.Parameter(torch.randn(heads, dim_head))
self.growth_to_kv = nn.Sequential(
Rearrange('b n d -> b d n'),
nn.Conv1d(model_dim, inner_dim * 2, growth_kernel_size, bias = False, padding = growth_kernel_size // 2),
Rearrange('... (kv h d) n -> ... (kv h) n d', kv = 2, h = heads),
MultiheadLayerNorm(dim_head, heads = 2 * heads),
)
self.seasonal_to_kv = nn.Sequential(
Rearrange('b n d -> b d n'),
nn.Conv1d(model_dim, inner_dim * 2, seasonal_kernel_size, bias = False, padding = seasonal_kernel_size // 2),
Rearrange('... (kv h d) n -> ... (kv h) n d', kv = 2, h = heads),
MultiheadLayerNorm(dim_head, heads = 2 * heads),
)
self.level_to_kv = nn.Sequential(
Rearrange('b n t -> b t n'),
nn.Conv1d(time_features, inner_dim * 2, level_kernel_size, bias = False, padding = level_kernel_size // 2),
Rearrange('b (kv h d) n -> b (kv h) n d', kv = 2, h = heads),
MultiheadLayerNorm(dim_head, heads = 2 * heads),
)
self.to_out = nn.Linear(inner_dim, model_dim)
self.to_logits = nn.Sequential(
nn.LayerNorm(model_dim),
nn.Linear(model_dim, num_classes)
)
def forward(self, timeseries):
latent_growths, latent_seasonals, level_output = self.etsformer(timeseries)
latent_growths = latent_growths.mean(dim = -2)
latent_seasonals = latent_seasonals.mean(dim = -2)
# queries, key, values
q = self.queries * self.scale
kvs = torch.cat((
self.growth_to_kv(latent_growths),
self.seasonal_to_kv(latent_seasonals),
self.level_to_kv(level_output)
), dim = -2)
k, v = kvs.chunk(2, dim = 1)
# cross attention pooling
sim = einsum('h d, b h j d -> b h j', q, k)
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
out = einsum('b h j, b h j d -> b h d', attn, v)
out = rearrange(out, 'b ... -> b (...)')
out = self.to_out(out)
# project to logits
return self.to_logits(out)
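# --- usage sketch (not part of the original file) ---
# A minimal, hedged example of forecasting with ETSFormer and of pooling its latents
# for classification with ClassificationWrapper. Hyperparameters are illustrative.
def _etsformer_usage_sketch():
    model = ETSFormer(
        model_dim = 512,
        time_features = 1,
        embed_kernel_size = 3,
        layers = 2,
        heads = 8,
        K = 4,              # number of frequencies kept by the frequency attention
        dropout = 0.2
    )
    timeseries = torch.randn(1, 1024)                       # (batch, time) for a single feature
    forecast = model(timeseries, num_steps_forecast = 32)   # (1, 32)
    classifier = ClassificationWrapper(
        etsformer = model,
        num_classes = 10,
        heads = 16,
        dim_head = 32,
        dropout = 0.2
    )
    logits = classifier(timeseries)                         # (1, 10)
    return forecast, logits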
| ETSformer-pytorch-main | etsformer_pytorch/etsformer_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'MEGABYTE-pytorch',
packages = find_packages(),
version = '0.2.1',
license='MIT',
description = 'MEGABYTE - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/MEGABYTE-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers'
],
install_requires=[
'beartype',
'einops>=0.6.1',
'torch>=1.10',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| MEGABYTE-pytorch-main | setup.py |
from MEGABYTE_pytorch import MEGABYTE
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
PRIME_LEN = 100
SEQ_LEN = 8192
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = MEGABYTE(
num_tokens = 256,
dim = (768, 512, 256),
depth = (6, 4, 2),
max_seq_len = (512, 4, 4),
flash_attn = True
).cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
x = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
train_x, valid_x = np.split(x, [int(90e6)])
data_train, data_val = map(torch.from_numpy, (train_x, valid_x))
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i != 0 and i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime_inp = inp[:PRIME_LEN]
prime = decode_tokens(prime_inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(prime_inp[None, :])
sample = sample.flatten(1)
output_str = decode_tokens(sample[0][PRIME_LEN:])
print(output_str)
| MEGABYTE-pytorch-main | train.py |
from MEGABYTE_pytorch.megabyte import MEGABYTE
| MEGABYTE-pytorch-main | MEGABYTE_pytorch/__init__.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
causal = False,
dropout = 0.,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def get_mask(self, i, j, device):
return torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1)
def flash_attn(self, q, k, v, mask = None, attn_bias = None):
_, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# single headed key / values
if k.ndim == 3:
k = rearrange(k, 'b n d -> b 1 n d')
if v.ndim == 3:
v = rearrange(v, 'b n d -> b 1 n d')
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask) and mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
scale = q.shape[-1] ** -0.5
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
if self.flash:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale
# causal mask
if self.causal:
causal_mask = self.get_mask(q_len, k_len, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v)
return out
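# --- usage sketch (not part of the original file) ---
# A minimal, hedged example of calling Attend directly with multi-headed queries and
# single-headed keys / values, the way the MEGABYTE attention layer uses it.
def _attend_usage_sketch():
    attend = Attend(causal = True, dropout = 0., flash = False)
    q = torch.randn(2, 8, 1024, 64)      # (batch, heads, seq, dim_head)
    k = torch.randn(2, 1024, 64)         # single-headed keys, shared across heads
    v = torch.randn(2, 1024, 64)
    out = attend(q, k, v)                # (2, 8, 1024, 64)
    return out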
| MEGABYTE-pytorch-main | MEGABYTE_pytorch/attend.py |
import math
import functools
from itertools import zip_longest
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, reduce, repeat, pack, unpack
from einops.layers.torch import Rearrange
from beartype import beartype
from beartype.typing import Tuple, Union
from MEGABYTE_pytorch.attend import Attend
from tqdm import tqdm
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def remainder_to_mult(num, mult):
return (mult - num % mult) % mult
def cast_tuple(t, length = 1):
return t if isinstance(t, tuple) else ((t,) * length)
def reduce_mult(nums):
return functools.reduce(lambda x, y: x * y, nums, 1)
# tensor helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# token shift, from Peng et al of RWKV
def token_shift(t):
t, t_shift = t.chunk(2, dim = -1)
t_shift = F.pad(t_shift, (0, 0, 1, -1))
return torch.cat((t, t_shift), dim = -1)
# rotary positional embedding
class RotaryEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
@property
def device(self):
return next(self.buffers()).device
def forward(self, seq_len):
t = torch.arange(seq_len, device = self.device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
return freqs
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return t * pos.cos() + rotate_half(t) * pos.sin()
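# --- sketch (not part of the original file) ---
# A small, hedged illustration of the rotary embedding helpers above: build the
# per-position frequencies once, then rotate queries and keys with them.
def _rotary_sketch():
    dim_head, seq_len = 64, 128
    rotary = RotaryEmbedding(dim_head)
    pos = rotary(seq_len)                          # (seq_len, dim_head)
    q = torch.randn(1, 8, seq_len, dim_head)       # (batch, heads, seq, dim_head)
    k = torch.randn(1, 8, seq_len, dim_head)
    q, k = map(lambda t: apply_rotary_pos_emb(pos, t), (q, k))
    return q, k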
# norm
class RMSNorm(nn.Module):
def __init__(self, dim, eps = 1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
return x / norm.clamp(min = self.eps) * self.g
# helper classes
def FeedForward(*, dim, mult = 4, dropout = 0.):
return nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
class Attention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.,
flash = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.attend = Attend(
causal = True,
flash = flash,
dropout = dropout
)
self.dropout = nn.Dropout(dropout)
self.norm = RMSNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x, rotary_emb = None):
h, device = self.heads, x.device
x = self.norm(x)
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if exists(rotary_emb):
q, k = map(lambda t: apply_rotary_pos_emb(rotary_emb, t), (q, k))
out = self.attend(q, k, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
layers,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
ff_mult = 4,
rel_pos = True,
flash_attn = False
):
super().__init__()
self.rotary_emb = RotaryEmbedding(dim_head) if rel_pos else None
self.layers = nn.ModuleList([])
for _ in range(layers):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, flash = flash_attn),
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
]))
self.norm = RMSNorm(dim)
def forward(self, x):
n = x.shape[-2]
rotary_emb = self.rotary_emb(n) if exists(self.rotary_emb) else None
for attn, ff in self.layers:
x = attn(token_shift(x), rotary_emb = rotary_emb) + x
x = ff(token_shift(x)) + x
return self.norm(x)
# main class
class MEGABYTE(nn.Module):
@beartype
def __init__(
self,
*,
num_tokens,
dim: Union[Tuple, int],
depth: Tuple,
max_seq_len: Tuple,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
pad_id = 0,
rel_pos = False,
pos_emb = False,
flash_attn = False
):
super().__init__()
# simplified configuration for each stage of the hierarchy
# depth = (2, 2, 4) would translate to depth 2 at first stage, depth 2 second stage, depth 4 third
# max_seq_len = (16, 8, 4) would translate to max sequence length of 16 at first stage, length of 8 at second stage, length of 4 for last
assert isinstance(depth, tuple) and isinstance(max_seq_len, tuple)
assert len(depth) == len(max_seq_len)
self.stages = len(depth)
dim = cast_tuple(dim, self.stages)
assert len(dim) == self.stages
coarsest_dim, *_, fine_dim = dim
self.max_seq_len = max_seq_len
self.start_tokens = nn.ParameterList([nn.Parameter(torch.randn(h_dim)) for h_dim, seq_len in zip(dim, max_seq_len)])
self.pos_embs = nn.ModuleList([nn.Embedding(seq_len, h_dim) for h_dim, seq_len in zip(dim, max_seq_len)]) if pos_emb else None
self.token_embs = nn.ModuleList([])
patch_size = 1
self.token_embs.append(nn.Embedding(num_tokens, fine_dim))
for dim_out, seq_len in zip(reversed(dim[:-1]), reversed(max_seq_len[1:])):
patch_size *= seq_len
self.token_embs.append(nn.Sequential(
nn.Embedding(num_tokens, fine_dim),
Rearrange('... r d -> ... (r d)'),
nn.LayerNorm(patch_size * fine_dim),
nn.Linear(patch_size * fine_dim, dim_out),
nn.LayerNorm(dim_out)
))
self.transformers = nn.ModuleList([])
self.to_next_transformer_projections = nn.ModuleList([])
for h_dim, next_h_dim, stage_depth, next_seq_len in zip_longest(dim, dim[1:], depth, max_seq_len[1:]):
self.transformers.append(Transformer(
dim = h_dim,
layers = stage_depth,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
ff_mult = ff_mult,
rel_pos = rel_pos,
flash_attn = flash_attn
))
proj = nn.Identity()
if exists(next_h_dim) and next_h_dim != dim:
proj = nn.Sequential(
Rearrange('b ... d -> b (...) d'),
nn.Linear(h_dim, next_h_dim * next_seq_len),
Rearrange('b m (n d) -> (b m) n d', n = next_seq_len)
)
self.to_next_transformer_projections.append(proj)
self.to_logits = nn.Linear(fine_dim, num_tokens)
self.pad_id = pad_id
def generate(self, prime = None, filter_thres = 0.9, temperature = 1., default_batch_size = 1):
total_seq_len = reduce_mult(self.max_seq_len)
device = next(self.parameters()).device
if not exists(prime):
prime = torch.empty((default_batch_size, 0), dtype = torch.long, device = device)
seq = prime
batch = seq.shape[0]
for _ in tqdm(range(total_seq_len - seq.shape[-1])):
logits = self.forward(seq)[:, -1]
logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(logits, dim = -1, temperature = temperature)
seq = torch.cat((seq, rearrange(sampled, 'b -> b 1')), dim = -1)
return seq.reshape(batch, *self.max_seq_len)
def forward_empty(self, batch_size):
# take care of special case
# where you sample from input of 0 (start token only)
prev_stage_tokens_repr = None
for stage_start_tokens, transformer, proj in zip(self.start_tokens, self.transformers, self.to_next_transformer_projections):
tokens = repeat(stage_start_tokens, 'd -> b 1 d', b = batch_size)
if exists(prev_stage_tokens_repr):
tokens = tokens + prev_stage_tokens_repr[..., :tokens.shape[-2], :]
tokens = transformer(tokens)
prev_stage_tokens_repr = proj(tokens)
return self.to_logits(tokens)
def forward(self, ids, return_loss = False):
batch = ids.shape[0]
assert ids.ndim in {2, self.stages + 1}
flattened_dims = ids.ndim == 2
ids_orig_ndim = ids.ndim
if ids.numel() == 0:
return self.forward_empty(ids.shape[0])
if flattened_dims:
# allow for ids to be given in the shape of (batch, seq)
# in which case it will be auto-padded to the next nearest multiple of depth seq len
seq_len = ids.shape[-1]
multiple_of = reduce_mult(self.max_seq_len[1:])
padding = remainder_to_mult(seq_len, multiple_of)
ids = F.pad(ids, (0, padding), value = self.pad_id)
ids = ids.reshape(batch, -1, *self.max_seq_len[1:])
b, *prec_dims, device = *ids.shape, ids.device
# check some dimensions
assert prec_dims[0] <= self.max_seq_len[0], 'the first dimension of your axial autoregressive transformer must be less than the first tuple element of max_seq_len (like any autoregressive transformer)'
assert tuple(prec_dims[1:]) == tuple(self.max_seq_len[1:]), 'all subsequent dimensions must match exactly'
# get tokens for all hierarchical stages, reducing by appropriate dimensions
# and adding the absolute positional embeddings
tokens_at_stages = []
pos_embs = default(self.pos_embs, (None,))
for ind, pos_emb, token_emb in zip_longest(range(len(prec_dims)), pos_embs, self.token_embs):
is_first = ind == 0
tokens = token_emb(ids)
if exists(pos_emb):
positions = pos_emb(torch.arange(tokens.shape[-2], device = device))
tokens = tokens + positions
tokens_at_stages.insert(0, tokens)
if is_first:
continue
ids = rearrange(ids, '... m n -> ... (m n)')
# the un-pixelshuffled representations of the previous hierarchy, starts with None
prev_stage_tokens_repr = None
# spatial tokens is tokens with depth pos reduced along depth dimension + spatial positions
for stage_start_tokens, stage_tokens, transformer, proj in zip(self.start_tokens, tokens_at_stages, self.transformers, self.to_next_transformer_projections):
stage_tokens, ps = pack_one(stage_tokens, '* n d')
stage_start_tokens = repeat(stage_start_tokens, 'f -> b 1 f', b = stage_tokens.shape[0])
# concat start token
stage_tokens = torch.cat((
stage_start_tokens,
stage_tokens,
), dim = -2)
# sum the previous hierarchy's representation
if exists(prev_stage_tokens_repr):
prev_stage_tokens_repr = F.pad(prev_stage_tokens_repr, (0, 0, 1, 0), value = 0.)
stage_tokens = stage_tokens + prev_stage_tokens_repr
attended = transformer(stage_tokens)
attended = unpack_one(attended, ps, '* n d')
# project for next stage in the hierarchy
prev_stage_tokens_repr = proj(attended[..., :-1, :])
# project to logits
logits = self.to_logits(attended)
start_tokens = logits[(slice(None), *((0,) * (logits.ndim - 2)), slice(None))]
start_tokens = rearrange(start_tokens, 'b d -> b 1 d')
logits = logits[..., 1:, :]
if not return_loss:
if flattened_dims:
logits = rearrange(logits, 'b ... c -> b (...) c')
logits = logits[:, :seq_len]
return logits
logits = rearrange(logits, 'b ... c -> b (...) c')
logits = torch.cat((start_tokens, logits), dim = -2)
preds = rearrange(logits, 'b n c -> b c n')
labels = rearrange(ids, 'b ... -> b (...)')
loss = F.cross_entropy(
preds[..., :-1],
labels,
ignore_index = self.pad_id
)
return loss
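# --- usage sketch (not part of the original file) ---
# A minimal, hedged example of training and sampling the MEGABYTE model defined above
# on random byte ids. The hierarchy sizes below are small illustrative assumptions.
def _megabyte_usage_sketch():
    model = MEGABYTE(
        num_tokens = 256,          # byte-level vocabulary
        dim = (128, 64),           # dimension per stage, coarse to fine
        depth = (2, 2),            # transformer depth per stage
        max_seq_len = (64, 4),     # 64 patches of 4 bytes = 256 bytes total
        flash_attn = False
    )
    ids = torch.randint(0, 256, (1, 64, 4))     # may also be passed flat as (1, 256)
    loss = model(ids, return_loss = True)
    loss.backward()
    # autoregressive sampling; returns ids shaped (batch, *max_seq_len)
    sampled = model.generate(default_batch_size = 1)
    return sampled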
| MEGABYTE-pytorch-main | MEGABYTE_pytorch/megabyte.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | __init__.py |
# Lint as: python3
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configurations for IODINE."""
# pylint: disable=missing-docstring, unused-variable
import math
def clevr6():
n_z = 64 # number of latent dimensions
num_components = 7 # number of components (K)
num_iters = 5
checkpoint_dir = "iodine/checkpoints/clevr6"
# For the paper we used 8 GPUs with a batch size of 4 each.
# This means a total batch size of 32, which is too large for a single GPU.
# When reducing the batch size, the learning rate should also be lowered.
batch_size = 4
learn_rate = 0.001 * math.sqrt(batch_size / 32)
data = {
"constructor": "iodine.modules.data.CLEVR",
"batch_size": batch_size,
"path": "multi_object_datasets/clevr_with_masks_train.tfrecords",
"max_num_objects": 6,
}
model = {
"constructor": "iodine.modules.iodine.IODINE",
"n_z": n_z,
"num_components": num_components,
"num_iters": num_iters,
"iter_loss_weight": "linspace",
"coord_type": "linear",
"decoder": {
"constructor": "iodine.modules.decoder.ComponentDecoder",
"pixel_decoder": {
"constructor": "iodine.modules.networks.BroadcastConv",
"cnn_opt": {
# Final channels is irrelevant with target_output_shape
"output_channels": [64, 64, 64, 64, None],
"kernel_shapes": [3],
"strides": [1],
"activation": "elu",
},
"coord_type": "linear",
},
},
"refinement_core": {
"constructor": "iodine.modules.refinement.RefinementCore",
"encoder_net": {
"constructor": "iodine.modules.networks.CNN",
"mode": "avg_pool",
"cnn_opt": {
"output_channels": [64, 64, 64, 64],
"strides": [2],
"kernel_shapes": [3],
"activation": "elu",
},
"mlp_opt": {
"output_sizes": [256, 256],
"activation": "elu"
},
},
"recurrent_net": {
"constructor": "iodine.modules.networks.LSTM",
"hidden_sizes": [256],
},
"refinement_head": {
"constructor": "iodine.modules.refinement.ResHead"
},
},
"latent_dist": {
"constructor": "iodine.modules.distributions.LocScaleDistribution",
"dist": "normal",
"scale_act": "softplus",
"scale": "var",
"name": "latent_dist",
},
"output_dist": {
"constructor": "iodine.modules.distributions.MaskedMixture",
"num_components": num_components,
"component_dist": {
"constructor":
"iodine.modules.distributions.LocScaleDistribution",
"dist":
"logistic",
"scale":
"fixed",
"scale_val":
0.03,
"name":
"pixel_distribution",
},
},
"factor_evaluator": {
"constructor":
"iodine.modules.factor_eval.FactorRegressor",
"mapping": [
("color", 9, "categorical"),
("shape", 4, "categorical"),
("size", 3, "categorical"),
("position", 3, "scalar"),
],
},
}
optimizer = {
"constructor": "tensorflow.train.AdamOptimizer",
"learning_rate": {
"constructor": "tensorflow.train.exponential_decay",
"learning_rate": learn_rate,
"global_step": {
"constructor": "tensorflow.train.get_or_create_global_step"
},
"decay_steps": 1000000,
"decay_rate": 0.1,
},
"beta1": 0.95,
}
def multi_dsprites():
n_z = 16 # number of latent dimensions
num_components = 6 # number of components (K)
num_iters = 5
checkpoint_dir = "iodine/checkpoints/multi_dsprites"
# For the paper we used 8 GPUs with a batch size of 16 each.
# This means a total batch size of 128, which is too large for a single GPU.
# When reducing the batch size, the learning rate should also be lowered.
batch_size = 16
learn_rate = 0.0003 * math.sqrt(batch_size / 128)
data = {
"constructor":
"iodine.modules.data.MultiDSprites",
"batch_size":
batch_size,
"path":
"multi_object_datasets/multi_dsprites_colored_on_grayscale.tfrecords",
"dataset_variant":
"colored_on_grayscale",
"min_num_objs":
3,
"max_num_objs":
3,
}
model = {
"constructor": "iodine.modules.iodine.IODINE",
"n_z": n_z,
"num_components": num_components,
"num_iters": num_iters,
"iter_loss_weight": "linspace",
"coord_type": "cos",
"coord_freqs": 3,
"decoder": {
"constructor": "iodine.modules.decoder.ComponentDecoder",
"pixel_decoder": {
"constructor": "iodine.modules.networks.BroadcastConv",
"cnn_opt": {
# Final channels is irrelevant with target_output_shape
"output_channels": [32, 32, 32, 32, None],
"kernel_shapes": [5],
"strides": [1],
"activation": "elu",
},
"coord_type": "linear",
},
},
"refinement_core": {
"constructor": "iodine.modules.refinement.RefinementCore",
"encoder_net": {
"constructor": "iodine.modules.networks.CNN",
"mode": "avg_pool",
"cnn_opt": {
"output_channels": [32, 32, 32],
"strides": [2],
"kernel_shapes": [5],
"activation": "elu",
},
"mlp_opt": {
"output_sizes": [128],
"activation": "elu"
},
},
"recurrent_net": {
"constructor": "iodine.modules.networks.LSTM",
"hidden_sizes": [128],
},
"refinement_head": {
"constructor": "iodine.modules.refinement.ResHead"
},
},
"latent_dist": {
"constructor": "iodine.modules.distributions.LocScaleDistribution",
"dist": "normal",
"scale_act": "softplus",
"scale": "var",
"name": "latent_dist",
},
"output_dist": {
"constructor": "iodine.modules.distributions.MaskedMixture",
"num_components": num_components,
"component_dist": {
"constructor":
"iodine.modules.distributions.LocScaleDistribution",
"dist":
"logistic",
"scale":
"fixed",
"scale_val":
0.03,
"name":
"pixel_distribution",
},
},
"factor_evaluator": {
"constructor":
"iodine.modules.factor_eval.FactorRegressor",
"mapping": [
("color", 3, "scalar"),
("shape", 4, "categorical"),
("scale", 1, "scalar"),
("x", 1, "scalar"),
("y", 1, "scalar"),
("orientation", 2, "angle"),
],
},
}
optimizer = {
"constructor": "tensorflow.train.AdamOptimizer",
"learning_rate": {
"constructor": "tensorflow.train.exponential_decay",
"learning_rate": learn_rate,
"global_step": {
"constructor": "tensorflow.train.get_or_create_global_step"
},
"decay_steps": 1000000,
"decay_rate": 0.1,
},
"beta1": 0.95,
}
def tetrominoes():
n_z = 32 # number of latent dimensions
num_components = 4 # number of components (K)
num_iters = 5
checkpoint_dir = "iodine/checkpoints/tetrominoes"
# For the paper we used 8 GPUs with a batch size of 32 each.
# This means a total batch size of 256, which is too large for a single GPU.
# When reducing the batch size, the learning rate should also be lowered.
batch_size = 128
learn_rate = 0.0003 * math.sqrt(batch_size / 256)
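  # Note: batch_size = 128 is already half the paper's total of 256, so the
  # square-root rule gives learn_rate = 0.0003 * sqrt(1 / 2) ~= 2.12e-4.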
data = {
"constructor": "iodine.modules.data.Tetrominoes",
"batch_size": batch_size,
"path": "iodine/multi_object_datasets/tetrominoes_train.tfrecords",
}
model = {
"constructor": "iodine.modules.iodine.IODINE",
"n_z": n_z,
"num_components": num_components,
"num_iters": num_iters,
"iter_loss_weight": "linspace",
"coord_type": "cos",
"coord_freqs": 3,
"decoder": {
"constructor": "iodine.modules.decoder.ComponentDecoder",
"pixel_decoder": {
"constructor": "iodine.modules.networks.BroadcastConv",
"cnn_opt": {
# Final channels is irrelevant with target_output_shape
"output_channels": [32, 32, 32, 32, None],
"kernel_shapes": [5],
"strides": [1],
"activation": "elu",
},
"coord_type": "linear",
"coord_freqs": 3,
},
},
"refinement_core": {
"constructor": "iodine.modules.refinement.RefinementCore",
"encoder_net": {
"constructor": "iodine.modules.networks.CNN",
"mode": "avg_pool",
"cnn_opt": {
"output_channels": [32, 32, 32],
"strides": [2],
"kernel_shapes": [5],
"activation": "elu",
},
"mlp_opt": {
"output_sizes": [128],
"activation": "elu"
},
},
"recurrent_net": {
"constructor": "iodine.modules.networks.LSTM",
"hidden_sizes": [], # No recurrent layer used for this dataset
},
"refinement_head": {
"constructor": "iodine.modules.refinement.ResHead"
},
},
"latent_dist": {
"constructor": "iodine.modules.distributions.LocScaleDistribution",
"dist": "normal",
"scale_act": "softplus",
"scale": "var",
"name": "latent_dist",
},
"output_dist": {
"constructor": "iodine.modules.distributions.MaskedMixture",
"num_components": num_components,
"component_dist": {
"constructor":
"iodine.modules.distributions.LocScaleDistribution",
"dist":
"logistic",
"scale":
"fixed",
"scale_val":
0.03,
"name":
"pixel_distribution",
},
},
"factor_evaluator": {
"constructor":
"iodine.modules.factor_eval.FactorRegressor",
"mapping": [
("position", 2, "scalar"),
("color", 3, "scalar"),
("shape", 20, "categorical"),
],
},
}
optimizer = {
"constructor": "tensorflow.train.AdamOptimizer",
"learning_rate": {
"constructor": "tensorflow.train.exponential_decay",
"learning_rate": learn_rate,
"global_step": {
"constructor": "tensorflow.train.get_or_create_global_step"
},
"decay_steps": 1000000,
"decay_rate": 0.1,
},
"beta1": 0.95,
}
| deepmind-research-master | iodine/configurations.py |
# Lint as: python3
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-importing-member, g-multiple-import, g-import-not-at-top
# pylint: disable=protected-access, g-bad-import-order, missing-docstring
# pylint: disable=unused-variable, invalid-name, no-value-for-parameter
from copy import deepcopy
import os.path
import warnings
from absl import logging
import numpy as np
from sacred import Experiment, SETTINGS
# Ignore all tensorflow deprecation warnings
logging._warn_preinit_stderr = 0
warnings.filterwarnings("ignore", module=".*tensorflow.*")
import tensorflow.compat.v1 as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import sonnet as snt
from sacred.stflow import LogFileWriter
from iodine.modules import utils
from iodine import configurations
SETTINGS.CONFIG.READ_ONLY_CONFIG = False
ex = Experiment("iodine")
@ex.config
def default_config():
continue_run = False # set to continue experiment from an existing checkpoint
checkpoint_dir = ("checkpoints/iodine"
) # if continue_run is False, "_{run_id}" will be appended
save_summaries_steps = 10
save_checkpoint_steps = 1000
n_z = 64 # number of latent dimensions
num_components = 7 # number of components (K)
num_iters = 5
learn_rate = 0.001
batch_size = 4
stop_after_steps = int(1e6)
# Details for the dataset, model and optimizer are left empty here.
# They can be found in the configurations for individual datasets,
# which are provided in configurations.py and added as named configs.
data = {} # Dataset details will go here
model = {} # Model details will go here
optimizer = {} # Optimizer details will go here
ex.named_config(configurations.clevr6)
ex.named_config(configurations.multi_dsprites)
ex.named_config(configurations.tetrominoes)
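# A named config is selected on the Sacred command line, e.g. (assuming the
# script is launched as a module; adapt the invocation to your setup):
#   python -m iodine.main with tetrominoes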
@ex.capture
def build(identifier, _config):
config_copy = deepcopy(_config[identifier])
return utils.build(config_copy, identifier=identifier)
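# NOTE: the following is an illustrative sketch only -- the real logic lives in
# iodine.modules.utils.build and may differ. It demonstrates the assumed
# convention behind the config dicts in configurations.py: "constructor" names
# a class or function by dotted path, every other key is a keyword argument,
# and nested dicts carrying their own "constructor" are built recursively.
# `_resolve_sketch` and `_build_sketch` are hypothetical helpers, not part of
# the original code base, and are unused by the training script.
def _resolve_sketch(dotted_path):
  import importlib
  module_name, attr_name = dotted_path.rsplit(".", 1)
  return getattr(importlib.import_module(module_name), attr_name)
def _build_sketch(config):
  kwargs = dict(config)
  constructor = _resolve_sketch(kwargs.pop("constructor"))
  kwargs = {
      k: _build_sketch(v) if isinstance(v, dict) and "constructor" in v else v
      for k, v in kwargs.items()
  }
  return constructor(**kwargs)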
def get_train_step(model, dataset, optimizer):
loss, scalars, _ = model(dataset("train"))
global_step = tf.train.get_or_create_global_step()
grads = optimizer.compute_gradients(loss)
gradients, variables = zip(*grads)
global_norm = tf.global_norm(gradients)
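  # Clip gradients to a global norm of 5.0 for stability; passing use_norm
  # avoids recomputing it, and the (pre-clip) norm is logged below as
  # debug/global_grad_norm.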
gradients, global_norm = tf.clip_by_global_norm(
gradients, 5.0, use_norm=global_norm)
grads = zip(gradients, variables)
train_op = optimizer.apply_gradients(grads, global_step=global_step)
with tf.control_dependencies([train_op]):
overview = model.get_overview_images(dataset("summary"))
scalars["debug/global_grad_norm"] = global_norm
summaries = {
k: tf.summary.scalar(k, v) for k, v in scalars.items()
}
summaries.update(
{k: tf.summary.image(k, v) for k, v in overview.items()})
return tf.identity(global_step), scalars, train_op
@ex.capture
def get_checkpoint_dir(continue_run, checkpoint_dir, _run, _log):
if continue_run:
assert os.path.exists(checkpoint_dir)
_log.info("Continuing run from checkpoint at {}".format(checkpoint_dir))
return checkpoint_dir
run_id = _run._id
if run_id is None: # then no observer was added that provided an _id
if not _run.unobserved:
_log.warning(
"No run_id given or provided by an Observer. (Re-)using run_id=1.")
run_id = 1
checkpoint_dir = checkpoint_dir + "_{run_id}".format(run_id=run_id)
_log.info(
"Starting a new run using checkpoint dir: '{}'".format(checkpoint_dir))
return checkpoint_dir
@ex.capture
def get_session(chkp_dir, loss, stop_after_steps, save_summaries_steps,
save_checkpoint_steps):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
hooks = [
tf.train.StopAtStepHook(last_step=stop_after_steps),
tf.train.NanTensorHook(loss),
]
return tf.train.MonitoredTrainingSession(
hooks=hooks,
config=config,
checkpoint_dir=chkp_dir,
save_summaries_steps=save_summaries_steps,
save_checkpoint_steps=save_checkpoint_steps,
)
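# `load_checkpoint` is a Sacred command meant for interactive use (e.g. from a
# notebook): it rebuilds the graph, restores the latest checkpoint from
# checkpoint_dir and returns handles to the session, model, inputs and dataset.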
@ex.command(unobserved=True)
def load_checkpoint(use_placeholder=False, session=None):
dataset = build("data")
model = build("model")
if use_placeholder:
inputs = dataset.get_placeholders()
else:
inputs = dataset()
info = model.eval(inputs)
if session is None:
session = tf.Session()
saver = tf.train.Saver()
checkpoint_dir = get_checkpoint_dir()
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
saver.restore(session, checkpoint_file)
print('Successfully restored Checkpoint "{}"'.format(checkpoint_file))
# print variables
variables = tf.global_variables() + tf.local_variables()
for row in snt.format_variables(variables, join_lines=False):
print(row)
return {
"session": session,
"model": model,
"info": info,
"inputs": inputs,
"dataset": dataset,
}
@ex.automain
@LogFileWriter(ex)
def main(save_summaries_steps):
checkpoint_dir = get_checkpoint_dir()
dataset = build("data")
model = build("model")
optimizer = build("optimizer")
gstep, train_step_exports, train_op = get_train_step(model, dataset,
optimizer)
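  # Accumulate per-step loss and ARI (excluding background) and log their
  # running means every `save_summaries_steps` steps.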
loss, ari = [], []
with get_session(checkpoint_dir, train_step_exports["loss/total"]) as sess:
while not sess.should_stop():
out = sess.run({
"step": gstep,
"loss": train_step_exports["loss/total"],
"ari": train_step_exports["loss/ari_nobg"],
"train": train_op,
})
loss.append(out["loss"])
ari.append(out["ari"])
step = out["step"]
if step % save_summaries_steps == 0:
mean_loss = np.mean(loss)
mean_ari = np.mean(ari)
ex.log_scalar("loss", mean_loss, step)
ex.log_scalar("ari", mean_ari, step)
print("{step:>6d} Loss: {loss: >12.2f}\t\tARI-nobg:{ari: >6.2f}".format(
step=step, loss=mean_loss, ari=mean_ari))
loss, ari = [], []
| deepmind-research-master | iodine/main.py |
# Lint as: python3
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoders for rendering images."""
# pylint: disable=missing-docstring
from iodine.modules.distributions import MixtureParameters
import shapeguard
import sonnet as snt
class ComponentDecoder(snt.AbstractModule):
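  """Splits a pixel decoder's output into pixel and mask parameters."""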
def __init__(self, pixel_decoder, name="component_decoder"):
super().__init__(name=name)
self._pixel_decoder = pixel_decoder
self._sg = shapeguard.ShapeGuard()
def set_output_shapes(self, pixel, mask):
self._sg.guard(pixel, "K, H, W, Cp")
self._sg.guard(mask, "K, H, W, 1")
self._pixel_decoder.set_output_shapes(self._sg["H, W, 1 + Cp"])
def _build(self, z):
self._sg.guard(z, "B, K, Z")
z_flat = self._sg.reshape(z, "B*K, Z")
pixel_params = self._pixel_decoder(z_flat).params
self._sg.guard(pixel_params, "B*K, H, W, 1 + Cp")
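    # Channel 0 of the decoded output provides the mask parameters; the
    # remaining Cp channels parameterize the per-component pixel distribution.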
mask_params = pixel_params[..., 0:1]
pixel_params = pixel_params[..., 1:]
output = MixtureParameters(
pixel=self._sg.reshape(pixel_params, "B, K, H, W, Cp"),
mask=self._sg.reshape(mask_params, "B, K, H, W, 1"),
)
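    # Unbind the batch dimension so the shape guard accepts a different batch
    # size on the next call.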
del self._sg.B
return output
| deepmind-research-master | iodine/modules/decoder.py |